From 57d75561be5496289601b2c94787ec38c718fcae Mon Sep 17 00:00:00 2001
From: Roel Kluin <12o3l@tiscali.nl>
Date: Sun, 28 Oct 2007 00:22:13 +1000
Subject: [POWERPC] allocation fix in ppc/platforms/4xx/luan.c

Don't allocate hose2 when hose1 can't be allocated, and free hose1
when hose2 can't be allocated.

Signed-off-by: Roel Kluin <12o3l@tiscali.nl>
Signed-off-by: Josh Boyer
---
 arch/ppc/platforms/4xx/luan.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index 4b169610f15..b79ebb8a3e6 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -230,9 +230,14 @@ luan_setup_hoses(void)
 
 	/* Allocate hoses for PCIX1 and PCIX2 */
 	hose1 = pcibios_alloc_controller();
+	if (!hose1)
+		return;
+
 	hose2 = pcibios_alloc_controller();
-	if (!hose1 || !hose2)
+	if (!hose2) {
+		pcibios_free_controller(hose1);
 		return;
+	}
 
 	/* Setup PCIX1 */
 	hose1->first_busno = 0;
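
The fix follows the standard unwind-on-failure pattern: check each
allocation as it is made, and when a later one fails, release everything
acquired earlier before bailing out. Below is a minimal stand-alone C
sketch of the same shape; the struct and the alloc/free helpers are
hypothetical stand-ins for pcibios_alloc_controller() and
pcibios_free_controller(), not the kernel API.

#include <stdlib.h>

/* Hypothetical stand-in for struct pci_controller. */
struct controller {
	int first_busno;
	int last_busno;
};

static struct controller *alloc_controller(void)
{
	return calloc(1, sizeof(struct controller));
}

static void free_controller(struct controller *c)
{
	free(c);
}

static int setup_hoses(void)
{
	struct controller *hose1, *hose2;

	hose1 = alloc_controller();
	if (!hose1)
		return -1;		/* nothing acquired yet, just bail */

	hose2 = alloc_controller();
	if (!hose2) {
		free_controller(hose1);	/* undo the earlier allocation */
		return -1;
	}

	/* ... configure hose1 and hose2 here ... */

	free_controller(hose2);
	free_controller(hose1);
	return 0;
}

int main(void)
{
	return setup_hoses() ? EXIT_FAILURE : EXIT_SUCCESS;
}

With more than two resources, kernel code usually expresses the same
unwind as a chain of goto labels, so each failure path falls through and
frees exactly what was already acquired.
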
From e701d269aa28996f3502780951fe1b12d5d66b49 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 30 Oct 2007 09:46:06 +1100
Subject: [POWERPC] 4xx: Fix 4xx flush_tlb_page()

On 4xx CPUs, the current implementation of flush_tlb_page() uses a
low-level _tlbie() assembly function that only works for the current PID.
Thus, invalidations caused by, for example, a COW fault triggered by
get_user_pages() from a different context will not work properly,
causing, among other things, gdb breakpoints to fail.

This patch adds a "pid" argument to _tlbie() on 4xx processors, and
uses it to flush entries in the right context. FSL BookE also gets the
argument but it seems they don't need it (their tlbivax form ignores the
PID when invalidating, according to the document I have).

Signed-off-by: Benjamin Herrenschmidt
Acked-by: Kumar Gala
Signed-off-by: Josh Boyer
---
 arch/powerpc/kernel/misc_32.S    | 23 ++++++++++++++++-------
 arch/powerpc/mm/fault.c          |  2 +-
 arch/powerpc/mm/mmu_decl.h       |  4 ++--
 arch/ppc/kernel/misc.S           | 22 +++++++++++++++-------
 arch/ppc/mm/fault.c              |  2 +-
 arch/ppc/mm/mmu_decl.h           |  4 ++--
 arch/ppc/platforms/4xx/ebony.c   |  2 +-
 arch/ppc/platforms/4xx/ocotea.c  |  2 +-
 arch/ppc/platforms/4xx/taishan.c |  2 +-
 include/asm-powerpc/tlbflush.h   | 12 ++++++------
 10 files changed, 46 insertions(+), 29 deletions(-)

diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347..0ed2c7eddc9 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
 	tlbwe	r3, r3, TLB_TAG
 	isync
 10:
+
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID		/* Get PID */
-	rlwimi	r4,r5,0,24,31		/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31		/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check).  Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a18fda361cc..8135da06e0a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -309,7 +309,7 @@ good_area:
 			set_bit(PG_arch_1, &page->flags);
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC);
-		_tlbie(address);
+		_tlbie(address, mm->context.id);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;

diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c94a64fd3c0..eb3a732e91d 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index a22e1f4d94c..2b81e71d6b2 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -224,7 +224,16 @@ _GLOBAL(_tlbia)
  */
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
+	/* We run the search with interrupts disabled because we have to change
+	 * the PID and I don't want to preempt when that happens.
+	 */
+	mfmsr	r5
+	mfspr	r6,SPRN_PID
+	wrteei	0
+	mtspr	SPRN_PID,r4
 	tlbsx.	r3, 0, r3
+	mtspr	SPRN_PID,r6
+	wrtee	r5
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
@@ -234,22 +243,21 @@ _GLOBAL(_tlbie)
 	isync
10:
 #elif defined(CONFIG_44x)
-	mfspr	r4,SPRN_MMUCR
-	mfspr	r5,SPRN_PID		/* Get PID */
-	rlwimi	r4,r5,0,24,31		/* Set TID */
+	mfspr	r5,SPRN_MMUCR
+	rlwimi	r5,r4,0,24,31		/* Set TID */
 
 	/* We have to run the search with interrupts disabled, even critical
 	 * and debug interrupts (in fact the only critical exceptions we have
 	 * are debug and machine check).  Otherwise an interrupt which causes
 	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
-	mfmsr	r5
+	mfmsr	r4
 	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
 	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
-	andc	r6,r5,r6
+	andc	r6,r4,r6
 	mtmsr	r6
-	mtspr	SPRN_MMUCR,r4
+	mtspr	SPRN_MMUCR,r5
 	tlbsx.	r3, 0, r3
-	mtmsr	r5
+	mtmsr	r4
 	bne	10f
 	sync
 	/* There are only 64 TLB entries, so r3 < 64,

diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 254c23b755e..36c0e7529ed 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -227,7 +227,7 @@ good_area:
 			set_bit(PG_arch_1, &page->flags);
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC);
-		_tlbie(address);
+		_tlbie(address, mm->context.id);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;

diff --git a/arch/ppc/mm/mmu_decl.h b/arch/ppc/mm/mmu_decl.h
index 540f3292b22..f1d4f2109a9 100644
--- a/arch/ppc/mm/mmu_decl.h
+++ b/arch/ppc/mm/mmu_decl.h
@@ -54,12 +54,12 @@ extern unsigned int num_tlbcam_entries;
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);

diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 05d7184d7e1..453643a0eee 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -236,7 +236,7 @@ ebony_early_serial_map(void)
 		gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GP_UART1_ADDR, 8);

diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index fd0f971881d..28a712cd480 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -259,7 +259,7 @@ ocotea_early_serial_map(void)
 		gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);

diff --git a/arch/ppc/platforms/4xx/taishan.c b/arch/ppc/platforms/4xx/taishan.c
index 888c492b4a4..f6a0c6650f3 100644
--- a/arch/ppc/platforms/4xx/taishan.c
+++ b/arch/ppc/platforms/4xx/taishan.c
@@ -316,7 +316,7 @@ taishan_early_serial_map(void)
 		gen550_init(0, &port);
 
 	/* Purge TLB entry added in head_44x.S for early serial access */
-	_tlbie(UART0_IO_BASE);
+	_tlbie(UART0_IO_BASE, 0);
 #endif
 
 	port.membase = ioremap64(PPC440GX_UART1_ADDR, 8);

diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index b6b036ccee3..e7b4c0d298a 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -1,5 +1,6 @@
 #ifndef _ASM_POWERPC_TLBFLUSH_H
 #define _ASM_POWERPC_TLBFLUSH_H
+
 /*
  * TLB flushing:
  *
@@ -16,9 +17,6 @@
  */
 #ifdef __KERNEL__
 
-struct mm_struct;
-struct vm_area_struct;
-
 #if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
 /*
  * TLB flushing for software loaded TLB chips
  *
@@ -28,7 +26,9 @@ struct vm_area_struct;
  * specific tlbie's
  */
 
-extern void _tlbie(unsigned long address);
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -44,13 +44,13 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-	_tlbie(vmaddr);
+	_tlbie(vmaddr, vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-	_tlbie(vmaddr);
+	_tlbie(vmaddr, vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
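
The point of the patch is that on software-loaded TLBs an entry is
identified by the pair (PID, virtual address), not by the address alone,
so an invalidation performed on behalf of another context has to carry
that context's PID. The toy software TLB below makes the failure mode
concrete; the entry layout, sizes, and function names are illustrative
only, not the 4xx hardware format or the kernel interface.

#include <stdbool.h>
#include <stdio.h>

#define TLB_ENTRIES 64

struct tlb_entry {
	bool valid;
	unsigned int pid;	/* TID tag: which address space */
	unsigned long vaddr;
};

static struct tlb_entry tlb[TLB_ENTRIES];

/* The old interface: implicitly keyed on the current PID, so a COW
 * fault taken on behalf of another process (e.g. via get_user_pages())
 * flushes the wrong context and leaves a stale entry behind. */
static void tlbie_current_only(unsigned long vaddr, unsigned int current_pid)
{
	for (int i = 0; i < TLB_ENTRIES; i++)
		if (tlb[i].valid && tlb[i].pid == current_pid &&
		    tlb[i].vaddr == vaddr)
			tlb[i].valid = false;
}

/* The fixed interface: the caller names the context to flush. */
static void tlbie_pid(unsigned long vaddr, unsigned int pid)
{
	for (int i = 0; i < TLB_ENTRIES; i++)
		if (tlb[i].valid && tlb[i].pid == pid && tlb[i].vaddr == vaddr)
			tlb[i].valid = false;
}

int main(void)
{
	tlb[0] = (struct tlb_entry){ true, 7, 0x10000000UL }; /* debuggee */

	tlbie_current_only(0x10000000UL, 3);	/* debugger's PID: misses */
	printf("address-only flush: entry still valid? %d\n", tlb[0].valid);

	tlbie_pid(0x10000000UL, 7);		/* target PID: hits */
	printf("pid-aware flush:    entry still valid? %d\n", tlb[0].valid);
	return 0;
}
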
From b98ac05d5e460301fbea24cceed0f2a601c82e22 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Wed, 31 Oct 2007 16:42:19 +1100
Subject: [POWERPC] 4xx: Deal with 44x virtually tagged icache

The 44x family has an interesting "feature" which is a virtually
tagged instruction cache (yuck!). So far, we haven't dealt with it
properly, which means we've been mostly lucky or people didn't report
the problems, unless people have been running custom patches in their
distro...

This is an attempt at fixing it properly. I chose to do it by setting
a global flag whenever we change a PTE that was previously marked
executable, and flushing the entire instruction cache upon return to
user space when that happens.

This is a bit heavy-handed, but it's hard to do more fine-grained
flushes, as the icbi instruction on those processors, for some very
strange reason (since the cache is virtually mapped), still requires
a valid TLB entry for reading in the target address space, which isn't
something I want to deal with.

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Josh Boyer
---
 arch/powerpc/kernel/entry_32.S      | 23 +++++++++++++++++++++++
 arch/powerpc/kernel/misc_32.S       |  9 +++++++++
 arch/powerpc/mm/44x_mmu.c           |  1 +
 arch/ppc/kernel/entry.S             | 23 +++++++++++++++++++++++
 arch/ppc/kernel/misc.S              |  9 +++++++++
 arch/ppc/mm/44x_mmu.c               |  1 +
 include/asm-powerpc/pgtable-ppc32.h | 13 +++++++++++++
 7 files changed, 79 insertions(+)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 21d889e63e8..a7572cf464b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -244,6 +244,13 @@ syscall_exit_cont:
 	andis.	r10,r0,DBCR0_IC@h
 	bnel-	load_dbcr0
 #endif
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	bne-	2f
+1:
+#endif /* CONFIG_44x */
 	stwcx.	r0,0,r1			/* to clear the reservation */
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
@@ -258,6 +265,12 @@ syscall_exit_cont:
 	mtspr	SPRN_SRR1,r8
 	SYNC
 	RFI
+#ifdef CONFIG_44x
+2:	li	r7,0
+	iccci	r0,r0
+	stw	r7,icache_44x_need_flush@l(r4)
+	b	1b
+#endif /* CONFIG_44x */
 
 66:	li	r3,-ENOSYS
 	b	ret_from_syscall
@@ -683,6 +696,16 @@ resume_kernel:
 
 	/* interrupts are hard-disabled at this point */
 restore:
+#ifdef CONFIG_44x
+	lis	r4,icache_44x_need_flush@ha
+	lwz	r5,icache_44x_need_flush@l(r4)
+	cmplwi	cr0,r5,0
+	beq+	1f
+	li	r6,0
+	iccci	r0,r0
+	stw	r6,icache_44x_need_flush@l(r4)
+1:
+#endif /* CONFIG_44x */
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)

diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 0ed2c7eddc9..8b642ab26d3 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -543,12 +543,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
+#ifndef CONFIG_44x
+	/* We don't flush the icache on 44x. Those have a virtual icache
+	 * and we don't have access to the virtual address here (it's
+	 * not the page vaddr but where it's mapped in user space). The
+	 * flushing of the icache on these is handled elsewhere, when
+	 * a change in the address space occurs, before returning to
+	 * user space
+	 */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
+#endif /* CONFIG_44x */
 	blr
 
 /*

diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index c3df5047653..04dc08798d3 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -35,6 +35,7 @@
  */
 unsigned int tlb_44x_index; /* = 0 */
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
+int icache_44x_need_flush;
 
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
The + * flushing of the icache on these is handled elsewhere, when + * a change in the address space occurs, before returning to + * user space + */ mtctr r4 1: icbi 0,r6 addi r6,r6,L1_CACHE_BYTES bdnz 1b sync isync +#endif /* CONFIG_44x */ blr /* diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index c3df5047653..04dc08798d3 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -35,6 +35,7 @@ */ unsigned int tlb_44x_index; /* = 0 */ unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; +int icache_44x_need_flush; /* * "Pins" a 256MB TLB entry in AS0 for kernel lowmem diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S index fba7ca17a67..b19bfef2034 100644 --- a/arch/ppc/kernel/entry.S +++ b/arch/ppc/kernel/entry.S @@ -244,6 +244,13 @@ syscall_exit_cont: andis. r10,r0,DBCR0_IC@h bnel- load_dbcr0 #endif +#ifdef CONFIG_44x + lis r4,icache_44x_need_flush@ha + lwz r5,icache_44x_need_flush@l(r4) + cmplwi cr0,r5,0 + bne- 2f +1: +#endif /* CONFIG_44x */ stwcx. r0,0,r1 /* to clear the reservation */ lwz r4,_LINK(r1) lwz r5,_CCR(r1) @@ -258,6 +265,12 @@ syscall_exit_cont: mtspr SPRN_SRR1,r8 SYNC RFI +#ifdef CONFIG_44x +2: li r7,0 + iccci r0,r0 + stw r7,icache_44x_need_flush@l(r4) + b 1b +#endif /* CONFIG_44x */ 66: li r3,-ENOSYS b ret_from_syscall @@ -679,6 +692,16 @@ resume_kernel: /* interrupts are hard-disabled at this point */ restore: +#ifdef CONFIG_44x + lis r4,icache_44x_need_flush@ha + lwz r5,icache_44x_need_flush@l(r4) + cmplwi cr0,r5,0 + beq+ 1f + li r6,0 + iccci r0,r0 + stw r6,icache_44x_need_flush@l(r4) +1: +#endif /* CONFIG_44x */ lwz r0,GPR0(r1) lwz r2,GPR2(r1) REST_4GPRS(3, r1) diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 2b81e71d6b2..e0c850d85c5 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -499,12 +499,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) addi r3,r3,L1_CACHE_BYTES bdnz 0b sync +#ifndef CONFIG_44x + /* We don't flush the icache on 44x. Those have a virtual icache + * and we don't have access to the virtual address here (it's + * not the page vaddr but where it's mapped in user space). 
The + * flushing of the icache on these is handled elsewhere, when + * a change in the address space occurs, before returning to + * user space + */ mtctr r4 1: icbi 0,r6 addi r6,r6,L1_CACHE_BYTES bdnz 1b sync isync +#endif /* CONFIG_44x */ blr /* diff --git a/arch/ppc/mm/44x_mmu.c b/arch/ppc/mm/44x_mmu.c index 0a0a0487b33..6536a25cfcb 100644 --- a/arch/ppc/mm/44x_mmu.c +++ b/arch/ppc/mm/44x_mmu.c @@ -61,6 +61,7 @@ extern char etext[], _stext[]; */ unsigned int tlb_44x_index = 0; unsigned int tlb_44x_hwater = 62; +int icache_44x_need_flush; /* * "Pins" a 256MB TLB entry in AS0 for kernel lowmem diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h index 86a54a4a8a2..fea2d8ff1e7 100644 --- a/include/asm-powerpc/pgtable-ppc32.h +++ b/include/asm-powerpc/pgtable-ppc32.h @@ -11,6 +11,11 @@ extern unsigned long va_to_phys(unsigned long address); extern pte_t *va_to_pte(unsigned long address); extern unsigned long ioremap_bot, ioremap_base; + +#ifdef CONFIG_44x +extern int icache_44x_need_flush; +#endif + #endif /* __ASSEMBLY__ */ /* @@ -562,6 +567,10 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, : "=&r" (old), "=&r" (tmp), "=m" (*p) : "r" (p), "r" (clr), "r" (set), "m" (*p) : "cc" ); +#ifdef CONFIG_44x + if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) + icache_44x_need_flush = 1; +#endif return old; } #else @@ -582,6 +591,10 @@ static inline unsigned long long pte_update(pte_t *p, unsigned long clr, : "=&r" (old), "=&r" (tmp), "=m" (*p) : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) : "cc" ); +#ifdef CONFIG_44x + if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC)) + icache_44x_need_flush = 1; +#endif return old; } #endif -- cgit v1.2.3-70-g09d2 From bd942ba3db60d3bd4e21febbe7c5e339d973d5a8 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Wed, 31 Oct 2007 17:41:20 +1100 Subject: [POWERPC] ppc405 Fix arithmatic rollover bug when memory size under 16M mmu_mapin_ram() loops over total_lowmem to setup page tables. However, if total_lowmem is less that 16M, the subtraction rolls over and results in a number just under 4G (because total_lowmem is an unsigned value). This patch rejigs the loop from countup to countdown to eliminate the bug. Special thanks to Magnus Hjorth who wrote the original patch to fix this bug. This patch improves on his by making the loop code simpler (which also eliminates the possibility of another rollover at the high end) and also applies the change to arch/powerpc. 
Signed-off-by: Grant Likely
Signed-off-by: Josh Boyer
---
 arch/powerpc/mm/40x_mmu.c | 17 ++++++++---------
 arch/ppc/mm/4xx_mmu.c     | 17 ++++++++---------
 2 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index e067df836be..3899ea97fbd 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -98,13 +98,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -116,10 +115,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -128,8 +127,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }

diff --git a/arch/ppc/mm/4xx_mmu.c b/arch/ppc/mm/4xx_mmu.c
index 838e09db71d..ea785dbaac7 100644
--- a/arch/ppc/mm/4xx_mmu.c
+++ b/arch/ppc/mm/4xx_mmu.c
@@ -99,13 +99,12 @@ unsigned long __init mmu_mapin_ram(void)
 
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;
 
-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -117,10 +116,10 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}
 
-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
@@ -129,8 +128,8 @@ unsigned long __init mmu_mapin_ram(void)
 
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}
 
-	return s;
+	return total_lowmem - s;
 }
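
The bug class deserves a stand-alone demonstration: with unsigned
arithmetic, total_lowmem - LARGE_PAGE_SIZE_16M wraps around whenever
total_lowmem is smaller than the page size, so the count-up loop's bound
silently becomes enormous, while the count-down form never subtracts
more than what is left. The small C program below shows both forms; the
memory sizes are made up for illustration.

#include <stdio.h>

#define LARGE_PAGE_SIZE_16M	(16UL << 20)

int main(void)
{
	unsigned long total_lowmem = 8UL << 20;	/* only 8M of RAM */

	/* Old bound: 8M - 16M wraps to a huge value (just under 4G with
	 * a 32-bit unsigned long), so "s <= bound" with s = 0 would enter
	 * the loop and try to map far more memory than exists. */
	unsigned long bad_bound = total_lowmem - LARGE_PAGE_SIZE_16M;
	printf("old loop bound: %#lx\n", bad_bound);

	/* New scheme: count s down from total_lowmem.  With s < 16M the
	 * condition is false immediately and nothing can wrap. */
	unsigned long s = total_lowmem;
	while (s >= LARGE_PAGE_SIZE_16M)
		s -= LARGE_PAGE_SIZE_16M;
	printf("mapped by the 16M loop: %#lx bytes\n", total_lowmem - s);

	return 0;
}
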