Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/44x_mmu.c		|	1
-rw-r--r--	arch/powerpc/mm/4xx_mmu.c		|	1
-rw-r--r--	arch/powerpc/mm/fault.c			|	37
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c		|	1
-rw-r--r--	arch/powerpc/mm/hash_low_32.S		|	35
-rw-r--r--	arch/powerpc/mm/hash_low_64.S		|	32
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	|	5
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c		|	191
-rw-r--r--	arch/powerpc/mm/init_32.c		|	1
-rw-r--r--	arch/powerpc/mm/init_64.c		|	4
-rw-r--r--	arch/powerpc/mm/lmb.c			|	44
-rw-r--r--	arch/powerpc/mm/mem.c			|	18
-rw-r--r--	arch/powerpc/mm/mmu_context_32.c	|	3
-rw-r--r--	arch/powerpc/mm/mmu_context_64.c	|	6
-rw-r--r--	arch/powerpc/mm/numa.c			|	19
-rw-r--r--	arch/powerpc/mm/pgtable_32.c		|	1
-rw-r--r--	arch/powerpc/mm/pgtable_64.c		|	1
-rw-r--r--	arch/powerpc/mm/ppc_mmu_32.c		|	17
-rw-r--r--	arch/powerpc/mm/slb.c			|	33
-rw-r--r--	arch/powerpc/mm/slb_low.S		|	18
-rw-r--r--	arch/powerpc/mm/stab.c			|	5
-rw-r--r--	arch/powerpc/mm/tlb_32.c		|	7
-rw-r--r--	arch/powerpc/mm/tlb_64.c		|	6
23 files changed, 282 insertions, 204 deletions
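
A note on the fault.c change below: the unconditional notify_die() call in do_page_fault() becomes a dedicated, kprobes-only notifier chain, so the fault path pays for notification only when CONFIG_KPROBES is set and a consumer has registered. A client would attach to the new chain roughly as follows. This is a minimal sketch, not part of the commit; the module and handler names are invented, while register_page_fault_notifier(), unregister_page_fault_notifier() and the struct die_args payload are the ones defined in the hunk.

/* Hypothetical module-side user of the page fault notifier chain. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>		/* enum die_val, struct die_args */

static int my_pf_handler(struct notifier_block *self,
			 unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_PAGE_FAULT)
		printk(KERN_DEBUG "page fault at nip %lx\n", args->regs->nip);
	return NOTIFY_DONE;	/* let do_page_fault() continue normally */
}

static struct notifier_block my_pf_nb = {
	.notifier_call = my_pf_handler,
};

static int __init my_init(void)
{
	return register_page_fault_notifier(&my_pf_nb);
}

static void __exit my_exit(void)
{
	unregister_page_fault_notifier(&my_pf_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Returning NOTIFY_STOP from the handler would make do_page_fault() return early, as the updated call site in the hunk shows.
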
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3d79ce281b6..376829ed221 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,7 +24,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index 4d006aa1a0d..838e09db71d 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -21,7 +21,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index fdbba4206d5..78a0d59903e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -15,7 +15,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -40,6 +39,40 @@
 #include <asm/kdebug.h>
 #include <asm/siginfo.h>
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = {
+		.regs = regs,
+		.str = str,
+		.err = err,
+		.trapnr = trap,
+		.signr = sig
+	};
+	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+			struct pt_regs *regs, long err, int trap, int sig)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
 /*
  * Check whether the instruction at regs->nip is a store using
  * an update addressing form which will update r1.
@@ -142,7 +175,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
-	if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
+	if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
 				11, SIGSEGV) == NOTIFY_STOP)
 		return 0;
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 5d581bb3aa1..123da03ab11 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -26,7 +26,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ea469eefa14..bd68df5fa78 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -21,7 +21,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -74,12 +73,6 @@ _GLOBAL(hash_page_sync)
  */
 	.text
 _GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
-	mfmsr	r0
-	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
-	MTMSRD(r0)
-	isync
-#endif
 	tophys(r7,0)			/* gets -KERNELBASE into r7 */
 #ifdef CONFIG_SMP
 	addis	r8,r7,mmu_hash_lock@h
@@ -285,7 +278,6 @@ Hash_base = 0xc0180000
 Hash_bits = 12			/* e.g. 256kB hash table */
 Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
-#ifndef CONFIG_PPC64BRIDGE
 /* defines for the PTE format for 32-bit PPCs */
 #define PTE_SIZE	8
 #define PTEG_SIZE	64
@@ -299,21 +291,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 #define SET_V(r)	oris r,r,PTE_V@h
 #define CLR_V(r,t)	rlwinm r,r,0,1,31
 
-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE	16
-#define PTEG_SIZE	128
-#define LG_PTEG_SIZE	7
-#define LDPTEu		ldu
-#define STPTE		std
-#define CMPPTE		cmpd
-#define PTE_H		2
-#define PTE_V		1
-#define TST_V(r)	andi. r,r,PTE_V
-#define SET_V(r)	ori r,r,PTE_V
-#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
 #define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
 #define HASH_RIGHT	31-LG_PTEG_SIZE
 
@@ -331,14 +308,8 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
 
 	/* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r5,r3,12		/* shift vsid into position */
-	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r5)			/* set V (valid) bit */
 
 	/* Get the address of the primary PTE group in the hash table (r3) */
@@ -516,14 +487,8 @@ _GLOBAL(flush_hash_pages)
 	add	r3,r3,r0		/* note code below trims to 24 bits */
 
 	/* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r11,r3,12		/* shift vsid into position */
-	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r11)			/* set V (valid) bit */
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e0d02c4a261..9bc0a9c2b9b 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,6 @@
  * described in the kernel's COPYING file.
  */
 
-#include <linux/config.h>
 #include <asm/reg.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -136,6 +135,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -368,6 +368,7 @@ _GLOBAL(__hash_page_4K)
 	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
 	or	r30,r30,r31
 	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+	oris	r30,r30,_PAGE_COMBO@h
 	/* Write the linux PTE atomically (setting busy) */
 	stdcx.	r30,0,r6
 	bne-	1b
@@ -400,6 +401,7 @@ _GLOBAL(__hash_page_4K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
@@ -426,6 +428,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 	andi.	r0,r31,_PAGE_HASHPTE
 	li	r26,0			/* Default hidx */
 	beq	htab_insert_pte
+
+	/*
+	 * Check if the pte was already inserted into the hash table
+	 * as a 64k HW page, and invalidate the 64k HPTE if so.
+	 */
+	andis.	r0,r31,_PAGE_COMBO@h
+	beq	htab_inval_old_hpte
+
 	ld	r6,STK_PARM(r6)(r1)
 	ori	r26,r6,0x8000		/* Load the hidx mask */
 	ld	r26,0(r26)
@@ -496,6 +506,19 @@ _GLOBAL(htab_call_hpte_remove)
 	/* Try all again */
 	b	htab_insert_pte
 
+	/*
+	 * Call out to C code to invalidate an 64k HW HPTE that is
+	 * useless now that the segment has been switched to 4k pages.
+	 */
+htab_inval_old_hpte:
+	mr	r3,r29			/* virtual addr */
+	mr	r4,r31			/* PTE.pte */
+	li	r5,0			/* PTE.hidx */
+	li	r6,MMU_PAGE_64K		/* psize */
+	ld	r7,STK_PARM(r8)(r1)	/* local */
+	bl	.flush_hash_page
+	b	htab_insert_pte
+
 htab_bail_ok:
 	li	r3,0
 	b	htab_bail
@@ -636,6 +659,12 @@ _GLOBAL(__hash_page_64K)
 	 * is changing this PTE anyway and might hash it.
 	 */
 	bne-	ht64_bail_ok
+BEGIN_FTR_SECTION
+	/* Check if PTE has the cache-inhibit bit set */
+	andi.	r0,r31,_PAGE_NO_CACHE
+	/* If so, bail out and refault as a 4k page */
+	bne-	ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY,HASHPTE and ACCESSED)
 	 */
@@ -671,6 +700,7 @@ _GLOBAL(__hash_page_64K)
 	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
 	andc	r0,r30,r0		/* r0 = pte & ~r0 */
 	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
+	ori	r3,r3,HPTE_R_C		/* Always add "C" bit for perf. */
 
 	/* We eventually do the icache sync here (maybe inline that
 	 * code rather than call a C function...)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 994856e55b7..c90f124f3c7 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -238,7 +238,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N));
+			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 		native_unlock_hpte(hptep);
 	}
 
@@ -520,7 +520,7 @@ static inline int tlb_batching_enabled(void)
 }
 #endif
 
-void hpte_init_native(void)
+void __init hpte_init_native(void)
 {
 	ppc_md.hpte_invalidate	= native_hpte_invalidate;
 	ppc_md.hpte_updatepp	= native_hpte_updatepp;
@@ -530,5 +530,4 @@ void hpte_init_native(void)
 	ppc_md.hpte_clear_all	= native_hpte_clear;
 	if (tlb_batching_enabled())
 		ppc_md.flush_hash_range = native_flush_hash_range;
-	htab_finish_init();
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c006d903963..1915661c2c8 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -21,7 +21,6 @@
 #undef DEBUG
 #undef DEBUG_LOW
 
-#include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -92,10 +91,15 @@ unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
 #ifdef CONFIG_HUGETLB_PAGE
 int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
@@ -162,34 +166,12 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		hash = hpt_hash(va, shift);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
-		/* The crap below can be cleaned once ppd_md.probe() can
-		 * set up the hash callbacks, thus we can just used the
-		 * normal insert callback here.
-		 */
-#ifdef CONFIG_PPC_ISERIES
-		if (machine_is(iseries))
-			ret = iSeries_hpte_insert(hpteg, va,
-						  paddr,
-						  tmp_mode,
-						  HPTE_V_BOLTED,
-						  psize);
-		else
-#endif
-#ifdef CONFIG_PPC_PSERIES
-		if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR))
-			ret = pSeries_lpar_hpte_insert(hpteg, va,
-						       paddr,
-						       tmp_mode,
-						       HPTE_V_BOLTED,
-						       psize);
-		else
-#endif
-#ifdef CONFIG_PPC_MULTIPLATFORM
-		ret = native_hpte_insert(hpteg, va,
-					 paddr,
-					 tmp_mode, HPTE_V_BOLTED,
-					 psize);
-#endif
+		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);
+
+		BUG_ON(!ppc_md.hpte_insert);
+		ret = ppc_md.hpte_insert(hpteg, va, paddr,
+				tmp_mode, HPTE_V_BOLTED, psize);
+
 		if (ret < 0)
 			break;
 	}
@@ -308,20 +290,31 @@ static void __init htab_init_page_sizes(void)
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
 
+#ifdef CONFIG_PPC_64K_PAGES
 	/*
 	 * Pick a size for the ordinary pages. Default is 4K, we support
-	 * 64K if cache inhibited large pages are supported by the
-	 * processor
+	 * 64K for user mappings and vmalloc if supported by the processor.
+	 * We only use 64k for ioremap if the processor
+	 * (and firmware) support cache-inhibited large pages.
+	 * If not, we use 4k and set mmu_ci_restrictions so that
+	 * hash_page knows to switch processes that use cache-inhibited
+	 * mappings to 4k pages.
 	 */
-#ifdef CONFIG_PPC_64K_PAGES
-	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
-	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 		mmu_virtual_psize = MMU_PAGE_64K;
+		mmu_vmalloc_psize = MMU_PAGE_64K;
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+			mmu_io_psize = MMU_PAGE_64K;
+		else
+			mmu_ci_restrictions = 1;
+	}
 #endif
 
-	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+	       "virtual = %d, io = %d\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
-	       mmu_psize_defs[mmu_virtual_psize].shift);
+	       mmu_psize_defs[mmu_virtual_psize].shift,
+	       mmu_psize_defs[mmu_io_psize].shift);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
@@ -397,6 +390,41 @@ void create_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
+static inline void make_bl(unsigned int *insn_addr, void *func)
+{
+	unsigned long funcp = *((unsigned long *)func);
+	int offset = funcp - (unsigned long)insn_addr;
+
+	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
+	flush_icache_range((unsigned long)insn_addr, 4+
+			   (unsigned long)insn_addr);
+}
+
+static void __init htab_finish_init(void)
+{
+	extern unsigned int *htab_call_hpte_insert1;
+	extern unsigned int *htab_call_hpte_insert2;
+	extern unsigned int *htab_call_hpte_remove;
+	extern unsigned int *htab_call_hpte_updatepp;
+
+#ifdef CONFIG_PPC_64K_PAGES
+	extern unsigned int *ht64_call_hpte_insert1;
+	extern unsigned int *ht64_call_hpte_insert2;
+	extern unsigned int *ht64_call_hpte_remove;
+	extern unsigned int *ht64_call_hpte_updatepp;
+
+	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
+	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
+	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
+	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
+}
+
 void __init htab_initialize(void)
 {
 	unsigned long table;
@@ -509,6 +537,8 @@ void __init htab_initialize(void)
 					       mmu_linear_psize));
 	}
 
+	htab_finish_init();
+
 	DBG(" <- htab_initialize()\n");
 }
 #undef KB
@@ -556,6 +586,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	pte_t *ptep;
 	cpumask_t tmp;
 	int rc, user_region = 0, local = 0;
+	int psize;
 
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
@@ -575,10 +606,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 			return 1;
 		}
 		vsid = get_vsid(mm->context.id, ea);
+		psize = mm->context.user_psize;
 		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
 		break;
 	default:
 		/* Not a valid range
@@ -629,7 +665,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 #ifndef CONFIG_PPC_64K_PAGES
 	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			if (user_region) {
+				psize = MMU_PAGE_4K;
+				mm->context.user_psize = MMU_PAGE_4K;
+				mm->context.sllp = SLB_VSID_USER |
+					mmu_psize_defs[MMU_PAGE_4K].sllp;
+			} else if (ea < VMALLOC_END) {
+				/*
+				 * some driver did a non-cacheable mapping
+				 * in vmalloc space, so switch vmalloc
+				 * to 4k pages
+				 */
+				printk(KERN_ALERT "Reducing vmalloc segment "
+				       "to 4kB pages because of "
+				       "non-cacheable mapping\n");
+				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+			}
+		}
+		if (user_region) {
+			if (psize != get_paca()->context.user_psize) {
+				get_paca()->context = mm->context;
+				slb_flush_and_rebolt();
+			}
+		} else if (get_paca()->vmalloc_sllp !=
+			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+			get_paca()->vmalloc_sllp =
+				mmu_psize_defs[mmu_vmalloc_psize].sllp;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -681,7 +750,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
 	__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_virtual_psize == MMU_PAGE_64K)
+	if (mmu_ci_restrictions) {
+		/* If this PTE is non-cacheable, switch to 4k */
+		if (mm->context.user_psize == MMU_PAGE_64K &&
+		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+			mm->context.user_psize = MMU_PAGE_4K;
+			mm->context.sllp = SLB_VSID_USER |
+				mmu_psize_defs[MMU_PAGE_4K].sllp;
+			get_paca()->context = mm->context;
+			slb_flush_and_rebolt();
+		}
+	}
+	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
 		__hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -721,16 +801,6 @@ void flush_hash_range(unsigned long number, int local)
 	}
 }
 
-static inline void make_bl(unsigned int *insn_addr, void *func)
-{
-	unsigned long funcp = *((unsigned long *)func);
-	int offset = funcp - (unsigned long)insn_addr;
-
-	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
-	flush_icache_range((unsigned long)insn_addr, 4+
-			   (unsigned long)insn_addr);
-}
-
 /*
  * low_hash_fault is called when we the low level hash code failed
  * to instert a PTE due to an hypervisor error
@@ -749,28 +819,3 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 	}
 	bad_page_fault(regs, address, SIGBUS);
 }
-
-void __init htab_finish_init(void)
-{
-	extern unsigned int *htab_call_hpte_insert1;
-	extern unsigned int *htab_call_hpte_insert2;
-	extern unsigned int *htab_call_hpte_remove;
-	extern unsigned int *htab_call_hpte_updatepp;
-
-#ifdef CONFIG_PPC_64K_PAGES
-	extern unsigned int *ht64_call_hpte_insert1;
-	extern unsigned int *ht64_call_hpte_insert2;
-	extern unsigned int *ht64_call_hpte_remove;
-	extern unsigned int *ht64_call_hpte_updatepp;
-
-	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
-	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
-	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
-	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
-#endif /* CONFIG_PPC_64K_PAGES */
-
-	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
-	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
-	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
-	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
-}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index b57fb3a2b7b..0e53ca8f02f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -18,7 +18,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c18..3ff374697e3 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -22,7 +22,6 @@
 
 #undef DEBUG
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -41,6 +40,7 @@
 #include <linux/idr.h>
 #include <linux/nodemask.h>
 #include <linux/module.h>
+#include <linux/poison.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -90,7 +90,7 @@ void free_initmem(void)
 	addr = (unsigned long)__init_begin;
 	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
-		memset((void *)addr, 0xcc, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
 		free_page(addr);
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 417d5851855..4b17a735992 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -10,7 +10,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
@@ -89,20 +88,25 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
-		unsigned long r1, unsigned long r2)
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
 	unsigned long i;
 
-	rgn->region[r1].size += rgn->region[r2].size;
-	for (i=r2; i < rgn->cnt-1; i++) {
-		rgn->region[i].base = rgn->region[i+1].base;
-		rgn->region[i].size = rgn->region[i+1].size;
+	for (i = r; i < rgn->cnt - 1; i++) {
+		rgn->region[i].base = rgn->region[i + 1].base;
+		rgn->region[i].size = rgn->region[i + 1].size;
 	}
 	rgn->cnt--;
 }
 
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+		unsigned long r1, unsigned long r2)
+{
+	rgn->region[r1].size += rgn->region[r2].size;
+	lmb_remove_region(rgn, r2);
+}
+
 /* This routine called with relocation disabled. */
 void __init lmb_init(void)
 {
@@ -294,17 +298,16 @@ unsigned long __init lmb_end_of_DRAM(void)
 	return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
 }
 
-/*
- * Truncate the lmb list to memory_limit if it's set
- * You must call lmb_analyze() after this.
- */
+/* You must call lmb_analyze() after this. */
 void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 {
 	unsigned long i, limit;
+	struct lmb_property *p;
 
 	if (! memory_limit)
 		return;
 
+	/* Truncate the lmb regions to satisfy the memory limit. */
 	limit = memory_limit;
 	for (i = 0; i < lmb.memory.cnt; i++) {
 		if (limit > lmb.memory.region[i].size) {
@@ -316,4 +319,21 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
 		lmb.memory.cnt = i + 1;
 		break;
 	}
+
+	lmb.rmo_size = lmb.memory.region[0].size;
+
+	/* And truncate any reserves above the limit also. */
+	for (i = 0; i < lmb.reserved.cnt; i++) {
+		p = &lmb.reserved.region[i];
+
+		if (p->base > memory_limit)
+			p->size = 0;
+		else if ((p->base + p->size) > memory_limit)
+			p->size = memory_limit - p->base;
+
+		if (p->size == 0) {
+			lmb_remove_region(&lmb.reserved, i);
+			i--;
+		}
+	}
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 741dd8802d4..eebd8b83a6b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -18,7 +18,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -114,15 +113,20 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-int __devinit add_memory(u64 start, u64 size)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+	return hot_add_scn_to_nid(start);
+}
+#endif
+
+int __devinit arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdata;
 	struct zone *zone;
-	int nid;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	nid = hot_add_scn_to_nid(start);
 	pgdata = NODE_DATA(nid);
 
 	start = (unsigned long)__va(start);
@@ -299,9 +303,9 @@ void __init paging_init(void)
 	kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
 
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 	/*
 	 * All pages are DMA-able so we put them all in the DMA zone.
@@ -380,7 +384,7 @@ void __init mem_init(void)
 				totalhigh_pages++;
 		}
 		totalram_pages += totalhigh_pages;
-		printk(KERN_INFO "High memory: %luk\n",
+		printk(KERN_DEBUG "High memory: %luk\n",
 		       totalhigh_pages << (PAGE_SHIFT-10));
 	}
 #endif /* CONFIG_HIGHMEM */
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index a8816e0f6a8..792086b0100 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -23,14 +23,13 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
 unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 #ifdef FEW_CONTEXTS
 atomic_t nr_free_contexts;
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5..90a06ac02d5 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -10,7 +10,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -44,11 +43,16 @@ again:
 		return err;
 
 	if (index > MAX_CONTEXT) {
+		spin_lock(&mmu_context_lock);
 		idr_remove(&mmu_context_idr, index);
+		spin_unlock(&mmu_context_lock);
 		return -ENOMEM;
 	}
 
 	mm->context.id = index;
+	mm->context.user_psize = mmu_virtual_psize;
+	mm->context.sllp = SLB_VSID_USER |
+		mmu_psize_defs[mmu_virtual_psize].sllp;
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 092355f3739..fbe23933f73 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -334,7 +334,7 @@ out:
 	return nid;
 }
 
-static int cpu_numa_callback(struct notifier_block *nfb,
+static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
 			     unsigned long action,
 			     void *hcpu)
 {
@@ -487,9 +487,9 @@ static void __init setup_nonnuma(void)
 	unsigned long total_ram = lmb_phys_mem_size();
 	unsigned int i;
 
-	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
-	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
 	for (i = 0; i < lmb.memory.cnt; ++i)
@@ -507,7 +507,7 @@ void __init dump_numa_cpu_topology(void)
 		return;
 
 	for_each_online_node(node) {
-		printk(KERN_INFO "Node %d CPUs:", node);
+		printk(KERN_DEBUG "Node %d CPUs:", node);
 
 		count = 0;
 		/*
@@ -543,7 +543,7 @@ static void __init dump_numa_memory_topology(void)
 	for_each_online_node(node) {
 		unsigned long i;
 
-		printk(KERN_INFO "Node %d Memory:", node);
+		printk(KERN_DEBUG "Node %d Memory:", node);
 
 		count = 0;
 
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
 	return (void *)ret;
 }
 
+static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+	.notifier_call = cpu_numa_callback,
+	.priority = 1 /* Must run before sched domains notifier. */
+};
+
 void __init do_init_bootmem(void)
 {
 	int nid;
 	unsigned int i;
-	static struct notifier_block ppc64_numa_nb = {
-		.notifier_call = cpu_numa_callback,
-		.priority = 1 /* Must run before sched domains notifier. */
-	};
 
 	min_low_pfn = 0;
 	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 90628601fac..8fcacb0239d 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -20,7 +20,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 7b278d83739..b1da0316549 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -22,7 +22,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ed7fcfe5fd3..7cceb2c44cb 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -23,7 +23,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -42,18 +41,14 @@ unsigned long _SDR1;
 
 union ubat {			/* BAT register values to be loaded */
 	BAT	bat;
-#ifdef CONFIG_PPC64BRIDGE
-	u64	word[2];
-#else
 	u32	word[2];
-#endif
-} BATS[4][2];			/* 4 pairs of IBAT, DBAT */
+} BATS[8][2];			/* 8 pairs of IBAT, DBAT */
 
 struct batrange {		/* stores address ranges mapped by BATs */
 	unsigned long start;
 	unsigned long limit;
 	unsigned long phys;
-} bat_addrs[4];
+} bat_addrs[8];
 
 /*
  * Return PA for this VA if it is mapped by a BAT, or 0
@@ -190,7 +185,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 		return;
 	pmd = pmd_offset(pgd_offset(mm, ea), ea);
 	if (!pmd_none(*pmd))
-		add_hash_page(mm->context, ea, pmd_val(*pmd));
+		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
 }
 
 /*
@@ -220,15 +215,9 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
 
-#ifdef CONFIG_PPC64BRIDGE
-#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
-#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
-#define MIN_N_HPTEG	2048		/* min 256kB hash table */
-#else
 #define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
 #define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
 #define MIN_N_HPTEG	1024		/* min 64kB hash table */
-#endif
 
 	/*
 	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
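
The slb.c and slb_low.S changes that follow rely on the same runtime instruction patching used by make_bl() in hash_utils_64.c above: a 32-bit instruction image is rewritten in place, then the icache range is flushed. In make_bl(), 0x48000001 is the I-form branch opcode (18) with the LK bit set, the offset is masked to the signed 26-bit LI field (0x03fffffc), and func is dereferenced once because a 64-bit PowerPC function pointer refers to a function descriptor whose first word is the code entry address. A standalone sketch of that encoding, with made-up addresses:

/* Illustration of the "bl" encoding used by make_bl(); not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Encode a PowerPC "bl target" instruction located at address 'pc'. */
static uint32_t encode_bl(uint64_t pc, uint64_t target)
{
	int64_t offset = (int64_t)(target - pc);

	/* opcode 18, AA=0, LK=1; LI is a signed 26-bit byte offset,
	 * low two bits always zero (word-aligned instructions) */
	return (uint32_t)(0x48000001 | (offset & 0x03fffffc));
}

int main(void)
{
	/* hypothetical call-site and target addresses */
	uint64_t site = 0xc000000000041000ULL;
	uint64_t func = 0xc000000000042abcULL;

	printf("bl encoding: 0x%08x\n", encode_bl(site, func));
	return 0;
}

patch_slb_encoding() in slb.c works the same way, except it rewrites the immediate field of an "li" instruction rather than a branch target.
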
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ffc8ed4de62..de0c8842415 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -16,7 +16,6 @@
 
 #undef DEBUG
 
-#include <linux/config.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -60,19 +59,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
 		     : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
-	unsigned long linear_llp, virtual_llp, lflags, vflags;
+	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data;
 
 	WARN_ON(!irqs_disabled());
 
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
 	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -122,9 +121,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
 
 	/*
 	 * preload some userspace segments into the SLB.
@@ -167,11 +163,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-	unsigned long linear_llp, virtual_llp;
+	unsigned long linear_llp, vmalloc_llp, io_llp;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_virtual;
-	extern unsigned int *slb_miss_user_load_normal;
+	extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
 	extern unsigned int *slb_miss_user_load_huge;
 	unsigned long huge_llp;
@@ -181,18 +176,19 @@ void slb_initialize(void)
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
-		patch_slb_encoding(slb_miss_kernel_load_virtual,
-				   SLB_VSID_KERNEL | virtual_llp);
-		patch_slb_encoding(slb_miss_user_load_normal,
-				   SLB_VSID_USER | virtual_llp);
+		patch_slb_encoding(slb_miss_kernel_load_io,
+				   SLB_VSID_KERNEL | io_llp);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+		DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
 		patch_slb_encoding(slb_miss_user_load_huge,
 				   SLB_VSID_USER | huge_llp);
@@ -207,7 +203,7 @@ void slb_initialize(void)
 	unsigned long lflags, vflags;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
 	asm volatile("isync":::"memory");
@@ -215,7 +211,6 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667b..dbc1abbde03 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -14,7 +14,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
@@ -59,10 +58,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 	b	slb_finish_load
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
 	 * will be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+	/* check whether this is in vmalloc or ioremap space */
+	clrldi	r11,r10,48
+	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
+	bgt	5f
+	lhz	r11,PACAVMALLOCSLLP(r13)
+	b	slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 	b	slb_finish_load
 
@@ -96,9 +104,7 @@ _GLOBAL(slb_miss_user_load_huge)
 1:
 #endif /* CONFIG_HUGETLB_PAGE */
 
-_GLOBAL(slb_miss_user_load_normal)
-	li	r11,0
-
+	lhz	r11,PACACONTEXTSLLP(r13)
 2:
 	ld	r9,PACACONTEXTID(r13)
 	rldimi	r10,r9,USER_ESID_BITS,0
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 4a9291d9fef..eeeacab548e 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -12,7 +12,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
@@ -200,10 +199,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 
 	__get_cpu_var(stab_cache_ptr) = 0;
 
-#ifdef CONFIG_PPC_64K_PAGES
-	get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
-
 	/* Now preload some entries for the new task */
 	if (test_tsk_thread_flag(tsk, TIF_32BIT))
 		unmapped_base = TASK_UNMAPPED_BASE_USER32;
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index ad580f3742e..925ff70be8b 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -23,7 +23,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -42,7 +41,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 	if (Hash != 0) {
 		ptephys = __pa(ptep) & PAGE_MASK;
-		flush_hash_pages(mm->context, addr, ptephys, 1);
+		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
 
@@ -102,7 +101,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
 	pmd_t *pmd;
 	unsigned long pmd_end;
 	int count;
-	unsigned int ctx = mm->context;
+	unsigned int ctx = mm->context.id;
 
 	if (Hash == 0) {
 		_tlbia();
@@ -172,7 +171,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
 	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
 	if (!pmd_none(*pmd))
-		flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 	FINISH_FLUSH;
 }
 
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c..f6eef78efd2 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -22,7 +22,6 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -131,7 +130,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 {
 	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 	unsigned long vsid;
-	unsigned int psize = mmu_virtual_psize;
+	unsigned int psize;
 	int i;
 
 	i = batch->index;
@@ -148,7 +147,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 #else
 		BUG();
 #endif
-	}
+	} else
+		psize = pte_pagesize_index(pte);
 
 	/*
 	 * This can happen when we are in the middle of a TLB batch and
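
The final hunk (the listing is cut off just after it) stops assuming mmu_virtual_psize for every batched PTE and instead derives the page size from the PTE itself via pte_pagesize_index(), so segments that hash_page() demoted to 4k are flushed at the correct size. On a CONFIG_PPC_64K_PAGES kernel of this vintage that helper should reduce to roughly the following; this is a sketch of the pgtable-64k definitions, not part of this diff, and the flag and index values below are illustrative:

/* Standalone paraphrase of pte_pagesize_index(); values illustrative. */
#include <stdio.h>

#define MMU_PAGE_4K	0		/* illustrative psize indices */
#define MMU_PAGE_64K	1
#define _PAGE_COMBO	0x10000000UL	/* "64k segment demoted to 4k subpages" */

static unsigned int pte_pagesize_index(unsigned long pte)
{
	/* a PTE marked _PAGE_COMBO is backed by 4k hash entries */
	return (pte & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
}

int main(void)
{
	printf("%u\n", pte_pagesize_index(0));			/* 64k */
	printf("%u\n", pte_pagesize_index(_PAGE_COMBO));	/* 4k */
	return 0;
}

This pairs with the __hash_page_4K change earlier in the series, which sets _PAGE_COMBO when it installs 4k hash entries for a demoted segment.
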