 arch/powerpc/mm/slb.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 227056c21ee..6bc8b4aeba5 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -184,7 +184,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
+	unsigned long exec_base;
 
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
@@ -212,14 +212,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/*
 	 * preload some userspace segments into the SLB.
+	 * Almost all 32 and 64bit PowerPC executables are linked at
+	 * 0x10000000 so it makes sense to preload this segment.
 	 */
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+	exec_base = 0x10000000;
 
 	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
-	    is_kernel_addr(unmapped_base))
+	    is_kernel_addr(exec_base))
 		return;
 
 	slb_allocate(pc);
@@ -227,9 +226,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	if (!esids_match(pc, stack))
 		slb_allocate(stack);
 
-	if (!esids_match(pc, unmapped_base) &&
-	    !esids_match(stack, unmapped_base))
-		slb_allocate(unmapped_base);
+	if (!esids_match(pc, exec_base) &&
+	    !esids_match(stack, exec_base))
+		slb_allocate(exec_base);
 }
 
 static inline void patch_slb_encoding(unsigned int *insn_addr,
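The patch changes switch_slb() to preload the segment containing the common executable link address (0x10000000) rather than TASK_UNMAPPED_BASE, and the esids_match() checks ensure a segment is not preloaded twice. As context for those checks, below is a minimal user-space sketch (not kernel code) of the idea: two effective addresses need only one SLB entry when they fall in the same 256MB segment, i.e. their Effective Segment IDs match. SID_SHIFT (28 for 256MB segments) and GET_ESID follow the kernel's definitions; the sample addresses are made up, and the real esids_match() in slb.c additionally handles 1TB segments.

#include <stdio.h>

#define SID_SHIFT	28	/* 256MB segments: ESID = EA >> 28 */
#define GET_ESID(ea)	((ea) >> SID_SHIFT)

static int esids_match(unsigned long addr1, unsigned long addr2)
{
	/* Same ESID -> same segment -> one SLB entry covers both. */
	return GET_ESID(addr1) == GET_ESID(addr2);
}

int main(void)
{
	unsigned long exec_base = 0x10000000UL;	/* common link address */
	unsigned long pc = 0x10002f40UL;	/* hypothetical text address */
	unsigned long stack = 0xffffb000UL;	/* hypothetical stack address */

	/* pc sits in the same 256MB segment as exec_base (ESID 0x1)... */
	printf("pc/exec_base match: %d\n", esids_match(pc, exec_base));
	/* ...so switch_slb() would skip the redundant slb_allocate(). */
	printf("stack/exec_base match: %d\n", esids_match(stack, exec_base));
	return 0;
}

This is why the final hunk only calls slb_allocate(exec_base) when exec_base shares a segment with neither pc nor stack: for a typical binary executing near its link address, the pc preload already covers the text segment.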