author	Anton Blanchard <anton@samba.org>	2009-07-13 20:53:53 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-20 10:12:26 +1000
commit	de4376c2846bb5a8fc6fe8dbd0e4ff30905493e6 (patch)
tree	58c430e20a457b36059c983afeb08ddd712c6661
parent	5eb9bac0406f2beb84b21aac6feb89d33d9f3f5c (diff)
powerpc: Preload application text segment instead of TASK_UNMAPPED_BASE
TASK_UNMAPPED_BASE is not used with the new top down mmap layout. We can
reuse this preload slot by loading in the segment at 0x10000000, where
almost all PowerPC binaries are linked.

On a microbenchmark that bounces a token between two 64bit processes over
pipes and calls gettimeofday each iteration (to access the VDSO), both the
32bit and 64bit context switch rates improve (tested on a 4GHz POWER6):

32bit: 273k/sec -> 283k/sec
64bit: 277k/sec -> 284k/sec

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
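For reference, a minimal sketch of the kind of microbenchmark described
above: two processes bounce a one-byte token over a pair of pipes and call
gettimeofday() every iteration so the VDSO is touched on each context
switch. This is not the original benchmark source; the iteration count and
the reporting at the end are illustrative assumptions.

#include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/wait.h>

#define ITERATIONS 1000000L	/* assumed; pick enough for stable numbers */

int main(void)
{
	int ping[2], pong[2];
	char token = 'x';
	struct timeval start, end, now;
	double secs;
	long i;
	pid_t pid;

	if (pipe(ping) || pipe(pong)) {
		perror("pipe");
		return 1;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}

	if (pid == 0) {
		/* Child: echo the token back, touching the VDSO each pass. */
		for (i = 0; i < ITERATIONS; i++) {
			if (read(ping[0], &token, 1) != 1)
				_exit(1);
			gettimeofday(&now, NULL);
			if (write(pong[1], &token, 1) != 1)
				_exit(1);
		}
		_exit(0);
	}

	/* Parent: drive the token and time the round trips. */
	gettimeofday(&start, NULL);
	for (i = 0; i < ITERATIONS; i++) {
		if (write(ping[1], &token, 1) != 1)
			break;
		gettimeofday(&now, NULL);
		if (read(pong[0], &token, 1) != 1)
			break;
	}
	gettimeofday(&end, NULL);
	wait(NULL);

	secs = (end.tv_sec - start.tv_sec) +
	       (end.tv_usec - start.tv_usec) / 1e6;
	printf("%.0f round trips/sec\n", i / secs);
	return 0;
}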
-rw-r--r--	arch/powerpc/mm/slb.c	17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 227056c21ee..6bc8b4aeba5 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -184,7 +184,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
-	unsigned long unmapped_base;
+	unsigned long exec_base;
 
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
@@ -212,14 +212,13 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 
 	/*
 	 * preload some userspace segments into the SLB.
+	 * Almost all 32 and 64bit PowerPC executables are linked at
+	 * 0x10000000 so it makes sense to preload this segment.
 	 */
-	if (test_tsk_thread_flag(tsk, TIF_32BIT))
-		unmapped_base = TASK_UNMAPPED_BASE_USER32;
-	else
-		unmapped_base = TASK_UNMAPPED_BASE_USER64;
+	exec_base = 0x10000000;
 
 	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
-	    is_kernel_addr(unmapped_base))
+	    is_kernel_addr(exec_base))
 		return;
 
 	slb_allocate(pc);
@@ -227,9 +226,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	if (!esids_match(pc, stack))
 		slb_allocate(stack);
 
-	if (!esids_match(pc, unmapped_base) &&
-	    !esids_match(stack, unmapped_base))
-		slb_allocate(unmapped_base);
+	if (!esids_match(pc, exec_base) &&
+	    !esids_match(stack, exec_base))
+		slb_allocate(exec_base);
 }
 
 static inline void patch_slb_encoding(unsigned int *insn_addr,
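Why the esids_match() checks matter: on hash-MMU PowerPC, each SLB entry
covers a 256MB segment, and for 256MB segments an address's ESID is simply
the address shifted right by SID_SHIFT (28). Since 0x10000000 >> 28 == 1, a
process whose program counter already sits in the text segment linked at
0x10000000 shares an ESID with exec_base, so the third slb_allocate() is
skipped as redundant. A simplified illustration follows; the real
esids_match() also handles 1T segments, and the addresses below are
hypothetical.

#include <stdio.h>

#define SID_SHIFT 28
#define GET_ESID(addr) ((addr) >> SID_SHIFT)

/* Simplified 256MB-segment comparison in the spirit of esids_match(). */
static int same_segment(unsigned long a, unsigned long b)
{
	return GET_ESID(a) == GET_ESID(b);
}

int main(void)
{
	unsigned long exec_base = 0x10000000UL;	/* typical link address */
	unsigned long pc = 0x10001a2cUL;	/* hypothetical text address */

	/* Both fall in segment 1, so preloading exec_base separately
	 * would be redundant once pc's segment is in the SLB. */
	printf("%d\n", same_segment(pc, exec_base));	/* prints 1 */
	return 0;
}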