author    Tejun Heo <tj@kernel.org>      2009-01-13 20:41:35 +0900
committer Ingo Molnar <mingo@elte.hu>   2009-01-16 14:19:58 +0100
commit    9939ddaff52787b2a7c1adf1b2afc95421aa0884 (patch)
tree      6e7266d065914e19c3c3f4b4e475f09b9669fa51 /arch/x86/kernel/setup_percpu.c
parent    1a51e3a0aed18767cf2762e95456ecfeb0bca5e6 (diff)
x86: merge 64 and 32 SMP percpu handling
Now that the pda is allocated as part of the percpu area, percpu no longer needs to be accessed through the pda. Unify x86_64 SMP percpu access with the x86_32 one. Other than the segment register, the operand size and the base of the percpu symbols, the two now behave identically.

This patch replaces the now-unnecessary pda->data_offset with a dummy field, which is needed to keep stack_canary in its place. It also moves per_cpu_offset initialization out of init_gdt() into setup_per_cpu_areas(). Note that this change necessitates explicit per_cpu_offset initializations in voyager_smp.c.

With this change, the x86_OP_percpu() accessors are as efficient on x86_64 as on x86_32, and x86_64 can also use the assembly PER_CPU macros.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
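For readers unfamiliar with the mechanism being unified, here is a minimal user-space model of the per-CPU offset scheme. Every name in it (my_per_cpu, my_per_cpu_offset, MY_NR_CPUS, template_area) is a hypothetical stand-in for the kernel's per_cpu(), __per_cpu_offset, NR_CPUS and the .data.percpu image; the real accessors use %fs/%gs segment prefixes rather than explicit pointer arithmetic, so this is a sketch of the addressing idea only, not the kernel's implementation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MY_NR_CPUS 4

/* Stand-in for the linker-provided initial percpu image. */
static long template_area[2] = { 10, 20 };

/* Models __per_cpu_offset[]: per-CPU copy address minus template address. */
static unsigned long my_per_cpu_offset[MY_NR_CPUS];

/* Models per_cpu(var, cpu): link-time address plus that CPU's offset. */
#define my_per_cpu(var_ptr, cpu) \
        (*(long *)((char *)(var_ptr) + my_per_cpu_offset[(cpu)]))

int main(void)
{
        for (int cpu = 0; cpu < MY_NR_CPUS; cpu++) {
                /* Give each CPU its own copy of the template, as
                 * setup_per_cpu_areas() does with memcpy() ... */
                long *ptr = malloc(sizeof(template_area));
                memcpy(ptr, template_area, sizeof(template_area));
                /* ... and record the offset, mirroring
                 * per_cpu_offset(cpu) = ptr - __per_cpu_start; */
                my_per_cpu_offset[cpu] =
                        (unsigned long)((char *)ptr - (char *)template_area);
        }

        my_per_cpu(&template_area[0], 2) = 99;  /* touch only CPU 2's copy */
        printf("cpu0 copy: %ld, cpu2 copy: %ld\n",
               my_per_cpu(&template_area[0], 0),   /* prints 10 */
               my_per_cpu(&template_area[0], 2));  /* prints 99 */
        return 0;
}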
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 63d46280227..be1ff34db11 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -125,14 +125,14 @@ static void __init setup_per_cpu_maps(void)
#endif
}
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
+#ifdef CONFIG_X86_64
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+ [0] = (unsigned long)__per_cpu_load,
+};
+#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
#endif
+EXPORT_SYMBOL(__per_cpu_offset);
/*
* Great future plan:
@@ -178,6 +178,7 @@ void __init setup_per_cpu_areas(void)
#endif
memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
+ per_cpu_offset(cpu) = ptr - __per_cpu_start;
#ifdef CONFIG_X86_64
cpu_pda(cpu) = (void *)ptr;
@@ -190,7 +191,7 @@ void __init setup_per_cpu_areas(void)
else
memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif
- per_cpu_offset(cpu) = ptr - __per_cpu_start;
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
}
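A side note on the first hunk (an inference from the code, not spelled out in the commit message): x86_64 statically initializes __per_cpu_offset[0] to __per_cpu_load, plausibly because the boot CPU executes percpu accessors before setup_per_cpu_areas() runs; until then the only percpu data is the initial image at __per_cpu_load, and with the zero-based percpu symbols introduced earlier in this series, the valid offset for CPU 0 is exactly that load address.

/* Sketch of the boot-CPU offset under that assumption
 * (addresses illustrative, not real):
 *
 *   __per_cpu_start == 0x0        symbols link at zero on x86_64 SMP
 *   __per_cpu_load  == 0xffff...  where the initial image is loaded
 *
 *   &per_cpu(var, 0) == &var + __per_cpu_offset[0]
 *                    == &var + (unsigned long)__per_cpu_load
 */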