author    Brian Gerst <brgerst@gmail.com>    2009-02-08 09:58:39 -0500
committer Ingo Molnar <mingo@elte.hu>        2009-02-09 10:30:30 +0100
commit    2add8e235cbe0dcd672c33fc322754e15500238c (patch)
tree      3cf9abe886432ac82fc08e99241ec7254dc20193 /arch
parent    d3770449d3cb058b94ca1d050d5ced4a66c75ce4 (diff)
x86: use linker to offset symbols by __per_cpu_load
Impact: cleanup and bug fix

Use the linker to create symbols for certain per-cpu variables that are
offset by __per_cpu_load.  This allows the removal of the runtime fixup
of the GDT pointer, which fixes a bug with resume reported by Jiri Slaby.

Reported-by: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Acked-by: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
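As a rough illustration of the idea (not part of the patch, and using made-up names), a link-time symbol such as init_per_cpu__foo = per_cpu__foo + __per_cpu_load turns the address of the boot CPU's copy of a per-cpu variable into an ordinary constant that the linker resolves, so nothing has to patch it up at runtime. The standalone C sketch below simply performs the same addition at runtime to show the relationship between the three quantities; in the real kernel, ld does this arithmetic once at link time.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins (illustrative only) for the initial per-cpu load image and
     * for one variable's section-relative address, i.e. the raw per_cpu__foo
     * value before any offset is applied. */
    static char percpu_load_area[4096];
    static const uintptr_t per_cpu__foo = 0x40;

    int main(void)
    {
        /* Plays the role of __per_cpu_load: where the initial image sits. */
        uintptr_t load_base = (uintptr_t)percpu_load_area;

        /* What the linker-script rule computes: the boot CPU's copy of foo. */
        uintptr_t init_per_cpu__foo = per_cpu__foo + load_base;

        printf("boot-CPU copy of foo is at %#lx\n",
               (unsigned long)init_per_cpu__foo);
        return 0;
    }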
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/percpu.h    | 22
-rw-r--r--  arch/x86/include/asm/processor.h |  2
-rw-r--r--  arch/x86/kernel/cpu/common.c     |  6
-rw-r--r--  arch/x86/kernel/head_64.S        | 21
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S |  8
5 files changed, 35 insertions, 24 deletions
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0b64af4f13a..aee103b26d0 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -34,6 +34,12 @@
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#endif
+
#else /* ...!ASSEMBLY */
#include <linux/stringify.h>
@@ -45,6 +51,22 @@
#define __percpu_arg(x) "%" #x
#endif
+/*
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor need to use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
+ *
+ * There also must be an entry in vmlinux_64.lds.S
+ */
+#define DECLARE_INIT_PER_CPU(var) \
+ extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var) init_per_cpu__##var
+#else
+#define init_per_cpu_var(var) per_cpu_var(var)
+#endif
+
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
extern void __bad_percpu_size(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 656d02ea509..373d3f628d2 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -393,6 +393,8 @@ union irq_stack_union {
};
DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
DECLARE_PER_CPU(char *, irq_stack_ptr);
#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0f73ea42308..41b0de6df87 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -902,12 +902,8 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */
-#else
DEFINE_PER_CPU(char *, irq_stack_ptr) =
- per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
-#endif
+ init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
DEFINE_PER_CPU(unsigned long, kernel_stack) =
(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a0a2b5ca9b7..2e648e3a5ea 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -205,19 +205,6 @@ ENTRY(secondary_startup_64)
pushq $0
popfq
-#ifdef CONFIG_SMP
- /*
- * Fix up static pointers that need __per_cpu_load added. The assembler
- * is unable to do this directly. This is only needed for the boot cpu.
- * These values are set up with the correct base addresses by C code for
- * secondary cpus.
- */
- movq initial_gs(%rip), %rax
- cmpl $0, per_cpu__cpu_number(%rax)
- jne 1f
- addq %rax, early_gdt_descr_base(%rip)
-1:
-#endif
/*
* We must switch to a new descriptor in kernel space for the GDT
* because soon the kernel won't have access anymore to the userspace
@@ -275,11 +262,7 @@ ENTRY(secondary_startup_64)
ENTRY(initial_code)
.quad x86_64_start_kernel
ENTRY(initial_gs)
-#ifdef CONFIG_SMP
- .quad __per_cpu_load
-#else
- .quad PER_CPU_VAR(irq_stack_union)
-#endif
+ .quad INIT_PER_CPU_VAR(irq_stack_union)
__FINITDATA
ENTRY(stack_start)
@@ -425,7 +408,7 @@ NEXT_PAGE(level2_spare_pgt)
early_gdt_descr:
.word GDT_ENTRIES*8-1
early_gdt_descr_base:
- .quad per_cpu__gdt_page
+ .quad INIT_PER_CPU_VAR(gdt_page)
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 07f62d287ff..087a7f2c639 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -257,6 +257,14 @@ SECTIONS
DWARF_DEBUG
}
+ /*
+ * Per-cpu symbols which need to be offset from __per_cpu_load
+ * for the boot processor.
+ */
+#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+INIT_PER_CPU(gdt_page);
+INIT_PER_CPU(irq_stack_union);
+
/*
* Build-time check on the image size:
*/
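For reference, here is how the pieces of this patch fit together for irq_stack_union on CONFIG_X86_64_SMP; this is only the macro expansion written out by hand, not additional patch content. The INIT_PER_CPU() line in vmlinux_64.lds.S makes ld emit an absolute symbol

    init_per_cpu__irq_stack_union = per_cpu__irq_stack_union + __per_cpu_load;

and DECLARE_INIT_PER_CPU(irq_stack_union) in processor.h expands to

    extern typeof(per_cpu__irq_stack_union) init_per_cpu__irq_stack_union;

so the initializer for irq_stack_ptr in common.c and the .quad INIT_PER_CPU_VAR(irq_stack_union) behind initial_gs in head_64.S both resolve to the boot CPU's copy entirely at link time, which is what allows the old runtime fixup of early_gdt_descr_base to be deleted.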