Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/pda.h             |  3
-rw-r--r--  arch/x86/include/asm/percpu.h          |  6
-rw-r--r--  arch/x86/include/asm/processor.h       | 23
-rw-r--r--  arch/x86/include/asm/stackprotector.h  |  6
-rw-r--r--  arch/x86/include/asm/system.h          |  4
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c       |  4
-rw-r--r--  arch/x86/kernel/cpu/common.c           |  7
-rw-r--r--  arch/x86/kernel/head_64.S              | 13
-rw-r--r--  arch/x86/kernel/setup_percpu.c         | 34
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S       |  8
10 files changed, 46 insertions(+), 62 deletions(-)
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index b473e952439..ba46416634f 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -17,9 +17,6 @@ struct x8664_pda {
unsigned long unused4;
int unused5;
unsigned int unused6; /* 36 was cpunumber */
- unsigned long stack_canary; /* 40 stack canary value */
- /* gcc-ABI: this canary MUST be at
- offset 40!!! */
short in_bootmem; /* pda lives in bootmem */
} ____cacheline_aligned_in_smp;
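The field deleted above encoded a hard gcc ABI constraint: with -fstack-protector on x86-64, gcc reads the canary from the fixed address %gs:40, so whatever object %gs points at must keep a canary word exactly 40 bytes in. A minimal userspace sketch of that invariant (struct name hypothetical, not kernel code):

#include <stddef.h>

/* Stand-in for whatever %gs points at: gcc emits "%gs:40" accesses
 * for the canary, so the word must sit exactly 40 bytes in. */
struct fake_gs_object {
	char pad[40];			/* fields preceding the canary */
	unsigned long stack_canary;	/* must land at offset 40 */
};

_Static_assert(offsetof(struct fake_gs_object, stack_canary) == 40,
	       "stack canary must be at offset 40 from the %gs base");

int main(void) { return 0; }

The rest of this series rearranges which object %gs points at while preserving exactly this offset.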
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 165d5272ece..ce980db5e59 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -133,12 +133,6 @@ do { \
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
-#ifdef CONFIG_X86_64
-extern void load_pda_offset(int cpu);
-#else
-static inline void load_pda_offset(int cpu) { }
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f511246fa6c..48676b943b9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -379,8 +379,29 @@ union thread_xstate {
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
-DECLARE_PER_CPU(char[IRQ_STACK_SIZE], irq_stack);
+union irq_stack_union {
+ char irq_stack[IRQ_STACK_SIZE];
+ /*
+ * GCC hardcodes the stack canary as %gs:40. Since the
+ * irq_stack is the object at %gs:0, we reserve the bottom
+ * 48 bytes of the irq stack for the canary.
+ */
+ struct {
+ char gs_base[40];
+ unsigned long stack_canary;
+ };
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
+
+static inline void load_gs_base(int cpu)
+{
+ /* Memory clobbers used to order pda/percpu accesses */
+ mb();
+ wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+ mb();
+}
#endif
extern void print_cpu_info(struct cpuinfo_x86 *);
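To see how the overlay added above works, here is a compilable userspace sketch of the union (the IRQ_STACK_SIZE value is assumed, for illustration only): the canary aliases bytes 40..47 at the bottom of the irq stack, and since the stack grows down from the top of the union, the two never collide in practice.

#include <stddef.h>
#include <stdio.h>

#define IRQ_STACK_SIZE 16384		/* assumed value, for illustration */

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	struct {
		char gs_base[40];		/* bytes 0..39: never used as stack */
		unsigned long stack_canary;	/* bytes 40..47: what %gs:40 names */
	};
};

int main(void)
{
	printf("canary offset: %zu\n",
	       offsetof(union irq_stack_union, stack_canary));	/* prints 40 */
	printf("union size:    %zu\n", sizeof(union irq_stack_union));
	return 0;
}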
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 2383e5bb475..36a700acaf2 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -2,7 +2,7 @@
#define _ASM_STACKPROTECTOR_H 1
#include <asm/tsc.h>
-#include <asm/pda.h>
+#include <asm/processor.h>
/*
* Initialize the stackprotector canary value.
@@ -19,7 +19,7 @@ static __always_inline void boot_init_stack_canary(void)
* Build time only check to make sure the stack_canary is at
* offset 40 in the pda; this is a gcc ABI requirement
*/
- BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
+ BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
/*
* We both use the random pool and the current TSC as a source
@@ -32,7 +32,7 @@ static __always_inline void boot_init_stack_canary(void)
canary += tsc + (tsc << 32UL);
current->stack_canary = canary;
- write_pda(stack_canary, canary);
+ percpu_write(irq_stack_union.stack_canary, canary);
}
#endif
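The mixing step above folds the TSC into both halves of the 64-bit random seed, so the canary stays unpredictable even when the entropy pool is weak at early boot. A userspace sketch of the same arithmetic, with __rdtsc() standing in for the kernel's TSC read and the rnd argument for the get_random_bytes() output:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Sketch: same mixing as boot_init_stack_canary() above. */
static uint64_t mix_canary(uint64_t rnd)
{
	uint64_t tsc = __rdtsc();	/* cycle counter as extra entropy */

	return rnd + tsc + (tsc << 32);	/* spread tsc into both halves */
}

int main(void)
{
	printf("canary: %#llx\n", (unsigned long long)mix_canary(0xdeadbeef));
	return 0;
}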
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index b77bd8bd3cc..52eb748a68a 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -89,10 +89,10 @@ do { \
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
"movq %P[task_canary](%%rsi),%%r8\n\t" \
- "movq %%r8,%%gs:%P[pda_canary]\n\t"
+ "movq %%r8,%%gs:%P[gs_canary]\n\t"
#define __switch_canary_param \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \
- , [pda_canary] "i" (offsetof(struct x8664_pda, stack_canary))
+ , [gs_canary] "i" (offsetof(union irq_stack_union, stack_canary))
#else /* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_param
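In C terms, the two movq instructions above do the following on every context switch: copy the incoming task's saved canary into the per-cpu slot that %gs:40 resolves to, so that stack-protected functions running in the new task compare against the right value. A sketch with hypothetical, simplified types:

/* Hypothetical types -- just enough to show the copy. */
struct task {
	unsigned long stack_canary;	/* per-task canary, set at fork */
};

union irq_stack_union {
	char irq_stack[16384];
	struct {
		char gs_base[40];
		unsigned long stack_canary;	/* what %gs:40 names */
	};
};

static void switch_canary(struct task *next, union irq_stack_union *gs)
{
	/* movq %P[task_canary](%rsi),%r8 ; movq %r8,%gs:%P[gs_canary] */
	gs->stack_canary = next->stack_canary;
}

int main(void)
{
	static union irq_stack_union gs;
	struct task next = { .stack_canary = 0x1234 };

	switch_canary(&next, &gs);
	return gs.stack_canary == 0x1234 ? 0 : 1;
}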
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 64c834a39aa..94f9c8b39d2 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -48,10 +48,6 @@ int main(void)
#endif
BLANK();
#undef ENTRY
-#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
- DEFINE(pda_size, sizeof(struct x8664_pda));
- BLANK();
-#undef ENTRY
#ifdef CONFIG_PARAVIRT
BLANK();
OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
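For context on what the deleted block was for: asm-offsets.c is compiled only to assembly, and each DEFINE() emits a marker line that the build rewrites into a #define usable from .S files. With the pda gone, assembly no longer needs any pda_* constants. A compilable sketch of the mechanism (the struct is illustrative; the marker format matches what Kbuild's sed script consumes):

#include <stddef.h>

/* Emits "->sym $val" into the generated assembly; the build turns
 * such lines into "#define sym val" in asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " : : "i" (val))

struct example {			/* illustrative stand-in */
	char pad[40];
	unsigned long canary;
};

int main(void)
{
	DEFINE(example_canary, offsetof(struct example, canary));
	return 0;
}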
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f83a4d6160f..098934e72a1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -881,12 +881,13 @@ __setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-DEFINE_PER_CPU_PAGE_ALIGNED(char[IRQ_STACK_SIZE], irq_stack);
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ irq_stack_union) __aligned(PAGE_SIZE);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */
#else
DEFINE_PER_CPU(char *, irq_stack_ptr) =
- per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64;
+ per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
#endif
DEFINE_PER_CPU(unsigned long, kernel_stack) =
@@ -960,7 +961,7 @@ void __cpuinit cpu_init(void)
loadsegment(fs, 0);
loadsegment(gs, 0);
- load_pda_offset(cpu);
+ load_gs_base(cpu);
#ifdef CONFIG_NUMA
if (cpu != 0 && percpu_read(node_number) == 0 &&
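Two details in this hunk matter. DEFINE_PER_CPU_FIRST places irq_stack_union at the very start of each CPU's per-cpu area, so the %gs base (which on x86-64 SMP doubles as the per-cpu base) sees the canary at offset 40. And the UP initializer leaves 64 bytes of scratch above the top of the irq stack. A sketch of that pointer math (sizes assumed, for illustration):

#include <assert.h>

#define IRQ_STACK_SIZE 16384		/* assumed, for illustration */

static char irq_stack[IRQ_STACK_SIZE];	/* stands in for the union */

int main(void)
{
	/* top of the stack minus 64 bytes of scratch, as in the hunk above */
	char *irq_stack_ptr = irq_stack + IRQ_STACK_SIZE - 64;

	/* the stack grows down from here; the canary occupies bytes
	 * 40..47, far below, so normal use never reaches it */
	assert(irq_stack_ptr - irq_stack > 48);
	return 0;
}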
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 98ea26a2fca..a0a2b5ca9b7 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -242,13 +242,10 @@ ENTRY(secondary_startup_64)
/* Set up %gs.
*
- * On SMP, %gs should point to the per-cpu area. For initial
- * boot, make %gs point to the init data section. For a
- * secondary CPU, initial_gs should be set to its pda address
- * before the CPU runs this code.
- *
- * On UP, initial_gs points to PER_CPU_VAR(__pda) and doesn't
- * change.
+ * The base of %gs always points to the bottom of the irq_stack
+ * union. If the stack protector canary is enabled, it is
+ * located at %gs:40. Note that, on SMP, the boot CPU uses the
+ * init data section until the per-cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
movq initial_gs(%rip),%rax
@@ -281,7 +278,7 @@ ENTRY(secondary_startup_64)
#ifdef CONFIG_SMP
.quad __per_cpu_load
#else
- .quad PER_CPU_VAR(__pda)
+ .quad PER_CPU_VAR(irq_stack_union)
#endif
__FINITDATA
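The movl/movq/wrmsr sequence the new comment describes loads MSR_GS_BASE (0xc0000101) with the 64-bit base split across edx:eax. A C sketch of the same operation; wrmsr is a privileged instruction, so this only illustrates the encoding and cannot run in userspace:

#include <stdint.h>

#define MSR_GS_BASE 0xc0000101		/* the MSR written above */

/* Equivalent of the kernel's wrmsrl(): value split into edx:eax. */
static inline void wrmsrl_sketch(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr"
		     : /* no outputs */
		     : "c" (msr),			/* ecx: MSR index */
		       "a" ((uint32_t)val),		/* eax: low 32 bits */
		       "d" ((uint32_t)(val >> 32)));	/* edx: high 32 bits */
}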
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index efbafbbff58..90b8e154bb5 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -77,30 +77,6 @@ static void __init setup_node_to_cpumask_map(void);
static inline void setup_node_to_cpumask_map(void) { }
#endif
-/*
- * Define load_pda_offset() and per-cpu __pda for x86_64.
- * load_pda_offset() is responsible for loading the offset of pda into
- * %gs.
- *
- * On SMP, pda offset also duals as percpu base address and thus it
- * should be at the start of per-cpu area. To achieve this, it's
- * preallocated in vmlinux_64.lds.S directly instead of using
- * DEFINE_PER_CPU().
- */
-#ifdef CONFIG_X86_64
-void __cpuinit load_pda_offset(int cpu)
-{
- /* Memory clobbers used to order pda/percpu accesses */
- mb();
- wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
- mb();
-}
-#ifndef CONFIG_SMP
-DEFINE_PER_CPU(struct x8664_pda, __pda);
-#endif
-EXPORT_PER_CPU_SYMBOL(__pda);
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
#ifdef CONFIG_X86_64
/* correctly size the local cpu masks */
@@ -207,15 +183,13 @@ void __init setup_per_cpu_areas(void)
per_cpu(cpu_number, cpu) = cpu;
#ifdef CONFIG_X86_64
per_cpu(irq_stack_ptr, cpu) =
- (char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+ per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
/*
- * CPU0 modified pda in the init data area, reload pda
- * offset for CPU0 and clear the area for others.
+ * Up to this point, CPU0 has been using the .data.init
+ * area. Reload the %gs base for CPU0.
*/
if (cpu == 0)
- load_pda_offset(0);
- else
- memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
+ load_gs_base(cpu);
#endif
DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index a09abb8fb97..c9740996430 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -220,8 +220,7 @@ SECTIONS
* so that it can be accessed as a percpu variable.
*/
. = ALIGN(PAGE_SIZE);
- PERCPU_VADDR_PREALLOC(0, :percpu, pda_size)
- per_cpu____pda = __per_cpu_start;
+ PERCPU_VADDR(0, :percpu)
#else
PERCPU(PAGE_SIZE)
#endif
@@ -262,3 +261,8 @@ SECTIONS
*/
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE")
+
+#ifdef CONFIG_SMP
+ASSERT((per_cpu__irq_stack_union == 0),
+ "irq_stack_union is not at start of per-cpu area");
+#endif
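The new link-time assertion pins down the layout the whole series depends on: on SMP the per-cpu base address is also the %gs base, so irq_stack_union must be the first per-cpu object or the canary would not sit at %gs:40. A userspace analogue of the check, over a simulated per-cpu layout (sizes and the trailing member are illustrative):

#include <assert.h>
#include <stddef.h>

struct percpu_area {			/* simulated layout, for illustration */
	union {
		char irq_stack[16384];
		struct {
			char gs_base[40];
			unsigned long stack_canary;
		};
	} irq_stack_union;		/* must be the first object */
	long other_percpu_vars[64];	/* everything else follows */
};

int main(void)
{
	/* analogue of ASSERT(per_cpu__irq_stack_union == 0) */
	assert(offsetof(struct percpu_area, irq_stack_union) == 0);
	/* and the gcc ABI requirement the ASSERT exists to protect */
	assert(offsetof(struct percpu_area, irq_stack_union.stack_canary) == 40);
	return 0;
}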