Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--  arch/x86/include/asm/local.h  |  37
-rw-r--r--  arch/x86/include/asm/percpu.h | 119
-rw-r--r--  arch/x86/include/asm/system.h |   8
3 files changed, 91 insertions, 73 deletions
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f1905..2e9972468a5 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
 #define __local_add(i, l)	local_add((i), (l))
 #define __local_sub(i, l)	local_sub((i), (l))
 
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
-   still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l)		\
-({					\
-	local_t res__;			\
-	preempt_disable();		\
-	res__ = (l);			\
-	preempt_enable();		\
-	res__;				\
-})
-#define cpu_local_wrap(l)		\
-({					\
-	preempt_disable();		\
-	(l);				\
-	preempt_enable();		\
-})					\
-
-#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l)	cpu_local_inc((l))
-#define __cpu_local_dec(l)	cpu_local_dec((l))
-#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))
-
 #endif /* _ASM_X86_LOCAL_H */
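For context: the wrappers deleted above had to bracket every access with preempt_disable()/preempt_enable(), because __get_cpu_var() otherwise lets the task migrate to another CPU between computing the per-cpu address and the non-atomic update. The idiom that replaces them is the this_cpu_*() family, which the percpu.h changes below turn into single %gs/%fs-relative instructions. A minimal sketch, assuming a hypothetical counter called my_counter; illustrative only, not code from this patch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* illustrative name */

static void bump_counter(void)
{
	/* one segment-relative incl/addl on x86, so no preempt_disable() needed */
	this_cpu_inc(my_counter);
}

static unsigned long peek_counter(int cpu)
{
	/* cross-CPU reads still go through per_cpu() */
	return per_cpu(my_counter, cpu);
}

Because the whole update is one instruction against the segment-relative address, it cannot be torn by preemption, which is what made the preempt_disable() dance in the removed wrappers unnecessary.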
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78a..66a272dfd8b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
  */
 #ifdef CONFIG_SMP
 #define PER_CPU(var, reg)						\
-	__percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;	\
-	lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)	%__percpu_seg:per_cpu__##var
+	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
+	lea var(reg), reg
+#define PER_CPU_VAR(var)	%__percpu_seg:var
 #else /* ! SMP */
-#define PER_CPU(var, reg)						\
-	__percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var)	per_cpu__##var
+#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
+#define PER_CPU_VAR(var)	var
 #endif	/* SMP */
 
 #ifdef CONFIG_X86_64_SMP
 #define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 #else
-#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#define INIT_PER_CPU_VAR(var)  var
 #endif
 
 #else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
  * There also must be an entry in vmlinux_64.lds.S
  */
 #define DECLARE_INIT_PER_CPU(var) \
-       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+       extern typeof(var) init_per_cpu_var(var)
 
 #ifdef CONFIG_X86_64_SMP
 #define init_per_cpu_var(var)  init_per_cpu__##var
 #else
-#define init_per_cpu_var(var)  per_cpu_var(var)
+#define init_per_cpu_var(var)  var
 #endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@ do {							\
 	}						\
 } while (0)
 
+/*
+ * Generate a percpu add to memory instruction and optimize code
+ * if a one is added or subtracted.
+ */
+#define percpu_add_op(var, val)						\
+do {									\
+	typedef typeof(var) pao_T__;					\
+	const int pao_ID__ = (__builtin_constant_p(val) &&		\
+			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
+	if (0) {							\
+		pao_T__ pao_tmp__;					\
+		pao_tmp__ = (val);					\
+	}								\
+	switch (sizeof(var)) {						\
+	case 1:								\
+		if (pao_ID__ == 1)					\
+			asm("incb "__percpu_arg(0) : "+m" (var));	\
+		else if (pao_ID__ == -1)				\
+			asm("decb "__percpu_arg(0) : "+m" (var));	\
+		else							\
+			asm("addb %1, "__percpu_arg(0)			\
+			    : "+m" (var)				\
+			    : "qi" ((pao_T__)(val)));			\
+		break;							\
+	case 2:								\
+		if (pao_ID__ == 1)					\
+			asm("incw "__percpu_arg(0) : "+m" (var));	\
+		else if (pao_ID__ == -1)				\
+			asm("decw "__percpu_arg(0) : "+m" (var));	\
+		else							\
+			asm("addw %1, "__percpu_arg(0)			\
+			    : "+m" (var)				\
+			    : "ri" ((pao_T__)(val)));			\
+		break;							\
+	case 4:								\
+		if (pao_ID__ == 1)					\
+			asm("incl "__percpu_arg(0) : "+m" (var));	\
+		else if (pao_ID__ == -1)				\
+			asm("decl "__percpu_arg(0) : "+m" (var));	\
+		else							\
+			asm("addl %1, "__percpu_arg(0)			\
+			    : "+m" (var)				\
+			    : "ri" ((pao_T__)(val)));			\
+		break;							\
+	case 8:								\
+		if (pao_ID__ == 1)					\
+			asm("incq "__percpu_arg(0) : "+m" (var));	\
+		else if (pao_ID__ == -1)				\
+			asm("decq "__percpu_arg(0) : "+m" (var));	\
+		else							\
+			asm("addq %1, "__percpu_arg(0)			\
+			    : "+m" (var)				\
+			    : "re" ((pao_T__)(val)));			\
+		break;							\
+	default: __bad_percpu_size();					\
+	}								\
+} while (0)
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -142,16 +199,14 @@ do {							\
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "m" (per_cpu__##var))
-#define percpu_read_stable(var)	percpu_from_op("mov", per_cpu__##var,	\
-					       "p" (&per_cpu__##var))
-#define percpu_write(var, val)	percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val)	percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val)	percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val)	percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val)	percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val)	percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val)		percpu_to_op("mov", var, val)
+#define percpu_add(var, val)		percpu_add_op(var, val)
+#define percpu_sub(var, val)		percpu_add_op(var, -(val))
+#define percpu_and(var, val)		percpu_to_op("and", var, val)
+#define percpu_or(var, val)		percpu_to_op("or", var, val)
+#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@ do {							\
 #define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
 #define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
 #define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
 #define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@ do {							\
 #define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
 #define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
 #define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
-#define this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
-#define this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
 #define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@ do {							\
 #define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
-#define irqsafe_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@ do {							\
 #ifdef CONFIG_X86_64
 #define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
-#define irqsafe_cpu_add_8(pcp, val)	percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@ do {							\
 ({									\
 	int old__;							\
 	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
-		     : "=r" (old__), "+m" (per_cpu__##var)		\
+		     : "=r" (old__), "+m" (var)				\
 		     : "dIr" (bit));					\
 	old__;								\
 })
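The pao_ID__ construct in the new percpu_add_op() above relies on __builtin_constant_p() so that a literal +1/-1 is emitted as a single inc/dec instruction, while any other value falls through to a plain add; the dead if (0) block exists only so val is type-checked against the variable. A stand-alone user-space sketch of the same selection trick, assuming a 4-byte int and ordinary memory rather than a %gs-relative per-cpu slot; illustrative only, not part of the patch:

#include <stdio.h>

#define add_op(var, val)						\
do {									\
	/* compile-time +1/-1 selects inc/dec, anything else adds */	\
	const int pao_id = (__builtin_constant_p(val) &&		\
			    ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	if (pao_id == 1)						\
		asm("incl %0" : "+m" (var));				\
	else if (pao_id == -1)						\
		asm("decl %0" : "+m" (var));				\
	else								\
		asm("addl %1, %0" : "+m" (var) : "ri" (val));		\
} while (0)

int main(void)
{
	int x = 0;

	add_op(x, 1);		/* compiles to a single incl */
	add_op(x, -1);		/* compiles to a single decl */
	add_op(x, 5);		/* compiles to addl $5 */
	printf("x = %d\n", x);	/* prints x = 5 */
	return 0;
}

Built with gcc -O2 on x86, the three add_op() calls reduce to incl, decl and addl respectively, which is the code-size/latency saving the new macro targets for the common +1/-1 case.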
percpu_to_op("mov", (pcp), val) #define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val) #define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val) -#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) -#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) -#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) +#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val) +#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val) +#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val) #define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) #define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) #define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) @@ -192,9 +247,9 @@ do { \ #define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) #define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) -#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val) -#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val) -#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val) +#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val) +#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val) +#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val) #define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val) #define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val) #define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val) @@ -212,19 +267,19 @@ do { \ #ifdef CONFIG_X86_64 #define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) -#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) +#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) -#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) +#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) #define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) -#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val) +#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) #define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) #define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) @@ -236,7 +291,7 @@ do { \ ({ \ int old__; \ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \ - : "=r" (old__), "+m" (per_cpu__##var) \ + : "=r" (old__), "+m" (var) \ : "dIr" (bit)); \ old__; \ }) diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h index e04740f7a0b..b8fe48ee2ed 100644 --- a/arch/x86/include/asm/system.h +++ b/arch/x86/include/asm/system.h @@ -32,7 +32,7 @@ extern void show_regs_common(void); "movl %P[task_canary](%[next]), %%ebx\n\t" \ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" #define __switch_canary_oparam \ - , [stack_canary] "=m" (per_cpu_var(stack_canary.canary)) + , [stack_canary] "=m" (stack_canary.canary) #define __switch_canary_iparam \ , [task_canary] "i" (offsetof(struct 
@@ -114,7 +114,7 @@ do {									\
 	"movq %P[task_canary](%%rsi),%%r8\n\t"				\
 	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
 #define __switch_canary_oparam						\
-	, [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+	, [gs_canary] "=m" (irq_stack_union.stack_canary)
 #define __switch_canary_iparam						\
 	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
 #else	/* CC_STACKPROTECTOR */
@@ -133,7 +133,7 @@ do {									\
 	     __switch_canary						\
 	     "movq %P[thread_info](%%rsi),%%r8\n\t"			\
 	     "movq %%rax,%%rdi\n\t"					\
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		\
+	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		\
 	     "jnz   ret_from_fork\n\t"					\
 	     RESTORE_CONTEXT						\
 	     : "=a" (last)						\
@@ -143,7 +143,7 @@ do {									\
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	\
 	       [_tif_fork] "i" (_TIF_FORK),				\
 	       [thread_info] "i" (offsetof(struct task_struct, stack)),\
-	       [current_task] "m" (per_cpu_var(current_task))		\
+	       [current_task] "m" (current_task)			\
 	       __switch_canary_iparam					\
 	     : "memory", "cc" __EXTRA_CLOBBER)
 #endif
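The system.h operand changes above are the same rename applied throughout the patch: per_cpu_var() used to paste the per_cpu__ prefix onto a symbol, and with the prefix gone the per-cpu symbol can be named directly in an asm operand. As a rough illustration of how the prefix-free accessors read at a call site, the following mirrors how current is fetched on x86 in this era via percpu_read_stable(); a sketch for orientation, not part of this diff:

#include <linux/percpu.h>

struct task_struct;

DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	/* one %gs-relative mov; the "p" constraint lets gcc cache the result
	 * across the lifetime of the task, as noted in percpu.h above */
	return percpu_read_stable(current_task);
}

The switch_to() asm keeps using a plain "m" operand for current_task because the value genuinely changes across the context switch and must not be cached.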