Diffstat (limited to 'arch/x86/include/asm/percpu.h')
-rw-r--r--  arch/x86/include/asm/percpu.h | 86
1 file changed, 68 insertions(+), 18 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee45167e81..d475b4398d8 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -45,7 +45,7 @@
 #include <linux/stringify.h>
 
 #ifdef CONFIG_SMP
-#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
+#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
 #define __my_cpu_offset		percpu_read(this_cpu_off)
 
 /*
@@ -62,9 +62,11 @@
 	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
 })
 #else
-#define __percpu_arg(x)		"%P" #x
+#define __percpu_prefix		""
 #endif
 
+#define __percpu_arg(x)		__percpu_prefix "%P" #x
+
 /*
  * Initialized pointers to per-cpu variables needed for the boot
  * processor need to use these macros to get the proper address
@@ -273,34 +275,34 @@ do {									\
 	typeof(var) pxo_new__ = (nval);					\
 	switch (sizeof(var)) {						\
 	case 1:								\
-		asm("\n1:mov "__percpu_arg(1)",%%al"			\
-		    "\n\tcmpxchgb %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%al"			\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "q" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 2:								\
-		asm("\n1:mov "__percpu_arg(1)",%%ax"			\
-		    "\n\tcmpxchgw %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 4:								\
-		asm("\n1:mov "__percpu_arg(1)",%%eax"			\
-		    "\n\tcmpxchgl %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 8:								\
-		asm("\n1:mov "__percpu_arg(1)",%%rax"			\
-		    "\n\tcmpxchgq %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
@@ -414,8 +416,6 @@ do {									\
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -432,8 +432,6 @@ do {									\
 #define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
@@ -455,6 +453,26 @@ do {									\
 #define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
+#ifdef CONFIG_X86_CMPXCHG64
+#define percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy = n2;					\
+	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
+		    : "=a"(__ret), "=m" (pcp1), "=d"(__dummy)		\
+		    : "b"(__n1), "c"(__n2), "a"(__o1), "d"(__o2));	\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
+#endif /* CONFIG_X86_CMPXCHG64 */
+
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -475,11 +493,43 @@ do {									\
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+
+/*
+ * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
+ * is not supported on early AMD64 processors so we must be able to emulate
+ * it in software.  The address used in the cmpxchg16 instruction must be
+ * aligned to a 16 byte boundary.
+ */
+#define percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)			\
+({									\
+	char __ret;							\
+	typeof(o1) __o1 = o1;						\
+	typeof(o1) __n1 = n1;						\
+	typeof(o2) __o2 = o2;						\
+	typeof(o2) __n2 = n2;						\
+	typeof(o2) __dummy;						\
+	alternative_io("call this_cpu_cmpxchg16b_emu\n\t" P6_NOP4,	\
+		       "cmpxchg16b " __percpu_prefix "(%%rsi)\n\tsetz %0\n\t",	\
+		       X86_FEATURE_CX16,				\
+		       ASM_OUTPUT2("=a"(__ret), "=d"(__dummy)),		\
+		       "S" (&pcp1), "b"(__n1), "c"(__n2),		\
+		       "a"(__o1), "d"(__o2) : "memory");		\
+	__ret;								\
+})
+
+#define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)		percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2)	percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
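Note (editor, not part of the commit): the _4/_8 backends above are the size-specific implementations behind the generic this_cpu_cmpxchg_double() wrapper in <linux/percpu.h>. The sketch below is a minimal, hypothetical usage example; the struct, variable, and function names are invented for illustration. It assumes the two per-cpu words are adjacent and that the pair is aligned to twice the word size, which is what cmpxchg8b (32-bit) and cmpxchg16b (64-bit, 16-byte alignment) require.

	/*
	 * Illustrative sketch only -- hypothetical names, not from this diff.
	 * Atomically swap a pointer together with a sequence counter on the
	 * local CPU, in the style of a lockless freelist update.
	 */
	#include <linux/percpu.h>

	struct pcpu_pair {
		void *ptr;		/* first word of the pair */
		unsigned long seq;	/* second word, must immediately follow ptr */
	} __aligned(2 * sizeof(unsigned long));	/* 8 bytes on 32-bit, 16 on 64-bit */

	static DEFINE_PER_CPU(struct pcpu_pair, example_pair);

	static bool try_update_pair(void *old_ptr, unsigned long old_seq,
				    void *new_ptr, unsigned long new_seq)
	{
		/*
		 * Replaces both words of this CPU's pair only if they still
		 * hold the expected old values; returns true on success.  On
		 * x86 this compiles to a single cmpxchg8b/cmpxchg16b on the
		 * segment-based per-cpu address (or the call-based emulation
		 * when X86_FEATURE_CX16 is absent).
		 */
		return this_cpu_cmpxchg_double(example_pair.ptr, example_pair.seq,
					       old_ptr, old_seq,
					       new_ptr, new_seq);
	}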