-rw-r--r--  include/asm-i386/local.h    | 26
-rw-r--r--  include/asm-x86_64/local.h  | 26
2 files changed, 40 insertions, 12 deletions
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index e67fa08260f..3b4998c51d0 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+ still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (v); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(v) \
+ ({ preempt_disable(); \
+ v; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v) cpu_local_inc(v)
#define __cpu_local_dec(v) cpu_local_dec(v)
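
The two wrapper macros added above rely on GNU C statement expressions, so the wrapped
operation runs entirely inside the preempt_disable()/preempt_enable() bracket while its
value can still escape to the caller (needed for cpu_local_read()). A minimal user-space
sketch of that pattern, with hypothetical my_wrap_v/fake_* names that are not part of the
patch, compiles with GCC or Clang:

    #include <stdio.h>

    static int fake_preempt_count;                 /* stand-in for the kernel preempt count */

    static void fake_preempt_disable(void) { fake_preempt_count++; }
    static void fake_preempt_enable(void)  { fake_preempt_count--; }

    /* Same shape as cpu_local_wrap_v(): the ({ ... }) block evaluates to res__,
     * so the expression is evaluated inside the disable/enable bracket and the
     * result is still handed back to the caller. */
    #define my_wrap_v(expr)                        \
            ({ long res__;                         \
               fake_preempt_disable();             \
               res__ = (expr);                     \
               fake_preempt_enable();              \
               res__; })

    int main(void)
    {
            long counter = 41;
            long seen = my_wrap_v(++counter);      /* increment happens inside the bracket */

            printf("seen=%ld count=%d\n", seen, fake_preempt_count);  /* seen=42 count=0 */
            return 0;
    }

The value-returning variant is only needed for cpu_local_read(); the plain cpu_local_wrap()
discards the result, which is enough for set/inc/dec/add/sub.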
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index cd17945bf21..e769e620022 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
* This could be done better if we moved the per cpu data directly
* after GS.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+ still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (v); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(v) \
+ ({ preempt_disable(); \
+ v; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v) cpu_local_inc(v)
#define __cpu_local_dec(v) cpu_local_dec(v)
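
For context, a hypothetical caller of the new macros might look like the sketch below (not
part of this patch, and kernel-module code, so it only builds inside a kernel tree; the
pkt_count name is illustrative). Without the new preemption bracket, the task could be
migrated between resolving this CPU's local_t and performing the operation, so the update
could land on another CPU's counter non-atomically:

    #include <linux/percpu.h>
    #include <asm/local.h>

    /* One counter per CPU; zero-initialized per-CPU storage. */
    static DEFINE_PER_CPU(local_t, pkt_count);

    static void count_packet(void)
    {
            /* With this patch, the line below expands roughly to:
             *   preempt_disable();
             *   local_inc(&__get_cpu_var(pkt_count));
             *   preempt_enable();
             * so the per-CPU address lookup and the increment are guaranteed
             * to hit the same CPU's counter. */
            cpu_local_inc(pkt_count);
    }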