author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-11-15 17:11:27 +0000
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-11-17 16:26:07 +1100
commit    b97021f85517552ea8a0d2c1680c1ee4beab6d14 (patch)
tree      f8f4c0af8d7a76d405fcae62f2ddecff642cc4e9 /arch/powerpc/include/asm/bitops.h
parent    a9a8f77ac72d6dd3c92ea268291678836f77681c (diff)
powerpc: Fix atomic_xxx_return barrier semantics
The Documentation/memory-barriers.txt document requires that atomic
operations that return a value act as a memory barrier both before
and after the actual atomic operation.

Our current implementation doesn't guarantee this. More specifically,
while a load following the isync cannot be issued before stwcx. has
completed, that completion doesn't architecturally mean that the
result of stwcx. is visible to other processors (or any previous
stores, for that matter); typically, the other processors' L1 caches
can still hold the old value.

This has caused an actual crash in RCU torture testing on POWER7.

This fixes it by changing those atomic ops to use new macros instead
of the RELEASE/ACQUIRE barriers, called ATOMIC_ENTRY and ATOMIC_EXIT
barriers, which are defined to lwsync and sync respectively.

I haven't had a chance to measure the performance impact (or rather,
what I measured with kernel compiles is in the noise; I have yet to
find a more precise benchmark).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
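The macros themselves land in arch/powerpc/include/asm/synch.h, whose
hunk is outside this filtered view. A minimal sketch of what that hunk
adds, assuming the SMP case and the stringify_in_c() helper from
asm-compat.h:

/* Sketch of the new barrier macros (the synch.h hunk is not shown in
 * this bitops.h-only view). On SMP the entry barrier is the cheaper
 * lwsync and the exit barrier a full sync; on UP both are empty. */
#ifdef CONFIG_SMP
#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
#define PPC_ATOMIC_EXIT_BARRIER  "\n" stringify_in_c(sync) "\n"
#else
#define PPC_ATOMIC_ENTRY_BARRIER
#define PPC_ATOMIC_EXIT_BARRIER
#endif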
Diffstat (limited to 'arch/powerpc/include/asm/bitops.h')
-rw-r--r--  arch/powerpc/include/asm/bitops.h  |  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index e137afcc10f..efdc92618b3 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -124,14 +124,14 @@ static __inline__ unsigned long fn( \
 	return (old & mask);					\
 }
 
-DEFINE_TESTOP(test_and_set_bits, or, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 DEFINE_TESTOP(test_and_set_bits_lock, or, "",
 	      PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
-DEFINE_TESTOP(test_and_change_bits, xor, PPC_RELEASE_BARRIER,
-	      PPC_ACQUIRE_BARRIER, 0)
+DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
+DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
+	      PPC_ATOMIC_EXIT_BARRIER, 0)
 
 static __inline__ int test_and_set_bit(unsigned long nr,
 		volatile unsigned long *addr)
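For readers without the DEFINE_TESTOP macro body at hand, here is a
minimal, self-contained sketch (not the kernel's actual macro, and
assuming a 64-bit SMP target) of the instruction pattern the changed
lines now generate for test_and_set_bits(): lwsync before the
load-reserve/store-conditional loop and a full sync after it, so the
operation is a memory barrier on both sides:

/* Sketch only: the kernel generates this via DEFINE_TESTOP with
 * PPC_LLARX/PPC_STLCX, which select ldarx/stdcx. or lwarx/stwcx.
 * by word size. */
static inline unsigned long test_and_set_bits_sketch(unsigned long mask,
						     volatile unsigned long *p)
{
	unsigned long old, t;

	__asm__ __volatile__(
	"	lwsync\n"		/* PPC_ATOMIC_ENTRY_BARRIER */
	"1:	ldarx	%0,0,%3\n"	/* load word, take reservation */
	"	or	%1,%0,%2\n"	/* set the requested bits */
	"	stdcx.	%1,0,%3\n"	/* store iff reservation held */
	"	bne-	1b\n"		/* reservation lost: retry */
	"	sync\n"			/* PPC_ATOMIC_EXIT_BARRIER */
	: "=&r" (old), "=&r" (t)
	: "r" (mask), "r" (p)
	: "cc", "memory");

	return old & mask;
}

The isync used by the old PPC_ACQUIRE_BARRIER orders the local
pipeline but, as the commit message notes, does not guarantee that the
stdcx. result is visible to other processors; the trailing sync does.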