author     Chris Metcalf <cmetcalf@tilera.com>   2011-05-16 13:59:39 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>   2011-05-19 22:55:49 -0400
commit     8aaf1dda42576b0f8dffb004065baa806f4df9b6 (patch)
tree       e9376caaf70b54e4b236840a1cc77a443c07b341 /arch/tile/include/asm/atomic_64.h
parent     4800a5bb13c09a572f7c74662a77c9eca229eba1 (diff)
arch/tile: use better definitions of xchg() and cmpxchg()
These definitions use a ({}) construct to avoid some cases where we were
getting warnings about unused return values.  We also promote the definition
to the common <asm/atomic.h>, since it applies to both the 32- and 64-bit
atomics.  In addition, define __HAVE_ARCH_CMPXCHG for TILE-Gx since it has
efficient direct atomic instructions.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
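For reference, the ({}) statement-expression form described above looks roughly like the sketch below. This is an illustrative reconstruction, not the exact text this commit adds to <asm/atomic.h>: it reuses the sizeof() dispatch from the macro removed in the diff below and only adds the ({ ... }) wrapper the commit message describes.

/*
 * Illustrative sketch only, not the committed definition: the same
 * sizeof() dispatch as the old macro removed below, wrapped in a GCC
 * statement expression so that callers which ignore the result no
 * longer trigger "value computed is not used" warnings.
 */
#define xchg(ptr, x)						\
({								\
	typeof(*(ptr)) __x =					\
		(typeof(*(ptr)))				\
		((sizeof(*(ptr)) == sizeof(atomic_t)) ?		\
		 atomic_xchg((atomic_t *)(ptr), (long)(x)) :	\
		 (sizeof(*(ptr)) == sizeof(atomic_long_t)) ?	\
		 atomic_long_xchg((atomic_long_t *)(ptr),	\
				  (long)(x)) :			\
		 __xchg_called_with_bad_pointer());		\
	__x;							\
})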
Diffstat (limited to 'arch/tile/include/asm/atomic_64.h')
-rw-r--r--  arch/tile/include/asm/atomic_64.h  17
1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 32170529480..1c1e60d8ccb 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -148,21 +148,8 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
-#define xchg(ptr, x) \
- ((typeof(*(ptr))) \
- ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
- atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
- (sizeof(*(ptr)) == sizeof(atomic_long_t)) ? \
- atomic_long_xchg((atomic_long_t *)(ptr), (long)(x)) : \
- __xchg_called_with_bad_pointer()))
-
-#define cmpxchg(ptr, o, n) \
- ((typeof(*(ptr))) \
- ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
- atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
- (sizeof(*(ptr)) == sizeof(atomic_long_t)) ? \
- atomic_long_cmpxchg((atomic_long_t *)(ptr), (long)(o), (long)(n)) : \
- __cmpxchg_called_with_bad_pointer()))
+/* Define this to indicate that cmpxchg is an efficient operation. */
+#define __HAVE_ARCH_CMPXCHG
#endif /* !__ASSEMBLY__ */
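__HAVE_ARCH_CMPXCHG acts as a feature flag that code elsewhere can test to choose a lock-free path when the architecture's cmpxchg() is cheap. A hypothetical caller-side sketch follows; it is not part of this commit, and the counter_add()/counter_lock names are invented for illustration.

/* Hypothetical consumer sketch; not taken from this commit. */
#include <asm/atomic.h>
#include <linux/spinlock.h>

static unsigned long counter;

#ifndef __HAVE_ARCH_CMPXCHG
static DEFINE_SPINLOCK(counter_lock);
#endif

void counter_add(unsigned long n)
{
#ifdef __HAVE_ARCH_CMPXCHG
	/* Lock-free fast path: retry cmpxchg() until the update lands. */
	unsigned long old;

	do {
		old = counter;
	} while (cmpxchg(&counter, old, old + n) != old);
#else
	/* Fall back to a spinlock where cmpxchg() is slow or emulated. */
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);
	counter += n;
	spin_unlock_irqrestore(&counter_lock, flags);
#endif
}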