author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 15:48:00 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 15:48:00 +0200
commit    | dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree      | 9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/m68k/include/asm/atomic.h
parent    | d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent    | 2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
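
The last item is the key structural change: once an architecture supplies cmpxchg(), every other read-modify-write operation can be generated from a compare-and-swap retry loop. The following is a minimal, standalone user-space sketch of that idea, not the kernel's actual include/asm-generic/atomic.h; GCC's __sync_val_compare_and_swap() builtin stands in here for the architecture's cmpxchg(), and atomic_t is redefined locally so the program is self-contained:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/*
 * Generate atomic_<op>_return() from compare-and-swap alone:
 * read the counter, try to install "old value c_op i", and
 * retry with the observed value if another thread got there first.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = __sync_val_compare_and_swap(&v->counter,		\
						   c, c c_op i)) != c)	\
		c = old;						\
	return c c_op i;						\
}

ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)

int main(void)
{
	atomic_t v = { 40 };

	printf("%d\n", atomic_add_return(2, &v));	/* prints 42 */
	printf("%d\n", atomic_sub_return(2, &v));	/* prints 40 */
	return 0;
}

The loop terminates as soon as one pass runs without interference, which is why a single cmpxchg() primitive is enough to derive the whole op family.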
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/m68k/include/asm/atomic.h')
-rw-r--r-- | arch/m68k/include/asm/atomic.h | 111
1 file changed, 48 insertions, 63 deletions
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 55695212a2a..e85f047fb07 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,7 +17,7 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
 /*
@@ -30,16 +30,57 @@
 #define	ASM_DI	"di"
 #endif
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
+#define ATOMIC_OP(op, c_op, asm_op)					\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
+}									\
+
+#ifdef CONFIG_RMW_INSNS
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int t, tmp;							\
+									\
+	__asm__ __volatile__(						\
+			"1:	movel %2,%1\n"				\
+			"	" #asm_op "l %3,%1\n"			\
+			"	casl %2,%1,%0\n"			\
+			"	jne 1b"					\
+			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
+			: "g" (i), "2" (atomic_read(v)));		\
+	return t;							\
 }
 
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
+#else
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
+static inline int atomic_##op##_return(int i, atomic_t * v)		\
+{									\
+	unsigned long flags;						\
+	int t;								\
+									\
+	local_irq_save(flags);						\
+	t = (v->counter c_op i);					\
+	local_irq_restore(flags);					\
+									\
+	return t;							\
 }
 
+#endif /* CONFIG_RMW_INSNS */
+
+#define ATOMIC_OPS(op, c_op, asm_op)					\
+	ATOMIC_OP(op, c_op, asm_op)					\
+	ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, add)
+ATOMIC_OPS(sub, -=, sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 static inline void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
@@ -76,67 +117,11 @@ static inline int atomic_inc_and_test(atomic_t *v)
 
 #ifdef CONFIG_RMW_INSNS
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	int t, tmp;
-
-	__asm__ __volatile__(
-			"1:	movel %2,%1\n"
-			"	addl %3,%1\n"
-			"	casl %2,%1,%0\n"
-			"	jne 1b"
-			: "+m" (*v), "=&d" (t), "=&d" (tmp)
-			: "g" (i), "2" (atomic_read(v)));
-	return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	int t, tmp;
-
-	__asm__ __volatile__(
-			"1:	movel %2,%1\n"
-			"	subl %3,%1\n"
-			"	casl %2,%1,%0\n"
-			"	jne 1b"
-			: "+m" (*v), "=&d" (t), "=&d" (tmp)
-			: "g" (i), "2" (atomic_read(v)));
-	return t;
-}
-
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #else /* !CONFIG_RMW_INSNS */
 
-static inline int atomic_add_return(int i, atomic_t * v)
-{
-	unsigned long flags;
-	int t;
-
-	local_irq_save(flags);
-	t = atomic_read(v);
-	t += i;
-	atomic_set(v, t);
-	local_irq_restore(flags);
-
-	return t;
-}
-
-static inline int atomic_sub_return(int i, atomic_t * v)
-{
-	unsigned long flags;
-	int t;
-
-	local_irq_save(flags);
-	t = atomic_read(v);
-	t -= i;
-	atomic_set(v, t);
-	local_irq_restore(flags);
-
-	return t;
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	unsigned long flags;
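
As a cross-check, hand-expanding ATOMIC_OPS(add, +=, add) from the second hunk in the CONFIG_RMW_INSNS branch (only the op/c_op/asm_op parameters are substituted; ASM_DI stays as the macro defined earlier in the file) reproduces exactly the open-coded functions the third hunk deletes, which is why the fold shrinks the file without changing behavior:

/* ATOMIC_OP(add, +=, add): the non-returning op, one inline insn */
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

/* ATOMIC_OP_RETURN(add, +=, add): the cas-loop returning variant */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

Note that the c_op argument (+=) is unused in this branch; it only comes into play in the !CONFIG_RMW_INSNS variant, where the update is done as "t = (v->counter c_op i)" under local_irq_save().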