author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 15:48:00 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 15:48:00 +0200
commit     dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree       9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/powerpc
parent     d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent     2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
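
To make that last point concrete: when an architecture supplies only cmpxchg(), every other atomic op can be generated as a compare-and-swap retry loop. Below is a minimal userspace sketch of that pattern, using C11 atomics as a stand-in for the kernel's cmpxchg(); the GEN_ATOMIC_OP and my_atomic_* names are illustrative only, not the kernel's (the real generic versions live in include/asm-generic/atomic.h).

#include <stdatomic.h>
#include <stdio.h>

/* Generate my_atomic_<op>() from a single CAS primitive, mirroring the
 * "only require cmpxchg()" approach described above. */
#define GEN_ATOMIC_OP(op, c_op)						\
static void my_atomic_##op(atomic_int *v, int i)			\
{									\
	int old = atomic_load_explicit(v, memory_order_relaxed);	\
	/* on failure, CAS rewrites 'old' with the current value */	\
	while (!atomic_compare_exchange_weak(v, &old, old c_op i))	\
		; /* retry with the fresh value until the CAS wins */	\
}

GEN_ATOMIC_OP(add, +)	/* defines my_atomic_add() */
GEN_ATOMIC_OP(or, |)	/* defines my_atomic_or()  */

int main(void)
{
	atomic_int v = 10;
	my_atomic_add(&v, 5);	/* v = 15 */
	my_atomic_or(&v, 0x20);	/* v = 0x2f = 47 */
	printf("%d\n", atomic_load(&v));
	return 0;
}

This is the slow-but-portable fallback; architectures like powerpc instead provide each op directly with a load-reserve/store-conditional loop, as the diff below shows.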
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/atomic.h  198
1 file changed, 77 insertions, 121 deletions
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 28992d01292..512d2782b04 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC_OP(op, asm_op)						\
+static __inline__ void atomic_##op(int a, atomic_t *v)			\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+	PPC405_ERR77(0,%3)						\
+"	stwcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
+}									\
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+static __inline__ int atomic_##op##_return(int a, atomic_t *v)		\
+{									\
+	int t;								\
+									\
+	__asm__ __volatile__(						\
+	PPC_ATOMIC_ENTRY_BARRIER					\
+"1:	lwarx	%0,0,%2		# atomic_" #op "_return\n"		\
+	#asm_op " %0,%1,%0\n"						\
+	PPC405_ERR77(0,%2)						\
+"	stwcx.	%0,0,%2 \n"						\
+"	bne-	1b\n"							\
+	PPC_ATOMIC_EXIT_BARRIER						\
+	: "=&r" (t)							\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc", "memory");						\
+									\
+	return t;							\
 }
 
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
+#define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, subf)
 
-	return t;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n"
-	PPC405_ERR77(0,%3)
-"	stwcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
-
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
-}
-
 static __inline__ void atomic_inc(atomic_t *v)
 {
 	int t;
@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
 	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
 }
 
-static __inline__ void atomic64_add(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_add\n\
-	add	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
+#define ATOMIC64_OP(op, asm_op)						\
+static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
+	#asm_op " %0,%2,%0\n"						\
+"	stdcx.	%0,0,%3 \n"						\
+"	bne-	1b\n"							\
+	: "=&r" (t), "+m" (v->counter)					\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc");							\
 }
 
-static __inline__ long atomic64_add_return(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
-	add	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-
-	return t;
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+static __inline__ long atomic64_##op##_return(long a, atomic64_t *v)	\
+{									\
+	long t;								\
+									\
+	__asm__ __volatile__(						\
+	PPC_ATOMIC_ENTRY_BARRIER					\
+"1:	ldarx	%0,0,%2		# atomic64_" #op "_return\n"		\
+	#asm_op " %0,%1,%0\n"						\
+"	stdcx.	%0,0,%2 \n"						\
+"	bne-	1b\n"							\
+	PPC_ATOMIC_EXIT_BARRIER					\
+	: "=&r" (t)							\
+	: "r" (a), "r" (&v->counter)					\
+	: "cc", "memory");						\
+									\
+	return t;							\
 }
 
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-
-static __inline__ void atomic64_sub(long a, atomic64_t *v)
-{
-	long t;
-
-	__asm__ __volatile__(
-"1:	ldarx	%0,0,%3		# atomic64_sub\n\
-	subf	%0,%2,%0\n\
-	stdcx.	%0,0,%3 \n\
-	bne-	1b"
-	: "=&r" (t), "+m" (v->counter)
-	: "r" (a), "r" (&v->counter)
-	: "cc");
-}
+#define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
 
-static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
-{
-	long t;
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, subf)
 
-	__asm__ __volatile__(
-	PPC_ATOMIC_ENTRY_BARRIER
-"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
-	subf	%0,%1,%0\n\
-	stdcx.	%0,0,%2 \n\
-	bne-	1b"
-	PPC_ATOMIC_EXIT_BARRIER
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
-	return t;
-}
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 
 static __inline__ void atomic64_inc(atomic64_t *v)
 {
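
For orientation, here is roughly what ATOMIC_OPS(sub, subf) in the patch above expands to for the non-returning variant, whitespace restored and comments added; it is the same load-reserve/store-conditional loop the old open-coded atomic_sub() contained:

static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n"	/* load-reserve v->counter into t */
"	subf	%0,%2,%0\n"			/* t = t - a */
	PPC405_ERR77(0,%3)			/* PPC405 erratum #77 workaround */
"	stwcx.	%0,0,%3 \n"			/* store-conditional t back */
"	bne-	1b\n"				/* reservation lost: retry from 1 */
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}

The _return variants wrap the same loop in PPC_ATOMIC_ENTRY_BARRIER/PPC_ATOMIC_EXIT_BARRIER so the returned value is fully ordered, which is why only they need the "memory" clobber.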