| author | Ralf Baechle <ralf@linux-mips.org> | 2006-10-31 03:45:07 +0000 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 2006-12-04 22:43:14 +0000 |
| commit | 0004a9dfeaa709a7f853487aba19932c9b1a87c8 (patch) | |
| tree | e9f1f4b1ca897e57f46778cef283617ba83fc855 /include/asm-mips/atomic.h | |
| parent | 08f57f7ffe5819e537301b1f1109fa4fc670bfff (diff) | |
[MIPS] Cleanup memory barriers for weakly ordered systems.
Also, the R4000 / R4600 LL/SC instructions imply a sync, so no explicit sync is
needed there.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
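
In pattern form, the commit moves the barrier out of the inline assembly and brackets the whole read-modify-write with `smp_mb()`. Below is a minimal sketch of the resulting shape, simplified from `atomic_add_return()` in the diff that follows: the function name is hypothetical, the CPU-variant selection (`R10000_LLSC_WAR`, non-LL/SC fallback) and `.set` directives are omitted, and the asm is abbreviated.

```c
/*
 * Sketch only: simplified from atomic_add_return() in the diff below.
 * smp_mb() comes from the newly included <asm/barrier.h> and compiles
 * away on non-SMP kernels, so UP builds pay nothing for it.
 */
static __inline__ int atomic_add_return_sketch(int i, atomic_t *v)
{
	unsigned long result, temp;

	smp_mb();	/* order earlier accesses before the RMW */

	__asm__ __volatile__(
	"1:	ll	%1, %2		# load-linked old value		\n"
	"	addu	%0, %1, %3	# compute new value		\n"
	"	sc	%0, %2		# store-conditional		\n"
	"	beqz	%0, 1b		# retry if SC failed		\n"
	"	addu	%0, %1, %3	# recompute result (delay slot)	\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");

	smp_mb();	/* order the RMW before later accesses */

	return result;
}
```

Note there is no trailing `sync` inside the LL/SC loop any more; ordering is expressed once, at the C level, on both sides of the operation.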
Diffstat (limited to 'include/asm-mips/atomic.h')
| -rw-r--r-- | include/asm-mips/atomic.h | 37 |

1 file changed, 25 insertions(+), 12 deletions(-)
```diff
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 365767074c7..c1a2409bb52 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -15,6 +15,7 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 
@@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		"	sc	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	addu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqzl	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		"	scd	%0, %2					\n"
 		"	beqz	%0, 1b					\n"
 		"	subu	%0, %1, %3				\n"
-		"	sync						\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
 		: "Ir" (i), "m" (v->counter)
@@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
 
@@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
+	smp_mb();
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
 
@@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqzl	%0, 1b					\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"	beqz	%0, 1b					\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	.set	reorder					\n"
-		"	sync						\n"
 		"1:							\n"
 		"	.set	mips0					\n"
 		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		local_irq_restore(flags);
 	}
 
+	smp_mb();
+
 	return result;
 }
```
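
A note on why the `smp_mb()` brackets matter (my gloss, not part of the commit): with a full barrier on both sides, value-returning atomics such as `atomic_sub_return()` act as full memory barriers on weakly ordered MIPS systems, which is what callers like refcount-style code rely on. A hypothetical example of that reliance; `struct obj` and `put_obj()` are illustrative names, not from this patch:

```c
/*
 * Hypothetical illustration (not from the commit): release-side
 * refcounting depends on atomic_sub_return() being a full barrier.
 * The leading smp_mb() makes all prior writes to *obj visible before
 * the count drops; the trailing smp_mb() orders the decrement before
 * the free.
 */
struct obj {
	atomic_t refcount;
	/* ... payload ... */
};

static void put_obj(struct obj *obj)
{
	if (atomic_sub_return(1, &obj->refcount) == 0)
		kfree(obj);	/* our accesses to *obj are ordered
				 * before the decrement, so the free
				 * cannot overtake them */
}
```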