Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/include/asm/atomic_64.h    |  7
-rw-r--r--  arch/sparc/include/asm/bitops_64.h    |  5
-rw-r--r--  arch/sparc/include/asm/spinlock_64.h  | 14
-rw-r--r--  arch/sparc/include/asm/system_64.h    | 35
-rw-r--r--  arch/sparc/include/asm/tsb.h          |  6
5 files changed, 8 insertions(+), 59 deletions(-)
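Every hunk below removes membar instructions that a TSO (total store order) machine does not need: under TSO the only reordering the CPU may perform is letting a later load pass an earlier store, so only a #StoreLoad fence ever needs a real instruction. A minimal store-buffering litmus sketch of that one permitted reordering, in user-space C11 (illustrative only, not part of the patch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Store-buffering litmus test: the single reordering TSO allows.
 * With relaxed accesses and no store->load fence, both r0 and r1 may
 * end up 0, because each store can sit in its CPU's store buffer while
 * the following load executes.  Load/load, store/store and load/store
 * order are already guaranteed by TSO, which is why the patch keeps a
 * real membar only for the full barrier. */
static atomic_int x, y;
static int r0, r1;

static void *cpu0(void *arg)
{
	atomic_store_explicit(&x, 1, memory_order_relaxed);
	/* a seq_cst fence here would play the role of mb() / #StoreLoad */
	r0 = atomic_load_explicit(&y, memory_order_relaxed);
	return NULL;
}

static void *cpu1(void *arg)
{
	atomic_store_explicit(&y, 1, memory_order_relaxed);
	r1 = atomic_load_explicit(&x, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;
	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	printf("r0=%d r1=%d\n", r0, r1);	/* r0==0 && r1==0 only via store->load reordering */
	return 0;
}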
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 2c71ec4a3b1..5982c5ae7f0 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -112,17 +112,10 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
-#ifdef CONFIG_SMP
-#define smp_mb__before_atomic_dec() membar_storeload_loadload();
-#define smp_mb__after_atomic_dec() membar_storeload_storestore();
-#define smp_mb__before_atomic_inc() membar_storeload_loadload();
-#define smp_mb__after_atomic_inc() membar_storeload_storestore();
-#else
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#endif
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
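With both branches of the old #ifdef collapsing to the same thing, the hooks around atomic inc/dec are now a pure compiler fence regardless of CONFIG_SMP. A minimal sketch of what the patched header boils down to (barrier() here is the usual GCC compiler-only fence from the generic kernel headers, shown for reference):

/* Compiler-only fence: forbids GCC from moving memory accesses across
 * it, emits no instruction. */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* Under TSO the atomics already order the hardware strongly enough,
 * so the before/after hooks only need to pin down the compiler. */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()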
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index bb87b808022..e72ac9cdfb9 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -23,13 +23,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
-#ifdef CONFIG_SMP
-#define smp_mb__before_clear_bit() membar_storeload_loadload()
-#define smp_mb__after_clear_bit() membar_storeload_storestore()
-#else
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
-#endif
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
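The clear-bit hooks follow the same pattern: everything written while a bit lock was held must be visible before the bit clears, and TSO gives that store ordering for free. A hedged user-space sketch of the release pattern these hooks serve, using C11 atomics (illustrative names, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong lock_bitmap = 1UL << 0;	/* bit 0 starts "held" */
static int shared_data;

static void bit_unlock(void)
{
	shared_data = 42;				/* work done under the bit lock */
	atomic_thread_fence(memory_order_release);	/* ~ smp_mb__before_clear_bit() */
	atomic_fetch_and_explicit(&lock_bitmap, ~(1UL << 0), memory_order_relaxed);
	/* ~ smp_mb__after_clear_bit() would come here, before waking waiters */
}

int main(void)
{
	bit_unlock();
	printf("bitmap=%lx data=%d\n", atomic_load(&lock_bitmap), shared_data);
	return 0;
}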
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index fbac9d00744..c4d274d330e 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -33,12 +33,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
__asm__ __volatile__(
"1: ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
"2: ldub [%1], %0\n"
-" membar #LoadLoad\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 1b\n"
@@ -54,7 +52,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
__asm__ __volatile__(
" ldstub [%1], %0\n"
-" membar #StoreLoad | #StoreStore"
: "=r" (result)
: "r" (lock)
: "memory");
@@ -65,7 +62,6 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
__asm__ __volatile__(
-" membar #StoreStore | #LoadStore\n"
" stb %%g0, [%0]"
: /* No outputs */
: "r" (lock)
@@ -78,14 +74,12 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
__asm__ __volatile__(
"1: ldstub [%2], %0\n"
-" membar #StoreLoad | #StoreStore\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
"2: rdpr %%pil, %1\n"
" wrpr %3, %%pil\n"
"3: ldub [%2], %0\n"
-" membar #LoadLoad\n"
" brnz,pt %0, 3b\n"
" nop\n"
" ba,pt %%xcc, 1b\n"
@@ -108,12 +102,10 @@ static void inline __read_lock(raw_rwlock_t *lock)
"4: add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
-" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
"2: ldsw [%2], %0\n"
-" membar #LoadLoad\n"
" brlz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
@@ -134,7 +126,6 @@ static int inline __read_trylock(raw_rwlock_t *lock)
" add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
-" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
" mov 1, %0\n"
"2:"
@@ -150,7 +141,6 @@ static void inline __read_unlock(raw_rwlock_t *lock)
unsigned long tmp1, tmp2;
__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
"1: lduw [%2], %0\n"
" sub %0, 1, %1\n"
" cas [%2], %0, %1\n"
@@ -174,12 +164,10 @@ static void inline __write_lock(raw_rwlock_t *lock)
"4: or %0, %3, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
-" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
"2: lduw [%2], %0\n"
-" membar #LoadLoad\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
@@ -192,7 +180,6 @@ static void inline __write_lock(raw_rwlock_t *lock)
static void inline __write_unlock(raw_rwlock_t *lock)
{
__asm__ __volatile__(
-" membar #LoadStore | #StoreStore\n"
" stw %%g0, [%0]"
: /* no outputs */
: "r" (lock)
@@ -212,7 +199,6 @@ static int inline __write_trylock(raw_rwlock_t *lock)
" or %0, %4, %1\n"
" cas [%3], %0, %1\n"
" cmp %0, %1\n"
-" membar #StoreLoad | #StoreStore\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" mov 1, %2\n"
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 8759f2a1b83..7554ad39b5a 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -59,20 +59,9 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
: : : "memory"); \
} while (0)
-#define mb() \
- membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define rmb() \
- membar_safe("#LoadLoad")
-#define wmb() \
- membar_safe("#StoreStore")
-#define membar_storeload() \
- membar_safe("#StoreLoad")
-#define membar_storeload_storestore() \
- membar_safe("#StoreLoad | #StoreStore")
-#define membar_storeload_loadload() \
- membar_safe("#StoreLoad | #LoadLoad")
-#define membar_storestore_loadstore() \
- membar_safe("#StoreStore | #LoadStore")
+#define mb() membar_safe("#StoreLoad")
+#define rmb() __asm__ __volatile__("":::"memory")
+#define wmb() __asm__ __volatile__("":::"memory")
#endif
@@ -80,20 +69,20 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) \
- do { __var = __value; membar_storeload_storestore(); } while(0)
+ do { __var = __value; membar_safe("#StoreLoad"); } while(0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
-#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() __asm__ __volatile__("":::"memory")
#define smp_rmb() __asm__ __volatile__("":::"memory")
#define smp_wmb() __asm__ __volatile__("":::"memory")
-#define smp_read_barrier_depends() do { } while(0)
#endif
+#define smp_read_barrier_depends() do { } while(0)
+
#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
#define flushw_all() __asm__ __volatile__("flushw")
@@ -209,14 +198,12 @@ static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int va
unsigned long tmp1, tmp2;
__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
" mov %0, %1\n"
"1: lduw [%4], %2\n"
" cas [%4], %2, %0\n"
" cmp %2, %0\n"
" bne,a,pn %%icc, 1b\n"
" mov %1, %0\n"
-" membar #StoreLoad | #StoreStore\n"
: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
: "0" (val), "r" (m)
: "cc", "memory");
@@ -228,14 +215,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
unsigned long tmp1, tmp2;
__asm__ __volatile__(
-" membar #StoreLoad | #LoadLoad\n"
" mov %0, %1\n"
"1: ldx [%4], %2\n"
" casx [%4], %2, %0\n"
" cmp %2, %0\n"
" bne,a,pn %%xcc, 1b\n"
" mov %1, %0\n"
-" membar #StoreLoad | #StoreStore\n"
: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
: "0" (val), "r" (m)
: "cc", "memory");
@@ -272,9 +257,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
- __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
- "cas [%2], %3, %0\n\t"
- "membar #StoreLoad | #StoreStore"
+ __asm__ __volatile__("cas [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
@@ -285,9 +268,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
- __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
- "casx [%2], %3, %0\n\t"
- "membar #StoreLoad | #StoreStore"
+ __asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 76e4299dd9b..83c571d8c8a 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -50,8 +50,6 @@
#define TSB_TAG_INVALID_BIT 46
#define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32))
-#define TSB_MEMBAR membar #StoreStore
-
/* Some cpus support physical address quad loads. We want to use
* those if possible so we don't need to hard-lock the TSB mapping
* into the TLB. We encode some instruction patching in order to
@@ -128,13 +126,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
cmp REG1, REG2; \
bne,pn %icc, 99b; \
nop; \
- TSB_MEMBAR
#define TSB_WRITE(TSB, TTE, TAG) \
add TSB, 0x8, TSB; \
TSB_STORE(TSB, TTE); \
sub TSB, 0x8, TSB; \
- TSB_MEMBAR; \
TSB_STORE(TSB, TAG);
#define KTSB_LOAD_QUAD(TSB, REG) \
@@ -153,13 +149,11 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
cmp REG1, REG2; \
bne,pn %icc, 99b; \
nop; \
- TSB_MEMBAR
#define KTSB_WRITE(TSB, TTE, TAG) \
add TSB, 0x8, TSB; \
stxa TTE, [TSB] ASI_N; \
sub TSB, 0x8, TSB; \
- TSB_MEMBAR; \
stxa TAG, [TSB] ASI_N;
/* Do a kernel page table walk. Leaves physical PTE pointer in
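The tsb.h change is the same reasoning one level down: TSB_MEMBAR was a #StoreStore between the TTE store and the tag store, and TSO already keeps those two stores in program order. For reference, the TSB_WRITE macro as it reads after this patch:

/* No #StoreStore needed between the two stores under TSO. */
#define TSB_WRITE(TSB, TTE, TAG) \
	add	TSB, 0x8, TSB; \
	TSB_STORE(TSB, TTE); \
	sub	TSB, 0x8, TSB; \
	TSB_STORE(TSB, TAG);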