Diffstat (limited to 'include/asm-arm/spinlock.h')

-rw-r--r--  include/asm-arm/spinlock.h | 82
1 file changed, 54 insertions(+), 28 deletions(-)

diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 182323619ca..1f906d09b68 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -8,9 +8,10 @@
/*
* ARMv6 Spin-locking.
*
- * We (exclusively) read the old value, and decrement it. If it
- * hits zero, we may have won the lock, so we try (exclusively)
- * storing it.
+ * We exclusively read the old value. If it is zero, we may have
+ * won the lock, so we try exclusively storing it. A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
*
* Unlocked value: 0
* Locked value: 1
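
For illustration, the acquire protocol the comment describes can be sketched
in portable C, with GCC's __atomic builtins standing in for the ldrex/strex
pair; toy_spin_lock and everything in it are invented for this note and are
not part of the patch:

/* Illustrative only: a compare-and-swap loop standing in for the
 * ldrex/strex sequence, followed by a full fence once the lock is
 * owned, mirroring the smp_mb() this patch adds. */
static inline void toy_spin_lock(volatile int *lock)
{
	int expected;

	do {
		expected = 0;	/* unlocked value */
	} while (!__atomic_compare_exchange_n(lock, &expected, 1,
					      0 /* strong CAS */,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));

	/* plays the role of smp_mb(): no critical-section access may
	 * be reordered before the lock is seen held */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}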
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
" bne 1b"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
- : "cc", "memory");
+ : "cc");
+
+ smp_mb();
}

static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&lock->lock), "r" (1)
- : "cc", "memory");
-
- return tmp == 0;
+ : "cc");
+
+ if (tmp == 0) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
+ smp_mb();
+
__asm__ __volatile__(
" str %1, [%0]"
:
: "r" (&lock->lock), "r" (0)
- : "cc", "memory");
+ : "cc");
}
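
The placement of the two barriers is the point of these hunks: one after the
lock is taken, one before the lock word is rewritten. A minimal usage sketch,
where bump and shared_counter are hypothetical names assuming the functions
above:

static int shared_counter;	/* hypothetical data guarded by the lock */

static void bump(spinlock_t *lock)
{
	_raw_spin_lock(lock);	/* smp_mb() inside keeps the accesses to
				 * shared_counter from moving above the lock */
	shared_counter++;
	_raw_spin_unlock(lock);	/* smp_mb() runs before the str that drops
				 * the lock, so the update is visible first */
}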
/*
@@ -79,7 +89,8 @@ typedef struct {
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-#define rwlock_init(x) do { *(x) + RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
+#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)

/*
* Write locks are easy - we just set bit 31. When unlocking, we can
@@ -97,16 +108,40 @@ static inline void _raw_write_lock(rwlock_t *rw)
" bne 1b"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
- : "cc", "memory");
+ : "cc");
+
+ smp_mb();
+}
+
+static inline int _raw_write_trylock(rwlock_t *rw)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]"
+ : "=&r" (tmp)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+
+ if (tmp == 0) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
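
For comparison, the new trylock logic in illustrative portable C
(toy_write_trylock is invented here, again assuming GCC's __atomic builtins):
it succeeds only when the lock word is zero, i.e. no readers in the low bits
and no writer in bit 31, and issues the barrier only on the success path,
just as the code above does.

static inline int toy_write_trylock(volatile unsigned int *lock)
{
	unsigned int expected = 0;

	if (__atomic_compare_exchange_n(lock, &expected, 0x80000000,
					0 /* strong CAS */,
					__ATOMIC_RELAXED,
					__ATOMIC_RELAXED)) {
		__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() */
		return 1;
	}
	return 0;
}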
static inline void _raw_write_unlock(rwlock_t *rw)
{
+ smp_mb();
+
__asm__ __volatile__(
"str %1, [%0]"
:
: "r" (&rw->lock), "r" (0)
- : "cc", "memory");
+ : "cc");
}

/*
@@ -133,11 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
" bmi 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
- : "cc", "memory");
+ : "cc");
+
+ smp_mb();
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
+ unsigned long tmp, tmp2;
+
+ smp_mb();
+
__asm__ __volatile__(
"1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n"
@@ -146,24 +187,9 @@ static inline void _raw_read_unlock(rwlock_t *rw)
" bne 1b"
: "=&r" (tmp), "=&r" (tmp2)
: "r" (&rw->lock)
- : "cc", "memory");
+ : "cc");
}
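
The read side in the same illustrative style: the ldrex/adds/strexpl loop
above publishes the incremented reader count only while the result stays
non-negative, i.e. while no writer holds bit 31. A rough C sketch
(toy_read_lock is invented for this note, not part of the patch):

static inline void toy_read_lock(volatile int *lock)
{
	int old;

	for (;;) {
		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		if (old + 1 < 0)	/* bit 31 set: writer present, spin */
			continue;
		/* publish old+1 only if nobody raced us, like strexpl */
		if (__atomic_compare_exchange_n(lock, &old, old + 1,
						0 /* strong CAS */,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED))
			break;
	}

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* smp_mb() */
}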
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

-static inline int _raw_write_trylock(rwlock_t *rw)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc", "memory");
-
- return tmp == 0;
-}
-
#endif /* __ASM_SPINLOCK_H */