path: root/include/asm-ia64/spinlock.h
author     Keith Owens <kaos@sgi.com>          2005-12-10 14:24:28 +1100
committer  Tony Luck <tony.luck@intel.com>     2005-12-12 08:54:18 -0800
commit     bf7ececa9b68f4720f1ce344f442435660bcdae7 (patch)
tree       7caff55f6ce8cfee2547ad0c93ad893c8f9583cb /include/asm-ia64/spinlock.h
parent     f64fa6772aa874e5cad02a9d87e6b0d99ced3d48 (diff)
[IA64] Define an ia64 version of __raw_read_trylock
IA64 is using the generic version of __raw_read_trylock, which always waits for the lock to be free instead of returning when the lock is in use. Define an ia64 version of __raw_read_trylock which behaves correctly, and drop the generic one.

Signed-off-by: Keith Owens <kaos@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
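To see why the trylock semantics matter, consider a hypothetical caller (not part of this commit; stats_lock and stats are illustrative names) that takes a best-effort peek at shared state and must not block. With the generic __raw_read_trylock this helper would busy-wait until a writer dropped the lock, defeating the point of a "try" operation; with a correct trylock it returns immediately.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* hypothetical shared state */
static int stats;

/* Best-effort read: read_trylock() must return 0 at once if a writer
 * holds the lock, so this path never spins. */
static int peek_stats(void)
{
	int val = -1;

	if (read_trylock(&stats_lock)) {
		val = stats;
		read_unlock(&stats_lock);
	}
	return val;
}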
Diffstat (limited to 'include/asm-ia64/spinlock.h')
-rw-r--r--  include/asm-ia64/spinlock.h  12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index 5b78611411c..0c91a76c5ea 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -201,6 +201,16 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
 
 #endif /* !ASM_SUPPORTED */
 
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
+static inline int __raw_read_trylock(raw_rwlock_t *x)
+{
+	union {
+		raw_rwlock_t lock;
+		__u32 word;
+	} old, new;
+	old.lock = new.lock = *x;
+	old.lock.write_lock = new.lock.write_lock = 0;
+	++new.lock.read_counter;
+	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
+}
 
 #endif /* _ASM_IA64_SPINLOCK_H */
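The replacement avoids a retry loop entirely: it snapshots the lock word, forces the write_lock bit to zero in both the expected and desired values, bumps the read counter in the desired value, and issues a single ia64_cmpxchg4_acq. If a writer holds the lock (or the word changed underneath), the compare fails and the function returns 0 immediately. Below is a minimal user-space sketch of the same idea, using a GCC __atomic builtin in place of ia64_cmpxchg4_acq and an illustrative bit layout (one writer bit, readers counted in the remaining bits) rather than the kernel's raw_rwlock_t:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRITE_LOCK_BIT	0x1u	/* illustrative layout, not the ia64 one */
#define READER_ONE	0x2u	/* readers counted in the remaining bits */

static bool read_trylock_word(uint32_t *lock)
{
	uint32_t old = *lock & ~WRITE_LOCK_BIT;	/* expected: no writer */
	uint32_t new = old + READER_ONE;	/* desired: one more reader */

	/* Single attempt: fails at once if a writer holds the lock or the
	 * word changed, mirroring the one-shot cmpxchg in the patch. */
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	uint32_t lock = 0;

	printf("uncontended: %d\n", read_trylock_word(&lock));	/* 1: acquired */
	lock = WRITE_LOCK_BIT;					/* simulate a writer */
	printf("writer held: %d\n", read_trylock_word(&lock));	/* 0: no spinning */
	return 0;
}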