| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-alpha/rwsem.h | |
Linux-2.6.12-rc2 v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-alpha/rwsem.h')
-rw-r--r-- | include/asm-alpha/rwsem.h | 266 |
1 file changed, 266 insertions, 0 deletions
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
new file mode 100644
index 00000000000..8e058a67c9a
--- /dev/null
+++ b/include/asm-alpha/rwsem.h
@@ -0,0 +1,266 @@
+#ifndef _ALPHA_RWSEM_H
+#define _ALPHA_RWSEM_H
+
+/*
+ * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
+ * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
+ */
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct rwsem_waiter;
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	long count;
+#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
+#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
+#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
+#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+	spinlock_t wait_lock;
+	struct list_head wait_list;
+#if RWSEM_DEBUG
+	int debug;
+#endif
+};
+
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT	, 0
+#else
+#define __RWSEM_DEBUG_INIT	/* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1: ldq_l %0,%1\n"
+	" addq %0,%3,%2\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	" mb\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		rwsem_down_read_failed(sem);
+}
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	long old, new, res;
+
+	res = sem->count;
+	do {
+		new = res + RWSEM_ACTIVE_READ_BIAS;
+		if (new <= 0)
+			break;
+		old = res;
+		res = cmpxchg(&sem->count, old, new);
+	} while (res != old);
+	return res >= 0 ? 1 : 0;
+}
+
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef CONFIG_SMP
+	oldcount = sem->count;
+	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1: ldq_l %0,%1\n"
+	" addq %0,%3,%2\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	" mb\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount))
+		rwsem_down_write_failed(sem);
+}
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+			   RWSEM_ACTIVE_WRITE_BIAS);
+	if (ret == RWSEM_UNLOCKED_VALUE)
+		return 1;
+	return 0;
+}
+
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_ACTIVE_READ_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	" mb\n"
+	"1: ldq_l %0,%1\n"
+	" subq %0,%3,%2\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
+			rwsem_wake(sem);
+}
+
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	long count;
+#ifndef CONFIG_SMP
+	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
+	count = sem->count;
+#else
+	long temp;
+	__asm__ __volatile__(
+	" mb\n"
+	"1: ldq_l %0,%1\n"
+	" subq %0,%3,%2\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	" subq %0,%3,%0\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(count))
+		if ((int)count == 0)
+			rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	long oldcount;
+#ifndef CONFIG_SMP
+	oldcount = sem->count;
+	sem->count -= RWSEM_WAITING_BIAS;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1: ldq_l %0,%1\n"
+	" addq %0,%3,%2\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	" mb\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
+#endif
+	if (unlikely(oldcount < 0))
+		rwsem_downgrade_wake(sem);
+}
+
+static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
+{
+#ifndef CONFIG_SMP
+	sem->count += val;
+#else
+	long temp;
+	__asm__ __volatile__(
+	"1: ldq_l %0,%1\n"
+	" addq %0,%2,%0\n"
+	" stq_c %0,%1\n"
+	" beq %0,2f\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (sem->count)
+	:"Ir" (val), "m" (sem->count));
+#endif
+}
+
+static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
+{
+#ifndef CONFIG_SMP
+	sem->count += val;
+	return sem->count;
+#else
+	long ret, temp;
+	__asm__ __volatile__(
+	"1: ldq_l %0,%1\n"
+	" addq %0,%3,%2\n"
+	" addq %0,%3,%0\n"
+	" stq_c %2,%1\n"
+	" beq %2,2f\n"
+	".subsection 2\n"
+	"2: br 1b\n"
+	".previous"
+	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
+	:"Ir" (val), "m" (sem->count));
+
+	return ret;
+#endif
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ALPHA_RWSEM_H */
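The bias constants introduced by this header pack the whole rwsem state into one signed 64-bit word: the low 32 bits (RWSEM_ACTIVE_MASK) count active holders, and the upper 32 bits go negative whenever a writer holds or waits on the lock. __down_read adds RWSEM_ACTIVE_READ_BIAS and falls back to rwsem_down_read_failed() if the count it saw was negative; __down_write adds RWSEM_ACTIVE_WRITE_BIAS and falls back to rwsem_down_write_failed() unless the old count was zero. Below is a minimal userspace sketch of that arithmetic, for illustration only (it is not part of this commit and assumes a 64-bit long, as on Alpha):

```c
/*
 * Illustration only: walk through the rwsem count encoding used by the
 * bias constants in asm-alpha/rwsem.h, assuming a 64-bit long.
 */
#include <assert.h>

#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	/* two uncontended readers: count stays non-negative, two active holders */
	count += RWSEM_ACTIVE_READ_BIAS;
	count += RWSEM_ACTIVE_READ_BIAS;
	assert(count >= 0 && (count & RWSEM_ACTIVE_MASK) == 2);

	/* both readers drop the lock */
	count -= 2 * RWSEM_ACTIVE_READ_BIAS;
	assert(count == RWSEM_UNLOCKED_VALUE);

	/* uncontended writer: old count was zero, new count goes negative */
	count += RWSEM_ACTIVE_WRITE_BIAS;
	assert(count < 0 && (count & RWSEM_ACTIVE_MASK) == 1);

	/* a reader arriving now sees count < 0 and would take the slow path */
	assert(count < 0);
	return 0;
}
```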