author     Matthew Wilcox <matthew@wil.cx>           2008-03-07 21:55:58 -0500
committer  Matthew Wilcox <willy@linux.intel.com>    2008-04-17 10:42:34 -0400
commit     64ac24e738823161693bf791f87adc802cf529ff (patch)
tree       19c0b0cf314d4394ca580c05b86cdf874ce0a167 /include/asm-arm/semaphore-helper.h
parent     e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility. Thanks to Peter Zijlstra for fixing the lockdep
warning. Thanks to Harvey Harrison for pointing out that the
unlikely() was unnecessary.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
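For context, the generic replacement keeps the semaphore count and the list of sleeping tasks under a single per-semaphore spinlock, which is what makes per-architecture helpers like the ones removed below unnecessary. The following is a minimal, simplified sketch of that approach; it assumes the usual kernel primitives (spinlocks, list heads, wake_up_process()) and is illustrative only, not the verbatim generic code added by this series.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>

/* Illustrative sketch of a generic counting semaphore, not the exact code. */
struct semaphore {
        spinlock_t              lock;           /* protects count and wait_list */
        unsigned int            count;          /* resources currently available */
        struct list_head        wait_list;      /* tasks sleeping in down() */
};

struct semaphore_waiter {
        struct list_head        list;
        struct task_struct      *task;
        int                     up;             /* set by up() when the semaphore is granted */
};

void down(struct semaphore *sem)
{
        struct semaphore_waiter waiter;
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (sem->count > 0) {
                sem->count--;                   /* fast path: a resource is free */
        } else {
                waiter.task = current;          /* slow path: queue and sleep */
                waiter.up = 0;
                list_add_tail(&waiter.list, &sem->wait_list);
                while (!waiter.up) {
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        spin_unlock_irqrestore(&sem->lock, flags);
                        schedule();
                        spin_lock_irqsave(&sem->lock, flags);
                }
        }
        spin_unlock_irqrestore(&sem->lock, flags);
}

void up(struct semaphore *sem)
{
        unsigned long flags;

        spin_lock_irqsave(&sem->lock, flags);
        if (list_empty(&sem->wait_list)) {
                sem->count++;                   /* nobody waiting: just return the resource */
        } else {                                /* hand off directly to the first waiter */
                struct semaphore_waiter *waiter =
                        list_first_entry(&sem->wait_list, struct semaphore_waiter, list);

                list_del(&waiter->list);
                waiter->up = 1;
                wake_up_process(waiter->task);
        }
        spin_unlock_irqrestore(&sem->lock, flags);
}

Because up() hands the semaphore directly to the first waiter rather than bumping the count and letting woken tasks re-race for it, there is no separate "waking" counter to keep in sync, and hence no need for a global semaphore_wake_lock.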
Diffstat (limited to 'include/asm-arm/semaphore-helper.h')
-rw-r--r--  include/asm-arm/semaphore-helper.h  84
1 file changed, 0 insertions, 84 deletions
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb..00000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ASMARM_SEMAPHORE_HELPER_H
-#define ASMARM_SEMAPHORE_HELPER_H
-
-/*
- * These two _must_ execute atomically wrt each other.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (atomic_read(&sem->count) <= 0)
-                sem->waking++;
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking > 0) {
-                sem->waking--;
-                ret = 1;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking non zero interruptible
- *      1       got the lock
- *      0       go to sleep
- *      -EINTR  interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
-                                                struct task_struct *tsk)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking > 0) {
-                sem->waking--;
-                ret = 1;
-        } else if (signal_pending(tsk)) {
-                atomic_inc(&sem->count);
-                ret = -EINTR;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-/*
- * waking_non_zero_try_lock:
- *      1       failed to lock
- *      0       got the lock
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static inline int waking_non_zero_trylock(struct semaphore *sem)
-{
-        unsigned long flags;
-        int ret = 1;
-
-        spin_lock_irqsave(&semaphore_wake_lock, flags);
-        if (sem->waking <= 0)
-                atomic_inc(&sem->count);
-        else {
-                sem->waking--;
-                ret = 0;
-        }
-        spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-        return ret;
-}
-
-#endif
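The helpers deleted above were needed because the old implementation split its state between an atomic count and a separate "waking" counter guarded by the global semaphore_wake_lock, so the signal-handling path had to undo the count change while still holding that lock (the "-arca" comments above). In the generic scheme everything lives under one per-semaphore lock, and the interruptible case is just a small variation of the earlier sketch, reusing its struct definitions; again an assumption-level illustration, not the exact code:

int down_interruptible(struct semaphore *sem)
{
        struct semaphore_waiter waiter;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&sem->lock, flags);
        if (sem->count > 0) {
                sem->count--;                           /* fast path */
        } else {
                waiter.task = current;
                waiter.up = 0;
                list_add_tail(&waiter.list, &sem->wait_list);
                for (;;) {
                        if (signal_pending(current)) {
                                list_del(&waiter.list); /* unqueue; no counter to undo */
                                ret = -EINTR;
                                break;
                        }
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irqrestore(&sem->lock, flags);
                        schedule();
                        spin_lock_irqsave(&sem->lock, flags);
                        if (waiter.up)                  /* up() granted us the semaphore */
                                break;
                }
        }
        spin_unlock_irqrestore(&sem->lock, flags);
        return ret;
}

All bookkeeping happens while sem->lock is held, so backing out on a signal is nothing more than removing the waiter from the list.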