author    Paul Mundt <lethal@linux-sh.org>  2007-11-21 17:28:09 +0900
committer Paul Mundt <lethal@linux-sh.org>  2008-01-28 13:18:53 +0900
commit    c8eef8800f1c693a2de6374b1948c8ea5e0ad75f (patch)
tree      b8f66bd608ae5aa22e2ef508ccf0536a137bd127 /include/asm-sh64/semaphore-helper.h
parent    e150e7f25f7e3878cf2230193a5c708d99b9971e (diff)
sh: Purge dead sh64 headers.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include/asm-sh64/semaphore-helper.h')
-rw-r--r--  include/asm-sh64/semaphore-helper.h  101
1 file changed, 0 insertions, 101 deletions
diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h
deleted file mode 100644
index fcfafe263e8..00000000000
--- a/include/asm-sh64/semaphore-helper.h
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
-#define __ASM_SH64_SEMAPHORE_HELPER_H
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * include/asm-sh64/semaphore-helper.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- */
-#include <asm/errno.h>
-
-/*
- * SMP- and interrupt-safe semaphores helper functions.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999 Andrea Arcangeli
- */
-
-/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * which we have. Let the rest of the losers suck eggs.
- */
-static __inline__ void wake_one_more(struct semaphore * sem)
-{
- atomic_inc((atomic_t *)&sem->sleepers);
-}
-
-static __inline__ int waking_non_zero(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers > 0) {
- sem->sleepers--;
- ret = 1;
- } else if (signal_pending(tsk)) {
- atomic_inc(&sem->count);
- ret = -EINTR;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
- * waking_non_zero_trylock:
- * 1 failed to lock
- * 0 got the lock
- *
- * We must undo the sem->count down_trylock() increment while we are
- * protected by the spinlock in order to make atomic this atomic_inc() with the
- * atomic_read() in wake_one_more(), otherwise we can race. -arca
- */
-static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int ret = 1;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->sleepers <= 0)
- atomic_inc(&sem->count);
- else {
- sem->sleepers--;
- ret = 0;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
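For context on what the deleted helpers did: they implement the old "sleepers" handshake used by the architecture-specific semaphore slow path, where a waker records a pending wakeup and a woken sleeper consumes it under a lock. Below is a minimal userspace analogue of the wake_one_more()/waking_non_zero() pair, offered only as an illustration of that contract; struct sem_model and its fields are invented for this sketch and do not appear in the kernel tree, and a plain pthread mutex stands in for the spin_lock_irqsave() on semaphore_wake_lock (so the interrupt-safety aspect of the real code is not modelled).

/*
 * Userspace sketch of the sleepers handshake (illustration only,
 * not kernel code). Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct sem_model {
	int sleepers;           /* pending wakeups, like sem->sleepers */
	pthread_mutex_t lock;   /* stands in for semaphore_wake_lock   */
};

/* wake_one_more(): the waker records one pending wakeup. */
static void wake_one_more(struct sem_model *s)
{
	pthread_mutex_lock(&s->lock);
	s->sleepers++;
	pthread_mutex_unlock(&s->lock);
}

/*
 * waking_non_zero(): a woken sleeper consumes a pending wakeup.
 * Returns 1 if the caller may claim the semaphore, 0 if it should
 * go back to sleep -- the same contract as the deleted helper.
 */
static int waking_non_zero(struct sem_model *s)
{
	int ret = 0;

	pthread_mutex_lock(&s->lock);
	if (s->sleepers > 0) {
		s->sleepers--;
		ret = 1;
	}
	pthread_mutex_unlock(&s->lock);
	return ret;
}

int main(void)
{
	struct sem_model s = { .sleepers = 0 };

	pthread_mutex_init(&s.lock, NULL);
	wake_one_more(&s);
	printf("first check:  %d (wakeup consumed)\n", waking_non_zero(&s));
	printf("second check: %d (would sleep again)\n", waking_non_zero(&s));
	pthread_mutex_destroy(&s.lock);
	return 0;
}

The interruptible and trylock variants in the deleted header follow the same pattern, additionally re-incrementing sem->count under the lock when the caller backs out, so that the adjustment stays atomic with respect to wake_one_more().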