author    Linus Torvalds <torvalds@linux-foundation.org>    2014-08-04 16:09:06 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-08-04 16:09:06 -0700
commit    8efb90cf1e80129fad197b916714e1d01ee183d2
tree      8eacf9e5f34cc7a9e2b553974b9f9181d61be294 /kernel/locking/qrwlock.c
parent    5bda4f638f36ef4c4e3b1397b02affc3db94356e
parent    3a6bfbc91df04b081a44d419e0260bad54abddf7
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "The main changes in this cycle are:

   - big rtmutex and futex cleanup and robustification from Thomas Gleixner

   - mutex optimizations and refinements from Jason Low

   - arch_mutex_cpu_relax() removal and related cleanups

   - smaller lockdep tweaks"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  arch, locking: Ciao arch_mutex_cpu_relax()
  locking/lockdep: Only ask for /proc/lock_stat output when available
  locking/mutexes: Optimize mutex trylock slowpath
  locking/mutexes: Try to acquire mutex only if it is unlocked
  locking/mutexes: Delete the MUTEX_SHOW_NO_WAITER macro
  locking/mutexes: Correct documentation on mutex optimistic spinning
  rtmutex: Make the rtmutex tester depend on BROKEN
  futex: Simplify futex_lock_pi_atomic() and make it more robust
  futex: Split out the first waiter attachment from lookup_pi_state()
  futex: Split out the waiter check from lookup_pi_state()
  futex: Use futex_top_waiter() in lookup_pi_state()
  futex: Make unlock_pi more robust
  rtmutex: Avoid pointless requeueing in the deadlock detection chain walk
  rtmutex: Cleanup deadlock detector debug logic
  rtmutex: Confine deadlock logic to futex
  rtmutex: Simplify remove_waiter()
  rtmutex: Document pi chain walk
  rtmutex: Clarify the boost/deboost part
  rtmutex: No need to keep task ref for lock owner check
  rtmutex: Simplify and document try_to_take_rtmutex()
  ...
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r--  kernel/locking/qrwlock.c  9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fb5b8ac411a..f956ede7f90 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 		cnts = smp_load_acquire((u32 *)&lock->cnts);
 	}
 }
@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
 	 * to make sure that the write lock isn't taken.
 	 */
 	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    cnts | _QW_WAITING) == cnts))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 unlock:
 	arch_spin_unlock(&lock->lock);
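
Every hunk above makes the same substitution: the busy-wait loops in the qrwlock slowpaths
drop arch_mutex_cpu_relax() in favour of cpu_relax_lowlatency(), keeping a CPU relax hint
inside each spin. As a rough illustration of that pattern only, here is a minimal
user-space sketch of a "spin until the writer byte clears" loop; the names (QW_WMASK,
QW_LOCKED, cpu_relax_hint) are illustrative stand-ins, not the kernel's own API:

	#include <stdatomic.h>
	#include <stdint.h>

	#define QW_WMASK  0xffU		/* writer byte of the combined reader/writer count */
	#define QW_LOCKED 0xffU		/* writer byte value while a writer holds the lock */

	static inline void cpu_relax_hint(void)
	{
	#if defined(__x86_64__) || defined(__i386__)
		__builtin_ia32_pause();	/* PAUSE: cheaper spinning, kinder to SMT siblings */
	#endif
	}

	/* Spin, with a relax hint each iteration, until no writer holds the lock. */
	static void spin_until_writer_unlock(_Atomic uint32_t *cnts)
	{
		uint32_t c = atomic_load_explicit(cnts, memory_order_acquire);

		while ((c & QW_WMASK) == QW_LOCKED) {
			cpu_relax_hint();
			c = atomic_load_explicit(cnts, memory_order_acquire);
		}
	}

The acquire load mirrors the smp_load_acquire() in rspin_until_writer_unlock(): once the
writer byte clears, the spinning reader must also observe the writer's critical-section
stores before proceeding.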