| author | James Morris <james.l.morris@oracle.com> | 2014-11-19 21:32:12 +1100 |
| --- | --- | --- |
| committer | James Morris <james.l.morris@oracle.com> | 2014-11-19 21:32:12 +1100 |
| commit | b10778a00d40b3d9fdaaf5891e802794781ff71c (patch) | |
| tree | 6ba4cbac86eecedc3f30650e7f764ecf00c83898 /kernel/locking/qrwlock.c | |
| parent | 594081ee7145cc30a3977cb4e218f81213b63dc5 (diff) | |
| parent | bfe01a5ba2490f299e1d2d5508cbbbadd897bbe9 (diff) | |
Merge commit 'v3.17' into next
Diffstat (limited to 'kernel/locking/qrwlock.c')
-rw-r--r-- | kernel/locking/qrwlock.c | 9 |
1 file changed, 4 insertions, 5 deletions
```diff
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index fb5b8ac411a..f956ede7f90 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 		cnts = smp_load_acquire((u32 *)&lock->cnts);
 	}
 }
@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
 	 * to make sure that the write lock isn't taken.
 	 */
 	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 
 	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    cnts | _QW_WAITING) == cnts))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 
 	/* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
 				    _QW_LOCKED) == _QW_WAITING))
 			break;
 
-		arch_mutex_cpu_relax();
+		cpu_relax_lowlatency();
 	}
 unlock:
 	arch_spin_unlock(&lock->lock);
```
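For context, every hunk in this diff touches the same pattern: a slowpath loop that spins on the lock word, issues a CPU relaxation hint on each iteration, and re-reads the count with acquire semantics. The sketch below is a minimal user-space illustration of that reader-side pattern using C11 atomics; it is not the kernel code. `struct my_qrwlock`, `relax_hint()`, and the `QW_*`/`QR_*` constants are simplified stand-ins for the kernel's qrwlock internals, and `relax_hint()` only approximates what `cpu_relax_lowlatency()` maps to on real hardware (e.g. a PAUSE instruction on x86).

```c
/*
 * Illustrative sketch of the spin-and-relax pattern changed by this patch.
 * Assumed names: struct my_qrwlock, relax_hint(), QW_*/QR_* constants.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define QW_WMASK   0xffU   /* writer byte, mirrors _QW_WMASK   */
#define QW_LOCKED  0xffU   /* writer holds lock, mirrors _QW_LOCKED */
#define QR_BIAS    0x100U  /* one reader, mirrors _QR_BIAS     */

struct my_qrwlock {
	_Atomic uint32_t cnts;
};

/*
 * Stand-in for cpu_relax_lowlatency(): a compiler-level hint only.
 * The kernel helper typically emits an architecture-specific pause.
 */
static inline void relax_hint(void)
{
	atomic_signal_fence(memory_order_seq_cst);
}

/* Spin until the writer byte clears, re-reading the count with acquire
 * semantics each time, just like rspin_until_writer_unlock() does. */
static void rspin_until_writer_unlock(struct my_qrwlock *lock, uint32_t cnts)
{
	while ((cnts & QW_WMASK) == QW_LOCKED) {
		relax_hint();
		cnts = atomic_load_explicit(&lock->cnts, memory_order_acquire);
	}
}

int main(void)
{
	/* One reader holds the lock, no writer pending. */
	struct my_qrwlock lock = { .cnts = QR_BIAS };
	uint32_t cnts = atomic_load_explicit(&lock.cnts, memory_order_acquire);

	rspin_until_writer_unlock(&lock, cnts);
	printf("no writer present, reader may proceed (cnts=0x%x)\n", cnts);
	return 0;
}
```

The patch itself only swaps the relaxation primitive inside these loops (`arch_mutex_cpu_relax()` to `cpu_relax_lowlatency()`); the locking logic is unchanged.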