Diffstat (limited to 'arch/s390/lib/spinlock.c')
-rw-r--r--	arch/s390/lib/spinlock.c	157
1 file changed, 87 insertions(+), 70 deletions(-)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f709983f41f..5b0e445bc3f 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -26,83 +26,81 @@ __setup("spin_retry=", spin_retry_setup);
 
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
-	int count = spin_retry;
-	unsigned int cpu = ~smp_processor_id();
+	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
+	int count;
 
 	while (1) {
-		owner = lp->owner_cpu;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			for (count = spin_retry; count > 0; count--) {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
-					return;
-			}
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->owner_cpu;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
-			return;
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
 void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
-	int count = spin_retry;
-	unsigned int cpu = ~smp_processor_id();
+	unsigned int cpu = SPINLOCK_LOCKVAL;
 	unsigned int owner;
+	int count;
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->owner_cpu;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			for (count = spin_retry; count > 0; count--) {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
-					return;
-				local_irq_restore(flags);
-			}
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->owner_cpu;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
-			return;
-		local_irq_restore(flags);
-	}
-}
-EXPORT_SYMBOL(arch_spin_lock_wait_flags);
-
-int arch_spin_trylock_retry(arch_spinlock_t *lp)
-{
-	unsigned int cpu = ~smp_processor_id();
-	int count;
-
-	for (count = spin_retry; count > 0; count--) {
-		if (arch_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
-			return 1;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
-	return 0;
 }
-EXPORT_SYMBOL(arch_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-void arch_spin_relax(arch_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lp)
 {
-	unsigned int cpu = lock->owner_cpu;
+	unsigned int cpu = lp->lock;
 	if (cpu != 0) {
 		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
 		    !smp_vcpu_scheduled(~cpu))
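
A note on the lock word this hunk rewrites: 0 means the lock is free, and a held lock stores the bitwise complement of the owner's CPU number (~smp_processor_id() in the old code; on the new side SPINLOCK_LOCKVAL plays that role, with its definition in the headers rather than in this file). No complemented CPU id is 0, so 0 can unambiguously mean "free", and applying ~ again, as in smp_vcpu_scheduled(~owner) and smp_yield_cpu(~owner), recovers the owner's CPU number. A minimal standalone sketch of the convention, using C11 atomics in place of the kernel's _raw_compare_and_swap():

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_uint lock_word;	/* 0 == free, else ~owner_cpu */

	/* A complemented CPU id is never 0 for any real CPU number. */
	static unsigned int lockval(unsigned int cpu)
	{
		return ~cpu;
	}

	/* Boolean-style CAS, mirroring how the patched code calls
	 * _raw_compare_and_swap(). */
	static bool try_acquire(unsigned int cpu)
	{
		unsigned int expected = 0;
		return atomic_compare_exchange_strong(&lock_word, &expected,
						      lockval(cpu));
	}

	/* Complementing again recovers the owner's CPU number. */
	static unsigned int owner_of(unsigned int lockword)
	{
		return ~lockword;
	}
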
@@ -111,6 +109,17 @@ void arch_spin_relax(arch_spinlock_t *lock)
 	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
 
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
+{
+	int count;
+
+	for (count = spin_retry; count > 0; count--)
+		if (arch_spin_trylock_once(lp))
+			return 1;
+	return 0;
+}
+EXPORT_SYMBOL(arch_spin_trylock_retry);
+
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
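
arch_spin_trylock_retry() is now a thin retry loop around arch_spin_trylock_once(), a header helper that is not part of this diff. Presumably it makes a single free-check plus compare-and-swap attempt; a standalone sketch under that assumption (trylock_once and trylock_retry are illustrative names, not the kernel's):

	#include <stdatomic.h>

	/* One free-check plus one CAS attempt, the contract the retry
	 * loop above expects; lockval is the caller's ~cpu value. */
	static int trylock_once(atomic_uint *lock, unsigned int lockval)
	{
		unsigned int expected = 0;
		return atomic_load(lock) == 0 &&
		       atomic_compare_exchange_strong(lock, &expected, lockval);
	}

	static int trylock_retry(atomic_uint *lock, unsigned int lockval,
				 int retries)
	{
		while (retries-- > 0)
			if (trylock_once(lock, lockval))
				return 1;
		return 0;
	}
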
@@ -121,10 +130,10 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 			smp_yield();
 			count = spin_retry;
 		}
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
 			continue;
-		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
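
The rwlock word layout behind this hunk: bit 31 is the writer bit and the lower 31 bits count readers, so a value that is negative when interpreted as a signed int means a writer holds the lock. That is what the open-coded "(int) old < 0" test checks in place of arch_read_can_lock(), and a reader acquires by compare-and-swapping the snapshot to snapshot + 1; the old "& 0x7fffffffU" masking becomes unnecessary because a snapshot with the writer bit set is rejected before the CAS. A standalone sketch of the reader fast path under that layout:

	#include <stdatomic.h>

	/* Reader fast path: refuse while a writer (bit 31) is in,
	 * otherwise bump the reader count by one. */
	static int read_trylock(atomic_uint *rw)
	{
		unsigned int old = atomic_load(rw);	/* like ACCESS_ONCE() */

		if ((int) old < 0)		/* writer bit set */
			return 0;
		/* CAS from the snapshot to snapshot + 1; it fails if the
		 * reader count moved or a writer slipped in meanwhile. */
		return atomic_compare_exchange_strong(rw, &old, old + 1);
	}
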
@@ -141,12 +150,13 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 			smp_yield();
 			count = spin_retry;
 		}
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
 			continue;
-		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
+		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(_raw_read_lock_wait_flags);
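
All the _flags variants in this patch (this one, the writer version below, and the spinlock version above) follow one interrupt discipline: spin with the caller's interrupts restored, disable them just before the compare-and-swap, and keep them off only when the lock is actually taken. The added local_irq_restore(flags) after a failed CAS is the substantive fix here: the old loop could resume spinning with interrupts still disabled. A standalone sketch of the pattern, with stub helpers standing in for the kernel's local_irq_disable()/local_irq_restore():

	#include <stdatomic.h>

	static void irq_disable(void) { /* arch-specific in the kernel */ }
	static void irq_restore(unsigned long flags) { (void) flags; }

	static void lock_wait_flags(atomic_uint *lock, unsigned int val,
				    unsigned long flags)
	{
		irq_restore(flags);			/* spin with irqs on */
		for (;;) {
			unsigned int expected = 0;

			if (atomic_load(lock) != 0)	/* still held */
				continue;
			irq_disable();			/* irqs off for the CAS */
			if (atomic_compare_exchange_strong(lock, &expected,
							   val))
				return;		/* acquired; irqs stay off */
			irq_restore(flags);	/* lost the race: reopen irqs */
		}
	}
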
@@ -157,10 +167,10 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!arch_read_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if ((int) old < 0)
 			continue;
-		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -169,6 +179,7 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
 
 void _raw_write_lock_wait(arch_rwlock_t *rw)
 {
+	unsigned int old;
 	int count = spin_retry;
 
 	while (1) {
@@ -176,9 +187,10 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 			smp_yield();
 			count = spin_retry;
 		}
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
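
Writers claim the whole word at once: the lock must read 0 (no readers, no writer) and is compare-and-swapped from 0 to 0x80000000, i.e. just the writer bit; "if (old) continue;" is the open-coded replacement for arch_write_can_lock(). A standalone sketch of the writer side, the counterpart to the reader increment shown earlier:

	#include <stdatomic.h>

	#define WRITER_BIT 0x80000000u

	/* Writer fast path: only a completely idle lock (value 0) can be
	 * claimed, and claiming it sets just the writer bit. */
	static int write_trylock(atomic_uint *rw)
	{
		unsigned int expected = 0;

		if (atomic_load(rw) != 0)	/* readers or writer present */
			return 0;
		return atomic_compare_exchange_strong(rw, &expected,
						      WRITER_BIT);
	}
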
@@ -186,6 +198,7 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
 
 void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 {
+	unsigned int old;
 	int count = spin_retry;
 
 	local_irq_restore(flags);
@@ -194,23 +207,27 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 			smp_yield();
 			count = spin_retry;
 		}
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
 			continue;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
+		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(_raw_write_lock_wait_flags);
 
 int _raw_write_trylock_retry(arch_rwlock_t *rw)
 {
+	unsigned int old;
 	int count = spin_retry;
 
 	while (count-- > 0) {
-		if (!arch_write_can_lock(rw))
+		old = ACCESS_ONCE(rw->lock);
+		if (old)
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
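
One mechanical change runs through every hunk above: _raw_compare_and_swap() used to return the old lock value, so callers compared the result against 0 or old; it now returns a success flag, so those comparisons disappear. Presumably the helper is now built on a bool-returning compare-and-swap such as GCC's __sync_bool_compare_and_swap(); a sketch of the two calling conventions, with cas_old and cas_bool as hypothetical names:

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Old convention: return the value witnessed in memory and let
	 * the caller compare it against what it expected. */
	static unsigned int cas_old(atomic_uint *p, unsigned int old,
				    unsigned int new)
	{
		atomic_compare_exchange_strong(p, &old, new);
		return old;	/* unchanged on success, updated on failure */
	}

	/* New convention: report success directly. */
	static bool cas_bool(atomic_uint *p, unsigned int old, unsigned int new)
	{
		return atomic_compare_exchange_strong(p, &old, new);
	}
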