-rw-r--r--  arch/x86/xen/spinlock.c  65
-rw-r--r--  drivers/xen/events.c     25
-rw-r--r--  include/xen/events.h      2
3 files changed, 77 insertions, 15 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8dc4d31da67..4884bc603aa 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -47,25 +47,41 @@ static int xen_spin_trylock(struct raw_spinlock *lock)
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-static inline void spinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as interested in a lock. Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
{
+ struct xen_spinlock *prev;
+
+ prev = __get_cpu_var(lock_spinners);
__get_cpu_var(lock_spinners) = xl;
+
wmb(); /* set lock of interest before count */
+
asm(LOCK_PREFIX " incw %0"
: "+m" (xl->spinners) : : "memory");
+
+ return prev;
}
-static inline void unspinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as no longer interested in a lock. Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
{
asm(LOCK_PREFIX " decw %0"
: "+m" (xl->spinners) : : "memory");
- wmb(); /* decrement count before clearing lock */
- __get_cpu_var(lock_spinners) = NULL;
+ wmb(); /* decrement count before restoring lock */
+ __get_cpu_var(lock_spinners) = prev;
}
static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+ struct xen_spinlock *prev;
int irq = __get_cpu_var(lock_kicker_irq);
int ret;
@@ -74,23 +90,42 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
return 0;
/* announce we're spinning */
- spinning_lock(xl);
+ prev = spinning_lock(xl);
- /* clear pending */
- xen_clear_irq_pending(irq);
+ do {
+ /* clear pending */
+ xen_clear_irq_pending(irq);
+
+ /* check again to make sure it didn't become free while
+ we weren't looking */
+ ret = xen_spin_trylock(lock);
+ if (ret) {
+ /*
+ * If we interrupted another spinlock while it
+ * was blocking, make sure it doesn't block
+ * without rechecking the lock.
+ */
+ if (prev != NULL)
+ xen_set_irq_pending(irq);
+ goto out;
+ }
- /* check again make sure it didn't become free while
- we weren't looking */
- ret = xen_spin_trylock(lock);
- if (ret)
- goto out;
+ /*
+ * Block until irq becomes pending. If we're
+ * interrupted at this point (after the trylock but
+ * before entering the block), then the nested lock
+ * handler guarantees that the irq will be left
+ * pending if there's any chance the lock became free;
+ * xen_poll_irq() returns immediately if the irq is
+ * pending.
+ */
+ xen_poll_irq(irq);
+ } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
- /* block until irq becomes pending */
- xen_poll_irq(irq);
kstat_this_cpu.irqs[irq]++;
out:
- unspinning_lock(xl);
+ unspinning_lock(xl, prev);
return ret;
}
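
A reading aid for the hunk above, not part of the patch: spinning_lock() now returns the old per-CPU value because an interrupt taken while a CPU is inside xen_spin_lock_slow() may itself enter the slow path for a different lock, and both the per-CPU "lock of interest" and the shared pending bit must survive that nesting. The sketch below reproduces the structure as a compilable userspace C11 analogue; every demo_* name is invented, the _Thread_local pointer stands in for the per-CPU lock_spinners, and a single shared flag stands in for the per-CPU kicker event channel.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_lock {
	atomic_int owner;	/* 0 = free, 1 = held */
	atomic_int spinners;	/* ~ xl->spinners */
};

static _Thread_local struct demo_lock *spinning;	/* ~ lock_spinners */
static atomic_bool kick_pending;			/* ~ evtchn pending bit */

static bool demo_trylock(struct demo_lock *xl)
{
	int free = 0;
	return atomic_compare_exchange_strong(&xl->owner, &free, 1);
}

static void demo_poll(void)
{
	/* stand-in for the blocking poll; here it returns immediately,
	   which the loop below treats as a spurious wakeup */
}

static bool demo_lock_slow(struct demo_lock *xl)
{
	/* ~ spinning_lock(): announce interest, remember what we nested in */
	struct demo_lock *prev = spinning;
	bool got = false;

	spinning = xl;
	atomic_fetch_add(&xl->spinners, 1);

	do {
		atomic_store(&kick_pending, false);	/* clear pending */
		got = demo_trylock(xl);			/* recheck the lock */
		if (got) {
			/*
			 * Nested inside another spinner: leave the kick
			 * pending so the outer context's poll returns at
			 * once and it rechecks its own lock.
			 */
			if (prev != NULL)
				atomic_store(&kick_pending, true);
			break;
		}
		demo_poll();				/* ~ xen_poll_irq() */
	} while (!atomic_load(&kick_pending));		/* spurious wakeup? */

	/* ~ unspinning_lock(): drop interest, restore the outer lock */
	atomic_fetch_sub(&xl->spinners, 1);
	spinning = prev;
	return got;
}

static void demo_unlock(struct demo_lock *xl)
{
	atomic_store(&xl->owner, 0);
	if (atomic_load(&xl->spinners))		/* anyone waiting? */
		atomic_store(&kick_pending, true);	/* ~ kick via evtchn */
}

static void demo_lock(struct demo_lock *xl)
{
	/* timed fast spin elided; retry if kicked but beaten to the lock */
	while (!demo_trylock(xl) && !demo_lock_slow(xl))
		;
}

The real patch differs in the obvious ways: the pending bit lives in the shared-info page, xen_poll_irq() is a blocking hypercall rather than an immediate return, and there is one kicker irq per CPU rather than a single global flag.
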
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a0837036d89..b6c2b8f16be 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -164,6 +164,12 @@ static inline void set_evtchn(int port)
sync_set_bit(port, &s->evtchn_pending[0]);
}
+static inline int test_evtchn(int port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ return sync_test_bit(port, &s->evtchn_pending[0]);
+}
+
/**
* notify_remote_via_irq - send event to remote end of event channel via irq
@@ -732,6 +738,25 @@ void xen_clear_irq_pending(int irq)
clear_evtchn(evtchn);
}
+void xen_set_irq_pending(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+
+ if (VALID_EVTCHN(evtchn))
+ set_evtchn(evtchn);
+}
+
+bool xen_test_irq_pending(int irq)
+{
+ int evtchn = evtchn_from_irq(irq);
+ bool ret = false;
+
+ if (VALID_EVTCHN(evtchn))
+ ret = test_evtchn(evtchn);
+
+ return ret;
+}
+
/* Poll waiting for an irq to become pending. In the usual case, the
irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
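
The three event-channel helpers added here share one shape: map the irq to its event channel, guard with VALID_EVTCHN(), and apply a sync (LOCK-prefixed) bit operation to the pending bitmap in the shared-info page. A userspace analogue of those bitmap operations, assuming C11 atomics; the demo_* names and the array size are illustrative, not Xen code:

#include <stdatomic.h>
#include <stdbool.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

static atomic_ulong pending_bitmap[4];	/* ~ s->evtchn_pending[]; size arbitrary */

static void demo_set_evtchn(int port)
{
	atomic_fetch_or(&pending_bitmap[port / BITS_PER_WORD],
			1UL << (port % BITS_PER_WORD));
}

static void demo_clear_evtchn(int port)
{
	atomic_fetch_and(&pending_bitmap[port / BITS_PER_WORD],
			 ~(1UL << (port % BITS_PER_WORD)));
}

static bool demo_test_evtchn(int port)
{
	return atomic_load(&pending_bitmap[port / BITS_PER_WORD])
	       & (1UL << (port % BITS_PER_WORD));
}

xen_set_irq_pending() is what lets the nested spinner re-kick its own CPU: a bit set by hand is indistinguishable, from xen_poll_irq()'s point of view, from a kick sent by the lock's releaser.
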
diff --git a/include/xen/events.h b/include/xen/events.h
index 4680ff3fbc9..0d5f1adc036 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -46,6 +46,8 @@ extern void xen_irq_resume(void);
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
+void xen_set_irq_pending(int irq);
+bool xen_test_irq_pending(int irq);
/* Poll waiting for an irq to become pending. In the usual case, the
irq will be disabled so it won't deliver an interrupt. */
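
With these two declarations the header exposes a complete level-triggered wait protocol: clear the pending bit, recheck the condition, poll, and filter spurious wakeups. A hedged caller-side sketch; wait_on_irq() and cond are hypothetical, while the four prototypes are the ones exported by this header after the patch:

#include <stdbool.h>

/* prototypes as exported by include/xen/events.h after this patch */
void xen_clear_irq_pending(int irq);
void xen_set_irq_pending(int irq);
bool xen_test_irq_pending(int irq);
void xen_poll_irq(int irq);

/* hypothetical caller: wait until cond() holds, kicked via irq */
static bool wait_on_irq(int irq, bool (*cond)(void))
{
	do {
		xen_clear_irq_pending(irq);	/* consume any stale kick */
		if (cond())			/* recheck with kick armed */
			return true;
		xen_poll_irq(irq);		/* returns at once if pending */
	} while (!xen_test_irq_pending(irq));	/* re-poll on spurious wakeup */

	return false;	/* genuinely kicked: caller rechecks and retries */
}

This is exactly the shape of xen_spin_lock_slow() above; the clear-then-recheck-then-poll order is what closes the window between observing the lock busy and blocking on the kick.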