-rw-r--r--	include/linux/irq.h	|  1
-rw-r--r--	kernel/irq/chip.c	| 26
-rw-r--r--	kernel/irq/internals.h	| 11
-rw-r--r--	kernel/irq/spurious.c	| 51
4 files changed, 61 insertions(+), 28 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f5e900309d2..e32b64ccdc8 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -71,6 +71,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
+#define IRQ_POLL_INPROGRESS 0x20000000 /* IRQ poll is in progress */

#define IRQF_MODIFY_MASK \
	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 622b55ac0e0..31258782742 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -448,6 +448,13 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

+static bool irq_check_poll(struct irq_desc *desc)
+{
+	if (!(desc->status & IRQ_POLL_INPROGRESS))
+		return false;
+	return irq_wait_for_poll(desc);
+}
+
/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
@@ -469,7 +476,9 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
-		goto out_unlock;
+		if (!irq_check_poll(desc))
+			goto out_unlock;
+
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);
@@ -510,7 +519,9 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	mask_ack_irq(desc);

	if (unlikely(desc->status & IRQ_INPROGRESS))
-		goto out_unlock;
+		if (!irq_check_poll(desc))
+			goto out_unlock;
+
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);
@@ -558,7 +569,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
-		goto out;
+		if (!irq_check_poll(desc))
+			goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);
@@ -620,9 +632,11 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
-		desc->status |= (IRQ_PENDING | IRQ_MASKED);
-		mask_ack_irq(desc);
-		goto out_unlock;
+		if (!irq_check_poll(desc)) {
+			desc->status |= (IRQ_PENDING | IRQ_MASKED);
+			mask_ack_irq(desc);
+			goto out_unlock;
+		}
	}

	kstat_incr_irqs_this_cpu(irq, desc);
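
All four flow handlers above gain the same entry check: IRQ_INPROGRESS alone no longer forces an early exit, because the bit may have been set by the spurious poller rather than by a real handler racing on another CPU. A minimal userspace sketch of that decision follows (flag values copied for illustration only; irq_wait_for_poll() is replaced by a stub that pretends the poller finished and the line is still active):

#include <stdbool.h>
#include <stdio.h>

#define IRQ_INPROGRESS		0x00000100	/* illustrative values */
#define IRQ_POLL_INPROGRESS	0x20000000

/* Stub for the kernel's irq_wait_for_poll(): assume the poller is
 * done and the interrupt is still enabled with an action installed. */
static bool irq_wait_for_poll_stub(void)
{
	return true;
}

/* Mirrors irq_check_poll() from the hunk above. */
static bool irq_check_poll(unsigned int status)
{
	if (!(status & IRQ_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll_stub();
}

int main(void)
{
	/* Real handler racing on another CPU: bail out, as before the patch */
	printf("handler racing: run handler? %d\n",
	       irq_check_poll(IRQ_INPROGRESS));
	/* Spurious poller holds the line: wait, then run the handler */
	printf("poller running: run handler? %d\n",
	       irq_check_poll(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS));
	return 0;
}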
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b5bfa24aa6a..0eff7e92b1a 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -28,6 +28,7 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+bool irq_wait_for_poll(struct irq_desc *desc);

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -47,16 +48,6 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
-#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
-static inline void irq_end(unsigned int irq, struct irq_desc *desc)
-{
-	if (desc->irq_data.chip && desc->irq_data.chip->end)
-		desc->irq_data.chip->end(irq);
-}
-#else
-static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
-#endif
-
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(struct irq_desc *desc)
{
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 56ff8fffb8b..f749d29bfd8 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -25,12 +25,44 @@ static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck the disabled
+ * state and the action (both may have changed in the meantime).
+ * Only if the interrupt is still active do we return true and let
+ * the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+		      "irq poll in progress on cpu %d for irq %d\n",
+		      smp_processor_id(), desc->irq_data.irq))
+		return false;
+
+#ifdef CONFIG_SMP
+	do {
+		raw_spin_unlock(&desc->lock);
+		while (desc->status & IRQ_INPROGRESS)
+			cpu_relax();
+		raw_spin_lock(&desc->lock);
+	} while (desc->status & IRQ_INPROGRESS);
+	/* Might have been disabled in the meantime */
+	return !(desc->status & IRQ_DISABLED) && desc->action;
+#else
+	return false;
+#endif
+}
+
+/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	struct irqaction *action;
-	int ok = 0, work = 0;
+	int ok = 0;

	raw_spin_lock(&desc->lock);
@@ -64,10 +96,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
		goto out;
	}

-	/* Honour the normal IRQ locking */
-	desc->status |= IRQ_INPROGRESS;
+	/* Honour the normal IRQ locking and mark the poll in progress */
+	desc->status |= IRQ_INPROGRESS | IRQ_POLL_INPROGRESS;
	do {
-		work++;
		desc->status &= ~IRQ_PENDING;
		raw_spin_unlock(&desc->lock);
		if (handle_IRQ_event(irq, action) != IRQ_NONE)
@@ -76,14 +107,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
		action = desc->action;
	} while ((desc->status & IRQ_PENDING) && action);
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * If we did actual work for the real IRQ line we must let the
-	 * IRQ controller clean up too
-	 */
-	if (work > 1)
-		irq_end(irq, desc);
-
+	desc->status &= ~(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS);
out:
	raw_spin_unlock(&desc->lock);
	return ok;
@@ -238,6 +262,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
+	if (desc->status & IRQ_POLL_INPROGRESS)
+		return;
+
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
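
The heart of the patch is the handshake in irq_wait_for_poll(): the waiting handler must drop desc->lock before spinning, because the poller takes that same lock around every pass of its loop; spinning with the lock held would deadlock. A rough userspace model of the handshake (assumptions: a pthread mutex stands in for desc->lock and a plain flag word for desc->status; this illustrates the protocol, it is not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define INPROGRESS		0x1	/* simplified stand-ins for the status bits */
#define POLL_INPROGRESS		0x2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* plays desc->lock */
static volatile unsigned int status;				/* plays desc->status */

/* The spurious poller: mark the line polled, run the action unlocked,
 * then clear the marks, as try_one_irq() does. */
static void *poller(void *unused)
{
	(void)unused;

	pthread_mutex_lock(&lock);
	status |= INPROGRESS | POLL_INPROGRESS;
	pthread_mutex_unlock(&lock);

	usleep(1000);			/* "handle_IRQ_event()" runs here */

	pthread_mutex_lock(&lock);
	status &= ~(INPROGRESS | POLL_INPROGRESS);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Mirrors irq_wait_for_poll(): spin with the lock DROPPED, retake it,
 * and recheck, since the poller needs the lock to clear INPROGRESS. */
static bool wait_for_poll(void)
{
	do {
		pthread_mutex_unlock(&lock);
		while (status & INPROGRESS)
			;		/* cpu_relax() in the kernel */
		pthread_mutex_lock(&lock);
	} while (status & INPROGRESS);
	return true;			/* the kernel also rechecks DISABLED/action here */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, poller, NULL);
	usleep(100);			/* timing-dependent demo: let the poller claim the line */

	/* The flow handler's view: lock taken, INPROGRESS observed */
	pthread_mutex_lock(&lock);
	if ((status & POLL_INPROGRESS) && wait_for_poll())
		printf("poll finished, handler may run\n");
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}

Setting IRQ_POLL_INPROGRESS around the poll loop in try_one_irq() is also what makes the early return added to note_interrupt() work: handler invocations triggered by the poller are no longer counted toward the spurious-interrupt threshold.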