From c777ac5594f772ac760e02c3ac71d067616b579d Mon Sep 17 00:00:00 2001
From: Andrew Morton <akpm@osdl.org>
Date: Sat, 25 Mar 2006 03:07:36 -0800
Subject: [PATCH] irq: uninline migration functions

Uninline some massive IRQ migration functions.  Put them in the new
kernel/irq/migration.c.

Cc: Andi Kleen <ak@muc.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 kernel/irq/migration.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 kernel/irq/migration.c

diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
new file mode 100644
index 00000000000..6bdd03c524c
--- /dev/null
+++ b/kernel/irq/migration.c
@@ -0,0 +1,52 @@
+#include <linux/irq.h>
+
+#if defined(CONFIG_GENERIC_PENDING_IRQ)
+
+void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->move_irq = 1;
+	pending_irq_cpumask[irq] = mask;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+void move_native_irq(int irq)
+{
+	cpumask_t tmp;
+	irq_desc_t *desc = irq_descp(irq);
+
+	if (likely (!desc->move_irq))
+		return;
+
+	desc->move_irq = 0;
+
+	if (likely(cpus_empty(pending_irq_cpumask[irq])))
+		return;
+
+	if (!desc->handler->set_affinity)
+		return;
+
+	/* note - we hold the desc->lock */
+	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+
+	/*
+	 * If there is a valid mask to work with, perform the
+	 * disable, re-program, enable sequence.
+	 * This is *not* particularly important for level-triggered
+	 * interrupts, but in an edge-triggered case we might be
+	 * writing the RTE while an active trigger is coming in,
+	 * which could cause some IO-APICs to malfunction.
+	 * Being paranoid, I guess!
+	 */
+	if (unlikely(!cpus_empty(tmp))) {
+		desc->handler->disable(irq);
+		desc->handler->set_affinity(irq, tmp);
+		desc->handler->enable(irq);
+	}
+	cpus_clear(pending_irq_cpumask[irq]);
+}
+
+#endif
-- 
cgit v1.2.3-70-g09d2
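
The code above is a two-phase hand-off: set_pending_irq() only records the
requested destination mask under desc->lock, and move_native_irq() later
performs the actual reprogramming from the interrupt path, where the
hardware can be touched safely.  Below is a compilable toy model of that
hand-off; the types and names are simplified stand-ins for illustration,
not the real kernel definitions.

/*
 * Compilable toy model of the two-phase hand-off above.  All types
 * and names are simplified stand-ins for illustration; they are not
 * the real kernel definitions.
 */
#include <stdio.h>

struct toy_desc {
	int move_irq;           /* migration requested? */
	unsigned pending_mask;  /* requested destination CPUs (bitmask) */
	unsigned affinity;      /* current destination CPUs (bitmask) */
};

/* Phase 1: record the request; the real code does this under desc->lock. */
static void toy_set_pending(struct toy_desc *d, unsigned mask)
{
	d->move_irq = 1;
	d->pending_mask = mask;
}

/* Phase 2: run from the interrupt path, where reprogramming is safe. */
static void toy_move(struct toy_desc *d)
{
	if (!d->move_irq)
		return;
	d->move_irq = 0;

	if (d->pending_mask) {
		d->affinity = d->pending_mask;  /* "re-program" the route */
		d->pending_mask = 0;
	}
}

int main(void)
{
	struct toy_desc d = { .affinity = 1u << 0 };  /* start on CPU 0 */

	toy_set_pending(&d, 1u << 2);  /* ask to move to CPU 2 */
	toy_move(&d);                  /* the next interrupt performs it */
	printf("affinity mask: 0x%x\n", d.affinity);  /* prints 0x4 */
	return 0;
}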


From 501f2499b897ca4be68b1acc7a4bc8cf66f5fd24 Mon Sep 17 00:00:00 2001
From: Bryan Holty <lgeek@frontiernet.net>
Date: Sat, 25 Mar 2006 03:07:37 -0800
Subject: [PATCH] IRQ: prevent enabling of previously disabled interrupt

This fix prevents re-disabling and enabling of a previously disabled
interrupt.  On an SMP system with IRQ balancing enabled, if an interrupt
is disabled from within its own interrupt context with disable_irq_nosync
and is also earmarked for processor migration, the interrupt is blindly
moved to the other processor and enabled without regard for its current
"enabled" state.  If there is an interrupt pending, it will unexpectedly
invoke the irq handler on the new IRQ-owning processor, even though the
irq was previously disabled.

The more intuitive fix would be to invoke disable_irq_nosync and
enable_irq, but since we already hold the desc->lock from __do_IRQ, we
cannot call them directly.  Instead we can use the same disable and
enable logic found in disable_irq_nosync and enable_irq with regard to
the desc->depth.

This now prevents a disabled interrupt from being re-disabled, and more
importantly prevents a disabled interrupt from being incorrectly enabled on
a different processor.
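
To make the failure mode concrete, here is a compilable toy model of the
move path; the types and names are simplified stand-ins, not the kernel's.
The unguarded version re-enables the line unconditionally after
reprogramming, while the guarded version, mirroring the patch below,
leaves a disabled interrupt alone.

/*
 * Compilable toy model of the failure mode; types and names are
 * simplified stand-ins, not the kernel's.
 */
#include <assert.h>

struct toy_irq {
	int disabled;  /* did the handler disable its own line? */
	int enabled;   /* is the line unmasked on the owning CPU? */
};

/* Old behaviour: blindly disable, re-program, enable. */
static void toy_move_unguarded(struct toy_irq *d)
{
	d->enabled = 0;          /* disable */
	/* ... re-program the affinity here ... */
	d->enabled = 1;          /* enable: wrong if the handler disabled it */
}

/* Fixed behaviour: leave a disabled line alone. */
static void toy_move_guarded(struct toy_irq *d)
{
	if (!d->disabled)
		d->enabled = 0;
	/* ... re-program the affinity here ... */
	if (!d->disabled)
		d->enabled = 1;
}

int main(void)
{
	struct toy_irq d = { .disabled = 1, .enabled = 0 };

	toy_move_unguarded(&d);
	assert(d.enabled == 1);  /* the bug: a pending irq now fires */

	d.enabled = 0;
	toy_move_guarded(&d);
	assert(d.enabled == 0);  /* fixed: stays masked until enable_irq */
	return 0;
}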

Signed-off-by: Bryan Holty <lgeek@frontiernet.net>
Cc: Andi Kleen <ak@muc.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 kernel/irq/migration.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 6bdd03c524c..52a8655fa08 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,9 +18,17 @@ void move_native_irq(int irq)
 	cpumask_t tmp;
 	irq_desc_t *desc = irq_descp(irq);
 
-	if (likely (!desc->move_irq))
+	if (likely(!desc->move_irq))
 		return;
 
+	/*
+	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
+	 */
+	if (CHECK_IRQ_PER_CPU(desc->status)) {
+		WARN_ON(1);
+		return;
+	}
+
 	desc->move_irq = 0;
 
 	if (likely(cpus_empty(pending_irq_cpumask[irq])))
@@ -29,7 +37,8 @@ void move_native_irq(int irq)
 	if (!desc->handler->set_affinity)
 		return;
 
-	/* note - we hold the desc->lock */
+	assert_spin_locked(&desc->lock);
+
 	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
 
 	/*
@@ -42,9 +51,13 @@ void move_native_irq(int irq)
 	 * Being paranoid, I guess!
 	 */
 	if (unlikely(!cpus_empty(tmp))) {
-		desc->handler->disable(irq);
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->disable(irq);
+
 		desc->handler->set_affinity(irq, tmp);
-		desc->handler->enable(irq);
+
+		if (likely(!(desc->status & IRQ_DISABLED)))
+			desc->handler->enable(irq);
 	}
 	cpus_clear(pending_irq_cpumask[irq]);
 }
-- 
cgit v1.2.3-70-g09d2
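
For reference, the depth logic that the second commit message alludes to
works as a disable reference count: disable_irq_nosync() masks the line
only on the first disable, and enable_irq() unmasks it only when the last
outstanding disable is balanced.  Here is a compilable toy model of that
behaviour, again with illustrative stand-in types rather than the
kernel's.

/*
 * Compilable toy model of the desc->depth reference counting in
 * disable_irq_nosync()/enable_irq(); illustrative stand-ins only.
 */
#include <assert.h>

struct toy_irq {
	unsigned depth;  /* outstanding disables */
	int masked;      /* is the line masked in "hardware"? */
};

static void toy_disable_nosync(struct toy_irq *d)
{
	if (d->depth++ == 0)     /* only the first disable masks */
		d->masked = 1;
}

static void toy_enable(struct toy_irq *d)
{
	assert(d->depth > 0);    /* an unbalanced enable is a bug */
	if (--d->depth == 0)     /* only the last enable unmasks */
		d->masked = 0;
}

int main(void)
{
	struct toy_irq d = { 0, 0 };

	toy_disable_nosync(&d);
	toy_disable_nosync(&d);  /* nested disable: still masked */
	toy_enable(&d);
	assert(d.masked == 1);   /* one disable still outstanding */
	toy_enable(&d);
	assert(d.masked == 0);   /* balanced again: line unmasked */
	return 0;
}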