author | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-01-23 12:09:36 +0000
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-02-23 17:24:16 +0000
commit | 617912440bf20497d23d01ab58076998aced3f15 (patch) |
tree | 69bc841621171decddfd96c060cfa404184fdd2e |
parent | 1dbfa187dad57d3e17207328ec8bd2d90b4177d2 (diff) |
ARM: irq migration: ensure migration is handled safely
Take the appropriate locks to ensure that IRQ migration off the
current CPU is race-free. A concurrent set_affinity via procfs may be
running on another CPU in parallel with the IRQ migration, which can
lead to unpredictable results.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
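The race the patch closes is between migrate_irqs() on the CPU going offline and a set_affinity write arriving via procfs on another CPU; both now serialize on the per-descriptor lock before touching the affinity. Below is a minimal user-space analogy of that pattern, not kernel code: `struct fake_irq_desc`, `set_affinity()` and `migrate_one()` are invented names, and a pthread mutex stands in for `desc->lock`.

```c
/*
 * User-space sketch of the serialization the patch adds: one thread
 * plays migrate_irqs() on a dying CPU, the other a concurrent
 * set_affinity via procfs.  Both take the per-descriptor lock before
 * inspecting or rewriting the affinity, so neither sees a half-updated
 * state.  All names here are illustrative, not kernel API.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_irq_desc {
	pthread_mutex_t lock;	/* stands in for desc->lock */
	unsigned int affinity;	/* stands in for the affinity cpumask */
	unsigned int node;	/* CPU the IRQ is currently routed to */
};

static struct fake_irq_desc desc = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.affinity = 0x3,	/* CPUs 0 and 1 */
	.node = 1,
};

/* "procfs" writer: retargets the IRQ to CPU 0 only */
static void *set_affinity(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&desc.lock);
	desc.affinity = 0x1;
	desc.node = 0;
	pthread_mutex_unlock(&desc.lock);
	return NULL;
}

/* "CPU 1 going offline": moves the IRQ away if it still points here */
static void *migrate_one(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&desc.lock);
	if (desc.node == 1) {
		desc.affinity &= ~0x2;	/* drop the dying CPU */
		desc.node = 0;		/* reroute to a surviving CPU */
	}
	pthread_mutex_unlock(&desc.lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, set_affinity, NULL);
	pthread_create(&b, NULL, migrate_one, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("affinity=0x%x node=%u\n", desc.affinity, desc.node);
	return 0;
}
```

Whichever thread wins the lock, the loser observes a consistent descriptor, which mirrors why the patch moves the affinity decision under desc->lock instead of reading d->affinity unlocked.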
-rw-r--r-- | arch/arm/kernel/irq.c | 50
1 file changed, 31 insertions(+), 19 deletions(-)
```diff
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2f19aa5f339..3535d3793e6 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), true);
-	raw_spin_unlock_irq(&desc->lock);
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
+
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }
 
 /*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
 		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
 
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;
 
-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
+			if (d->node != cpu)
+				break;
 
-			route_irq(desc, i, newcpu);
-		}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
```
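The fallback policy in the new migrate_one_irq() is: if the IRQ's affinity mask no longer intersects the online CPUs, route it to any online CPU and return true so the caller can emit the rate-limited "no longer affine" warning. A standalone sketch of just that selection logic, using plain bitmasks instead of struct cpumask, is below; `pick_target_cpu()` and its arguments are invented names, not kernel API, and at least one online CPU is assumed.

```c
/*
 * Sketch of the affinity-fallback decision made by migrate_one_irq(),
 * expressed with plain bitmasks.  Not kernel code; illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

/* Returns the chosen CPU; sets *broken when the requested affinity had
 * to be ignored because none of its CPUs are still online. */
static unsigned int pick_target_cpu(unsigned long affinity,
				    unsigned long online, bool *broken)
{
	unsigned long usable = affinity & online;	/* like cpumask_any_and() */

	*broken = false;
	if (!usable) {
		/* Affinity excludes every online CPU: fall back to any
		 * online CPU and report the affinity as broken. */
		usable = online;
		*broken = true;
	}
	return __builtin_ctzl(usable);	/* lowest set bit = CPU number */
}

int main(void)
{
	bool broken;
	/* IRQ bound to CPU 2 only, but only CPUs 0 and 1 remain online. */
	unsigned int cpu = pick_target_cpu(0x4, 0x3, &broken);

	printf("target cpu%u%s\n", cpu, broken ? " (affinity broken)" : "");
	return 0;
}
```

With affinity 0x4 and online mask 0x3 the sketch prints "target cpu0 (affinity broken)", matching the case where the patched kernel would reroute the IRQ and print the warning for the CPU being taken offline.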