| author | Kumar Gala <galak@kernel.crashing.org> | 2008-10-28 18:01:39 +0000 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2008-10-31 16:13:50 +1100 |
| commit | 3c10c9c45e290022ca7d2aa1ad33a0b6ed767520 (patch) | |
| tree | 002769098cd2b53aad995acac3c42f0331302588 /arch/powerpc/sysdev | |
| parent | f9226d572d2f8b5f564596db8c6a13e458c46191 (diff) | |
powerpc/mpic: Fix regression caused by change of default IRQ affinity
The Freescale implementation of MPIC only allows a single CPU destination
for non-IPI interrupts. We add a flag to mpic_init() to distinguish
these variants of MPIC, and we pull in irq_choose_cpu() from sparc64 to
select a single CPU as the destination of the interrupt.
This is to deal with the fact that the default smp affinity was
changed by commit 18404756765c713a0be4eb1082920c04822ce588 ("genirq:
Expose default irq affinity mask (take 3)") to be all CPUs.
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
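
For context, a Freescale board port would opt into the single-destination behaviour by setting the new MPIC_SINGLE_DEST_CPU flag alongside its other MPIC flags when the controller is allocated and initialised. The sketch below is a hypothetical illustration only: the device-node lookup, register resource, flag combination, ISU size, IRQ count and name are placeholder assumptions, not taken from this commit.

```c
/*
 * Hypothetical board pic_init sketch: requesting the single-destination
 * behaviour added by this patch.  All parameters other than
 * MPIC_SINGLE_DEST_CPU are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/prom.h>
#include <asm/mpic.h>

static void __init example_board_pic_init(void)
{
	struct device_node *np;
	struct resource r;
	struct mpic *mpic;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np == NULL)
		return;

	if (of_address_to_resource(np, 0, &r)) {
		of_node_put(np);
		return;
	}

	/* MPIC_SINGLE_DEST_CPU marks the FSL variant that can only
	 * route a non-IPI source to one CPU at a time. */
	mpic = mpic_alloc(np, r.start,
			  MPIC_PRIMARY | MPIC_WANTS_RESET |
			  MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,
			  0, 256, " OpenPIC  ");
	of_node_put(np);
	BUG_ON(mpic == NULL);

	mpic_init(mpic);
}
```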
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r-- | arch/powerpc/sysdev/mpic.c | 59 |
1 file changed, 55 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 8e3478c995e..f6299cca781 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -563,6 +563,51 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 
 #endif /* CONFIG_MPIC_U3_HT_IRQS */
 
+#ifdef CONFIG_SMP
+static int irq_choose_cpu(unsigned int virt_irq)
+{
+	cpumask_t mask = irq_desc[virt_irq].affinity;
+	int cpuid;
+
+	if (cpus_equal(mask, CPU_MASK_ALL)) {
+		static int irq_rover;
+		static DEFINE_SPINLOCK(irq_rover_lock);
+		unsigned long flags;
+
+		/* Round-robin distribution... */
+	do_round_robin:
+		spin_lock_irqsave(&irq_rover_lock, flags);
+
+		while (!cpu_online(irq_rover)) {
+			if (++irq_rover >= NR_CPUS)
+				irq_rover = 0;
+		}
+		cpuid = irq_rover;
+		do {
+			if (++irq_rover >= NR_CPUS)
+				irq_rover = 0;
+		} while (!cpu_online(irq_rover));
+
+		spin_unlock_irqrestore(&irq_rover_lock, flags);
+	} else {
+		cpumask_t tmp;
+
+		cpus_and(tmp, cpu_online_map, mask);
+
+		if (cpus_empty(tmp))
+			goto do_round_robin;
+
+		cpuid = first_cpu(tmp);
+	}
+
+	return cpuid;
+}
+#else
+static int irq_choose_cpu(unsigned int virt_irq)
+{
+	return hard_smp_processor_id();
+}
+#endif
 
 #define mpic_irq_to_hw(virq)	((unsigned int)irq_map[virq].hwirq)
 
@@ -777,12 +822,18 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
 	struct mpic *mpic = mpic_from_irq(irq);
 	unsigned int src = mpic_irq_to_hw(irq);
-	cpumask_t tmp;
+	if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
+		int cpuid = irq_choose_cpu(irq);
 
-	cpus_and(tmp, cpumask, cpu_online_map);
+		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
+	} else {
+		cpumask_t tmp;
 
-	mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
-		       mpic_physmask(cpus_addr(tmp)[0]));
+		cpus_and(tmp, cpumask, cpu_online_map);
+
+		mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
+			       mpic_physmask(cpus_addr(tmp)[0]));
+	}
 }
 
 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
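
To see how the rover still spreads load even though each interrupt ends up on exactly one CPU, the standalone sketch below models the selection logic outside the kernel. cpu_online() and NR_CPUS are stubbed, and the spinlock is dropped, so this is an illustration of the algorithm under those assumptions rather than kernel code.

```c
/* Standalone model of the rover-based CPU selection used by
 * irq_choose_cpu() above.  cpu_online() and NR_CPUS are stubs so the
 * program compiles and runs in user space. */
#include <stdio.h>

#define NR_CPUS 4

/* Pretend CPU 2 is offline to show that it is skipped. */
static int cpu_online(int cpu)
{
	return cpu != 2;
}

static int choose_cpu_round_robin(void)
{
	static int rover;
	int cpuid;

	/* Land on an online CPU... */
	while (!cpu_online(rover))
		if (++rover >= NR_CPUS)
			rover = 0;
	cpuid = rover;

	/* ...then advance the rover so the next interrupt gets a
	 * different online CPU. */
	do {
		if (++rover >= NR_CPUS)
			rover = 0;
	} while (!cpu_online(rover));

	return cpuid;
}

int main(void)
{
	for (int irq = 0; irq < 8; irq++)
		printf("irq %d -> cpu %d\n", irq, choose_cpu_round_robin());
	return 0;
}
```

Running the model assigns successive interrupts to CPUs 0, 1, 3, 0, 1, 3, ..., which is the behaviour the patch relies on when the affinity mask is "all CPUs" but the hardware can only program a single destination.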