author		David S. Miller <davem@sunset.davemloft.net>	2006-03-06 22:50:44 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 01:14:21 -0800
commit		ee29074d3bd23848905f52c515974e0cd0219faa (patch)
tree		d5306446b2e26d9e45f65467b4f3b3f3b0c8494c /arch/sparc64/kernel/smp.c
parent		a77754b4d0731321db266c6c60ffcd7c62757da5 (diff)
[SPARC64]: Fix new context version SMP handling.
Don't piggyback the SMP receive-signal code to do the
context version change handling.
Instead, allocate another fixed PIL number for this
asynchronous cross-call. We can't use smp_call_function()
because this path is entered with interrupts disabled
and a few spinlocks held; see the sketch below.
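A minimal sketch of the wiring this relies on. The PIL constant and the
trap-table entry live outside this diffstat (include/asm-sparc64/pil.h
and ttable.S), so the names and the value shown here are assumptions for
illustration, not part of this diff:

	/* Assumed sketch: a fixed PIL reserved for the context-version
	 * cross-call, alongside the existing SMP PILs in
	 * include/asm-sparc64/pil.h.  The value 4 is an assumption. */
	#define PIL_SMP_CTX_NEW_VERSION		4

	/* The trap table would then route softints at that PIL straight
	 * to the new client (ttable.S syntax assumed):
	 *
	 *	tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
	 *
	 * The client only acks the softint and reloads the secondary
	 * context; the sender never waits on it.  That fire-and-forget
	 * shape is what makes it safe to raise with interrupts disabled
	 * and spinlocks held, unlike smp_call_function(), whose sender
	 * spins waiting for every target to acknowledge. */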
Also, fix smp_call_function_mask() to count "cpus" correctly.
There is no guarantee that the local cpu is in the mask,
yet that is exactly what this code was assuming; see the
before/after sketch below.
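Before and after, using the same identifiers as the diff below (the
"after" half jumps to the out_unlock label added by this patch, since
call_lock is already held at that point):

	/* Before: assumes the caller's cpu is always in 'mask', so one
	 * potential target is unconditionally subtracted.  If the local
	 * cpu is NOT in the mask, a mask holding one remote cpu counts
	 * as zero and the function returns without sending anything. */
	int cpus = cpus_weight(mask) - 1;

	if (!cpus)
		return 0;

	/* After: drop the local cpu explicitly, then count what is
	 * left; the early exit now fires only when no remote targets
	 * remain. */
	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;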
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--	arch/sparc64/kernel/smp.c	39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c4548a88953..cf56128097c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -760,12 +760,9 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
                                   int nonatomic, int wait, cpumask_t mask)
 {
         struct call_data_struct data;
-        int cpus = cpus_weight(mask) - 1;
+        int cpus;
         long timeout;
 
-        if (!cpus)
-                return 0;
-
         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());
 
@@ -776,6 +773,11 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
 
         spin_lock(&call_lock);
 
+        cpu_clear(smp_processor_id(), mask);
+        cpus = cpus_weight(mask);
+        if (!cpus)
+                goto out_unlock;
+
         call_data = &data;
 
         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
@@ -792,6 +794,7 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
                 udelay(1);
         }
 
+out_unlock:
         spin_unlock(&call_lock);
 
         return 0;
@@ -845,6 +848,7 @@ extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
+extern unsigned long xcall_new_mmu_context_version;
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -974,7 +978,13 @@ void smp_receive_signal(int cpu)
 
 void smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
+        clear_softint(1 << irq);
+}
+
+void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+{
         struct mm_struct *mm;
+        unsigned long flags;
 
         clear_softint(1 << irq);
 
@@ -982,25 +992,24 @@ void smp_receive_signal_client(int irq, struct pt_regs *regs)
          * the version of the one we are using is now out of date.
          */
         mm = current->active_mm;
-        if (likely(mm)) {
-                unsigned long flags;
+        if (unlikely(!mm || (mm == &init_mm)))
+                return;
 
-                spin_lock_irqsave(&mm->context.lock, flags);
+        spin_lock_irqsave(&mm->context.lock, flags);
 
-                if (unlikely(!CTX_VALID(mm->context)))
-                        get_new_mmu_context(mm);
+        if (unlikely(!CTX_VALID(mm->context)))
+                get_new_mmu_context(mm);
 
-                load_secondary_context(mm);
-                __flush_tlb_mm(CTX_HWBITS(mm->context),
-                               SECONDARY_CONTEXT);
+        spin_unlock_irqrestore(&mm->context.lock, flags);
 
-                spin_unlock_irqrestore(&mm->context.lock, flags);
-        }
+        load_secondary_context(mm);
+        __flush_tlb_mm(CTX_HWBITS(mm->context),
+                       SECONDARY_CONTEXT);
 }
 
 void smp_new_mmu_context_version(void)
 {
-        __smp_receive_signal_mask(cpu_online_map);
+        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 }
 
 void smp_report_regs(void)
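For context, a hedged sketch of the call site that forces the fixed-PIL
design. The real get_new_mmu_context() lives in arch/sparc64/mm/init.c,
outside this diffstat, so the shape below, the ctx_alloc_lock name, and
the version_wrapped / allocate_context_number helpers are illustrative
assumptions, not the actual implementation:

	/* Assumed, simplified caller.  The context-version wrap is
	 * detected while ctx_alloc_lock is held, typically with
	 * interrupts already disabled, which rules out
	 * smp_call_function(): it WARNs when irqs are disabled and its
	 * sender spins until every target answers. */
	void get_new_mmu_context(struct mm_struct *mm)
	{
		int version_wrapped;	/* hypothetical stand-in */

		spin_lock(&ctx_alloc_lock);
		/* hypothetical helper: hand out a context number and
		 * report whether the global version had to be bumped */
		version_wrapped = allocate_context_number(mm);
		if (version_wrapped)
			/* Fire-and-forget cross-call at the new fixed
			 * PIL; each cpu revalidates its secondary
			 * context when it takes the softint. */
			smp_new_mmu_context_version();
		spin_unlock(&ctx_alloc_lock);
	}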