Diffstat (limited to 'arch/sparc64/kernel/smp.c')
-rw-r--r--  arch/sparc64/kernel/smp.c  |  50
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 7dc28a48426..90eaca3ec9a 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -745,12 +745,21 @@ struct call_data_struct {
int wait;
};
-static DEFINE_SPINLOCK(call_lock);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
-/*
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: currently unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
+ *
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
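For reference, the four-argument signature documented above might be used as in the following hedged sketch; the callback, counter, and caller names are hypothetical and not part of this patch:

static atomic_t acks = ATOMIC_INIT(0);

static void count_ack(void *info)
{
	/* Runs on every other cpu; must be fast and must not block. */
	atomic_inc(&acks);
}

static void ping_other_cpus(void)
{
	/* Process context, interrupts enabled: nonatomic=0, wait=1. */
	if (smp_call_function(count_ack, NULL, 0, 1) == 0)
		printk("%d other cpus answered\n", atomic_read(&acks));
}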
@@ -759,7 +768,6 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
{
struct call_data_struct data;
int cpus;
- long timeout;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
@@ -777,31 +785,18 @@ static int smp_call_function_mask(void (*func)(void *info), void *info,
goto out_unlock;
call_data = &data;
+ mb();	/* make call_data globally visible before the cross call */
smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
- /*
- * Wait for other cpus to complete function or at
- * least snap the call data.
- */
- timeout = 1000000;
- while (atomic_read(&data.finished) != cpus) {
- if (--timeout <= 0)
- goto out_timeout;
- barrier();
- udelay(1);
- }
+ /* Wait for response */
+ while (atomic_read(&data.finished) != cpus)
+ cpu_relax();
out_unlock:
spin_unlock(&call_lock);
return 0;
-
-out_timeout:
- spin_unlock(&call_lock);
- printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
- cpus, atomic_read(&data.finished));
- return 0;
}
int smp_call_function(void (*func)(void *info), void *info,
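The next hunk touches smp_call_function_client(), the receiving side of this call_data handshake. For orientation, a hedged sketch of that responder path; the func and info fields are assumed (this diff only shows wait and finished), and the body is a sketch, not the patched implementation:

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func)(void *info) = call_data->func;	/* assumed field */
	void *info = call_data->info;			/* assumed field */

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* Sender spins until the function has run everywhere,
		 * so acknowledge only after running it. */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* Acknowledge as soon as the call data is snapped;
		 * the sender need not wait for func to finish. */
		atomic_inc(&call_data->finished);
		func(info);
	}
}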
@@ -830,9 +825,16 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
static void tsb_sync(void *info)
{
+ struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
struct mm_struct *mm = info;
- if (current->active_mm == mm)
+ /* It is not valid to test "current->active_mm == mm" here.
+ *
+ * The value of "current" is not changed atomically with
+ * switch_mm(). But that's OK, we just need to check the
+ * current cpu's trap block PGD physical address.
+ */
+ if (tp->pgd_paddr == __pa(mm->pgd))
tsb_context_switch(mm);
}
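tsb_sync() is exactly the kind of fast, non-blocking callback the kernel-doc above describes, dispatched to each cpu in cross-call context. A hedged sketch of a dispatch wrapper; the wrapper name and call site are assumptions, since this patch does not show the caller:

void smp_tsb_sync(struct mm_struct *mm)	/* hypothetical wrapper */
{
	/* Run tsb_sync() on all other cpus and wait, then sync locally. */
	smp_call_function(tsb_sync, mm, 0, 1);
	tsb_sync(mm);
}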
@@ -1278,7 +1280,7 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
spin_lock_irqsave(&prof_setup_lock, flags);
- for_each_cpu(i)
+ for_each_possible_cpu(i)
prof_multiplier(i) = multiplier;
current_tick_offset = (timer_tick_offset / multiplier);
spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -1306,12 +1308,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
if (tlb_type == hypervisor) {
int j;
/* XXX get this mapping from machine description */
- for_each_cpu(j) {
+ for_each_possible_cpu(j) {
if ((j >> 2) == (i >> 2))
cpu_set(j, cpu_sibling_map[i]);
}
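The (j >> 2) == (i >> 2) test above groups cpu ids into blocks of four hardware threads per core. A standalone illustration of that grouping, in plain user-space C rather than kernel code:

#include <stdio.h>

int main(void)
{
	/* cpus that agree in all but the low two id bits share a core:
	 * 0-3 form one sibling group, 4-7 the next, and so on. */
	for (int i = 0; i < 8; i++) {
		printf("cpu %d siblings:", i);
		for (int j = 0; j < 8; j++)
			if ((j >> 2) == (i >> 2))
				printf(" %d", j);
		printf("\n");
	}
	return 0;
}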