Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r-- | arch/arm/kernel/smp.c | 173
1 file changed, 159 insertions, 14 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ebd8ad274d7..84f4cbf652e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -19,14 +19,15 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
+#include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -42,6 +43,8 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
+#include <asm/mach/arch.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -50,8 +53,15 @@
  */
 struct secondary_data secondary_data;
 
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
 enum ipi_msg_type {
-        IPI_TIMER = 2,
+        IPI_WAKEUP,
+        IPI_TIMER,
         IPI_RESCHEDULE,
         IPI_CALL_FUNC,
         IPI_CALL_FUNC_SINGLE,
@@ -60,6 +70,14 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
+static struct smp_operations smp_ops;
+
+void __init smp_set_ops(struct smp_operations *ops)
+{
+        if (ops)
+                smp_ops = *ops;
+};
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
         int ret;
@@ -100,13 +118,64 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
         return ret;
 }
 
+/* platform specific SMP operations */
+void __init smp_init_cpus(void)
+{
+        if (smp_ops.smp_init_cpus)
+                smp_ops.smp_init_cpus();
+}
+
+static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+        if (smp_ops.smp_prepare_cpus)
+                smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+static void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+        if (smp_ops.smp_secondary_init)
+                smp_ops.smp_secondary_init(cpu);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+        if (smp_ops.smp_boot_secondary)
+                return smp_ops.smp_boot_secondary(cpu, idle);
+        return -ENOSYS;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
+static int platform_cpu_kill(unsigned int cpu)
+{
+        if (smp_ops.cpu_kill)
+                return smp_ops.cpu_kill(cpu);
+        return 1;
+}
+
+static void platform_cpu_die(unsigned int cpu)
+{
+        if (smp_ops.cpu_die)
+                smp_ops.cpu_die(cpu);
+}
+
+static int platform_cpu_disable(unsigned int cpu)
+{
+        if (smp_ops.cpu_disable)
+                return smp_ops.cpu_disable(cpu);
+
+        /*
+         * By default, allow disabling all CPUs except the first one,
+         * since this is special on a lot of platforms, e.g. because
+         * of clock tick interrupts.
+         */
+        return cpu == 0 ? -EPERM : 0;
+}
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
-int __cpu_disable(void)
+int __cpuinit __cpu_disable(void)
 {
         unsigned int cpu = smp_processor_id();
         int ret;
@@ -134,8 +203,11 @@ int __cpu_disable(void)
         /*
          * Flush user cache and TLB mappings, and then remove this CPU
          * from the vm mask set of all processes.
+         *
+         * Caches are flushed to the Level of Unification Inner Shareable
+         * to write-back dirty lines to unified caches shared by all CPUs.
          */
-        flush_cache_all();
+        flush_cache_louis();
         local_flush_tlb_all();
 
         clear_tasks_mm_cpumask(cpu);
@@ -149,7 +221,7 @@ static DECLARE_COMPLETION(cpu_died);
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
-void __cpu_die(unsigned int cpu)
+void __cpuinit __cpu_die(unsigned int cpu)
 {
         if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
                 pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -209,6 +281,7 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
         struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
         cpu_info->loops_per_jiffy = loops_per_jiffy;
+        cpu_info->cpuid = read_cpuid_id();
 
         store_cpu_topology(cpuid);
 }
@@ -222,22 +295,29 @@ static void percpu_timer_setup(void);
 asmlinkage void __cpuinit secondary_start_kernel(void)
 {
         struct mm_struct *mm = &init_mm;
-        unsigned int cpu = smp_processor_id();
+        unsigned int cpu;
+
+        /*
+         * The identity mapping is uncached (strongly ordered), so
+         * switch away from it before attempting any exclusive accesses.
+         */
+        cpu_switch_mm(mm->pgd, mm);
+        enter_lazy_tlb(mm, current);
+        local_flush_tlb_all();
 
         /*
          * All kernel threads share the same mm context; grab a
          * reference and switch to it.
          */
+        cpu = smp_processor_id();
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
         cpumask_set_cpu(cpu, mm_cpumask(mm));
-        cpu_switch_mm(mm->pgd, mm);
-        enter_lazy_tlb(mm, current);
-        local_flush_tlb_all();
+
+        cpu_init();
 
         printk("CPU%u: Booted secondary processor\n", cpu);
 
-        cpu_init();
         preempt_disable();
         trace_hardirqs_off();
 
@@ -287,10 +367,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
                num_online_cpus(), bogosum / (500000/HZ),
                (bogosum / (5000/HZ)) % 100);
+
+        hyp_mode_check();
 }
 
 void __init smp_prepare_boot_cpu(void)
 {
+        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -341,13 +424,19 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
         smp_cross_call(mask, IPI_CALL_FUNC);
 }
 
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+        smp_cross_call(mask, IPI_WAKEUP);
+}
+
 void arch_send_call_function_single_ipi(int cpu)
 {
         smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 static const char *ipi_types[NR_IPI] = {
-#define S(x,s)  [x - IPI_TIMER] = s
+#define S(x,s)  [x] = s
+        S(IPI_WAKEUP, "CPU wakeup interrupts"),
         S(IPI_TIMER, "Timer broadcast interrupts"),
         S(IPI_RESCHEDULE, "Rescheduling interrupts"),
         S(IPI_CALL_FUNC, "Function call interrupts"),
@@ -362,7 +451,7 @@ void show_ipi_list(struct seq_file *p, int prec)
         for (i = 0; i < NR_IPI; i++) {
                 seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
-                for_each_present_cpu(cpu)
+                for_each_online_cpu(cpu)
                         seq_printf(p, "%10u ",
                                    __get_irq_stat(cpu, ipi_irqs[i]));
 
@@ -500,10 +589,13 @@
         unsigned int cpu = smp_processor_id();
         struct pt_regs *old_regs = set_irq_regs(regs);
 
-        if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
-                __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
+        if (ipinr < NR_IPI)
+                __inc_irq_stat(cpu, ipi_irqs[ipinr]);
 
         switch (ipinr) {
+        case IPI_WAKEUP:
+                break;
+
         case IPI_TIMER:
                 irq_enter();
                 ipi_timer();
@@ -584,3 +676,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
         return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+                            unsigned long val, void *data)
+{
+        struct cpufreq_freqs *freq = data;
+        int cpu = freq->cpu;
+
+        if (freq->flags & CPUFREQ_CONST_LOOPS)
+                return NOTIFY_OK;
+
+        if (!per_cpu(l_p_j_ref, cpu)) {
+                per_cpu(l_p_j_ref, cpu) =
+                        per_cpu(cpu_data, cpu).loops_per_jiffy;
+                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+                if (!global_l_p_j_ref) {
+                        global_l_p_j_ref = loops_per_jiffy;
+                        global_l_p_j_ref_freq = freq->old;
+                }
+        }
+
+        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+                                                global_l_p_j_ref_freq,
+                                                freq->new);
+                per_cpu(cpu_data, cpu).loops_per_jiffy =
+                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+                                      per_cpu(l_p_j_ref_freq, cpu),
+                                      freq->new);
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+        .notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+        return cpufreq_register_notifier(&cpufreq_notifier,
+                                         CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
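
The centrepiece of this diff is struct smp_operations: the platform SMP and hotplug entry points (smp_init_cpus, boot_secondary, and the cpu_kill/cpu_die/cpu_disable hotplug hooks) stop being link-time symbols that every machine must provide and become function pointers registered at boot through smp_set_ops(), with each platform_* wrapper above supplying a safe default when a hook is left NULL. A minimal sketch of the platform side follows; the myplat_* names are hypothetical placeholders, while smp_set_ops(), the structure, and the hook names come from the diff itself.

/*
 * Sketch: how a machine port might register its SMP callbacks with
 * the struct smp_operations interface introduced above. Everything
 * named myplat_* is a hypothetical placeholder.
 */
#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/smp.h>

static void __init myplat_smp_init_cpus(void)
{
        /* discover cores (SCU, device tree, ...) and mark them possible */
        set_cpu_possible(1, true);
}

static int __cpuinit myplat_boot_secondary(unsigned int cpu,
                                           struct task_struct *idle)
{
        /* power the core on or release it from its boot spin loop */
        return 0;
}

static struct smp_operations myplat_smp_ops __initdata = {
        .smp_init_cpus          = myplat_smp_init_cpus,
        .smp_boot_secondary     = myplat_boot_secondary,
        /*
         * Hooks left NULL fall back to the defaults in smp.c above:
         * cpu_kill reports success, cpu_disable refuses only CPU0,
         * and a missing smp_boot_secondary returns -ENOSYS.
         */
};

/* called from the machine's early setup code */
void __init myplat_register_smp_ops(void)
{
        smp_set_ops(&myplat_smp_ops);
}

Note that smp_set_ops() copies the structure by value, which is why the platform's copy can safely live in __initdata.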
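
pen_release, also added above, is the shared variable behind the "holding pen" boot protocol: secondary cores spin in low-level platform boot code until the booting CPU writes their ID into pen_release, and the new IPI_WAKEUP/arch_send_wakeup_ipi_mask() pair exists to nudge cores that wait in WFI rather than busy-spin. The sketch below shows the releasing side, modelled loosely on the plat-versatile code of this era rather than quoting any literal platform source; myplat_boot_secondary is again a placeholder.

/*
 * Sketch of the releasing side of the pen_release protocol.
 * pen_release itself is declared extern in <asm/smp.h>.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

static DEFINE_SPINLOCK(boot_lock);

static void __cpuinit write_pen_release(int val)
{
        pen_release = val;
        smp_wmb();              /* order the store before the cache clean */
        __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
        outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

static int __cpuinit myplat_boot_secondary(unsigned int cpu,
                                           struct task_struct *idle)
{
        unsigned long timeout;

        spin_lock(&boot_lock);

        /* name the core we want, then kick it out of WFI */
        write_pen_release(cpu_logical_map(cpu));
        arch_send_wakeup_ipi_mask(cpumask_of(cpu));

        /* the secondary writes -1 back once it has left the pen */
        timeout = jiffies + (1 * HZ);
        while (time_before(jiffies, timeout)) {
                smp_rmb();
                if (pen_release == -1)
                        break;
                udelay(10);
        }

        spin_unlock(&boot_lock);
        return pen_release != -1 ? -ENOSYS : 0;
}

The explicit cache clean in write_pen_release() matters because the waiting core may still be running with its caches off and must see the store in memory.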
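
Finally, the CONFIG_CPU_FREQ notifier keeps loops_per_jiffy consistent when cpufreq changes the clock: udelay() spins for a loop count derived from lpj, so each CPU's value (plus the global one used before per-CPU data is up) is rescaled linearly from a reference pair captured on the first transition. The asymmetric test raises lpj on PRECHANGE when the clock is about to speed up and lowers it on POSTCHANGE after it has slowed down, so a delay that straddles the transition can only run long, never short. cpufreq_scale(old, div, mult) is essentially old * mult / div with overflow protection; reduced to plain C below, with made-up numbers for illustration.

/*
 * Plain C reduction of the cpufreq_scale() arithmetic used above:
 * new_lpj = ref_lpj * new_freq / ref_freq. The reference value is
 * made up; the kernel helper also guards against intermediate
 * overflow.
 */
#include <stdio.h>

static unsigned long scale_lpj(unsigned long ref_lpj,
                               unsigned int ref_khz, unsigned int new_khz)
{
        return (unsigned long)((unsigned long long)ref_lpj * new_khz
                               / ref_khz);
}

int main(void)
{
        unsigned long ref = 4997120;    /* lpj calibrated at 1 GHz */

        printf("500 MHz  -> lpj %lu\n", scale_lpj(ref, 1000000, 500000));
        printf("1200 MHz -> lpj %lu\n", scale_lpj(ref, 1000000, 1200000));
        return 0;
}

Running this prints 2498560 and 5996544: halving the clock halves the loop count, and a 1.2x clock scales it up by the same factor.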