author    | Richard Zhao <richard.zhao@linaro.org> | 2012-09-05 01:08:59 +0200
committer | Rafael J. Wysocki <rjw@sisk.pl> | 2012-09-09 22:06:33 +0200
commit    | ec971ea5f2426a0bf9d5cca9a103743918c12978 (patch)
tree      | 58594e18af78718548dea595c26e1a67d964a29e /arch/arm/kernel
parent    | e1f0b8e9b04a262834ed111e605e5d215685dfab (diff)
ARM: add cpufreq transition notifier to adjust loops_per_jiffy for smp
If CONFIG_SMP is set, cpufreq skips the loops_per_jiffy update, because each
architecture has its own per-CPU loops_per_jiffy definition.
Signed-off-by: Richard Zhao <richard.zhao@linaro.org>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
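
For reference, the per-CPU delay-loop adjustment added by this patch boils down to rescaling a reference loops_per_jiffy by the ratio of the new frequency to the frequency at which the reference was captured, which is what cpufreq_scale() computes for the notifier below. The following standalone C snippet is only an illustrative sketch of that arithmetic; scale_lpj() and the sample values are hypothetical, not part of the patch.

```c
#include <stdio.h>

/* Illustrative restatement of the scaling done via cpufreq_scale():
 *   new_lpj = ref_lpj * new_freq / ref_freq
 * (frequencies in kHz, as cpufreq reports them).
 */
static unsigned long scale_lpj(unsigned long ref_lpj,
                               unsigned long ref_khz, unsigned long new_khz)
{
        /* Widen the multiply to avoid overflow before dividing. */
        return (unsigned long)(((unsigned long long)ref_lpj * new_khz) / ref_khz);
}

int main(void)
{
        /* Hypothetical values: reference captured at 996 MHz, CPU now at 396 MHz. */
        unsigned long ref_lpj = 4980736, ref_khz = 996000, new_khz = 396000;

        printf("scaled loops_per_jiffy: %lu\n", scale_lpj(ref_lpj, ref_khz, new_khz));
        return 0;
}
```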
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/smp.c | 54
1 file changed, 54 insertions, 0 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index ebd8ad274d7..8e03567c958 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
+#include <linux/cpufreq.h>
 #include <linux/atomic.h>
 
 #include <asm/cacheflush.h>
@@ -584,3 +585,56 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	int cpu = freq->cpu;
+
+	if (freq->flags & CPUFREQ_CONST_LOOPS)
+		return NOTIFY_OK;
+
+	if (!per_cpu(l_p_j_ref, cpu)) {
+		per_cpu(l_p_j_ref, cpu) =
+			per_cpu(cpu_data, cpu).loops_per_jiffy;
+		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		if (!global_l_p_j_ref) {
+			global_l_p_j_ref = loops_per_jiffy;
+			global_l_p_j_ref_freq = freq->old;
+		}
+	}
+
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+						global_l_p_j_ref_freq,
+						freq->new);
+		per_cpu(cpu_data, cpu).loops_per_jiffy =
+			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+					per_cpu(l_p_j_ref_freq, cpu),
+					freq->new);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+	.notifier_call  = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	return cpufreq_register_notifier(&cpufreq_notifier,
+			CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
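
For context on when cpufreq_callback() runs: a cpufreq driver brackets the actual clock change with CPUFREQ_PRECHANGE and CPUFREQ_POSTCHANGE notifications, and those fan out to transition notifiers such as the one registered above. The sketch below shows that pattern against the two-argument cpufreq_notify_transition() of this kernel era (circa v3.6); the driver name, foo_set_rate(), and the frequencies are hypothetical, not taken from this patch.

```c
#include <linux/cpufreq.h>

/* Hypothetical stand-in for the real clk/regulator programming. */
static void foo_set_rate(unsigned int cpu, unsigned int khz)
{
}

/* Hypothetical driver frequency-change path: the PRE/POST notifications
 * below are what invoke transition notifiers such as cpufreq_callback()
 * above, which then rescales loops_per_jiffy for the affected CPU.
 */
static int foo_cpufreq_set_rate(struct cpufreq_policy *policy,
				unsigned int new_khz)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;	/* current frequency in kHz */
	freqs.new = new_khz;		/* requested frequency in kHz */

	/* When raising the frequency, the notifier bumps loops_per_jiffy on
	 * PRECHANGE so the delay loop never runs short while the clock ramps.
	 */
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	foo_set_rate(policy->cpu, new_khz);

	/* When lowering the frequency, the notifier rescales on POSTCHANGE. */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
```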