| author | Andi Kleen <andi@firstfloor.org> | 2012-02-06 08:17:11 -0800 |
|---|---|---|
| committer | Len Brown <len.brown@intel.com> | 2012-03-22 02:16:14 -0400 |
| commit | 2815ab92ba3ab27556212cc306288dc95692824b (patch) | |
| tree | 5e18864dbc2f07f7da0f552ea5e4c2624d7a8fdb /drivers/acpi/processor_thermal.c | |
| parent | c16fa4f2ad19908a47c63d8fa436a1178438c7e7 (diff) | |
ACPI: Do cpufreq clamping for throttling per package v2
On Intel CPUs the processor typically runs at the highest frequency requested by any logical CPU. When the system overheats, Linux first forces the frequency down to the lowest available one to lower the temperature.
However, this was done only per logical CPU, which means all logical CPUs in a package would need to go through this before the frequency is actually lowered.
Worse, this delay actually prevents real throttling, because the real throttling code only proceeds once the lowest frequency has already been reached.
So when a throttle event happens, force the lowest frequency for all CPUs in the package where it happened. The per-CPU state is now kept per package, not per logical CPU. An alternative would be to do it per cpufreq unit, but since we want to bring down the temperature of the complete chip, it's better to do it for all of them.
In principle it might even make sense to do it for all CPUs in the system, but I kept it at the package level for now.
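The per-package bookkeeping is emulated with per-CPU data: every CPU is redirected to the slot belonging to the first online CPU of its package. A minimal sketch of that helper and the accompanying macro, as introduced by the patch below (kernel-internal code, reproduced here only to illustrate the mapping):

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);

/* Map a CPU to the first online CPU in the same physical package. */
static int phys_package_first_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (topology_physical_package_id(i) == id)
			return i;
	return 0;
}

/* Every CPU of a package shares the slot of that package's first CPU. */
#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
```

If a package's first CPU is hot-unplugged, the state temporarily migrates to a different slot and is forgotten; the patch comment accepts this as a tolerable limitation.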
With this change the frequency is actually lowered, which in turn also allows real throttling to proceed.
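For scale: the cpufreq policy notifier (unchanged by this patch apart from the lookup) clamps the allowed maximum by 20% per reduction step, so once the whole package shares one reduction value the clamp bites immediately. A small standalone sketch of that arithmetic; the 2,000,000 kHz figure is an illustrative assumption, not taken from the patch:

```c
#include <stdio.h>

/*
 * Mirrors the clamp in acpi_thermal_cpufreq_notifier():
 *   max_freq = cpuinfo.max_freq * (100 - reduction_pctg * 20) / 100
 */
int main(void)
{
	unsigned int cpuinfo_max_freq = 2000000;	/* kHz; example value only */
	unsigned int state;

	for (state = 0; state <= 3; state++)
		printf("reduction state %u -> max_freq %u kHz\n",
		       state, cpuinfo_max_freq * (100 - state * 20) / 100);
	return 0;
}
```

At state 3 the example policy is clamped to 800,000 kHz; before the patch such a clamp only took full effect after every logical CPU in the package had been stepped down individually.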
I also removed an unnecessary per-CPU variable initialization.
v2: Fix package mapping
Cc: <stable@vger.kernel.org>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/acpi/processor_thermal.c')
-rw-r--r-- | drivers/acpi/processor_thermal.c | 45 |
1 file changed, 37 insertions, 8 deletions
```diff
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599abf2b4..641b5450a0d 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
 static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
 static unsigned int acpi_thermal_cpufreq_is_init = 0;
 
+#define reduction_pctg(cpu) \
+	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
+ * temporarily. Fortunately that's not a big issue here (I hope)
+ */
+static int phys_package_first_cpu(int cpu)
+{
+	int i;
+	int id = topology_physical_package_id(cpu);
+
+	for_each_online_cpu(i)
+		if (topology_physical_package_id(i) == id)
+			return i;
+	return 0;
+}
+
 static int cpu_has_cpufreq(unsigned int cpu)
 {
 	struct cpufreq_policy policy;
@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
 
 	max_freq = (
 		policy->cpuinfo.max_freq *
-		(100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+		(100 - reduction_pctg(policy->cpu) * 20)
 	) / 100;
 
 	cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+	return reduction_pctg(cpu);
 }
 
 static int cpufreq_set_cur_state(unsigned int cpu, int state)
 {
+	int i;
+
 	if (!cpu_has_cpufreq(cpu))
 		return 0;
 
-	per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
-	cpufreq_update_policy(cpu);
+	reduction_pctg(cpu) = state;
+
+	/*
+	 * Update all the CPUs in the same package because they all
+	 * contribute to the temperature and often share the same
+	 * frequency.
+	 */
+	for_each_online_cpu(i) {
+		if (topology_physical_package_id(i) ==
+		    topology_physical_package_id(cpu))
+			cpufreq_update_policy(i);
+	}
 	return 0;
 }
 
@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
 {
 	int i;
 
-	for (i = 0; i < nr_cpu_ids; i++)
-		if (cpu_present(i))
-			per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
 	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
 				      CPUFREQ_POLICY_NOTIFIER);
 	if (!i)
```