Diffstat (limited to 'drivers/acpi/processor_thermal.c')
 drivers/acpi/processor_thermal.c | 45 +++++++++++++++++++++++++++++++++++++--------
 1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 3b599abf2b4..641b5450a0d 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -57,6 +57,27 @@ ACPI_MODULE_NAME("processor_thermal");
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
+#define reduction_pctg(cpu) \
+ per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
+
+/*
+ * Emulate "per package data" using per cpu data (which should really be
+ * provided elsewhere)
+ *
+ * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
+ * temporarily. Fortunately that's not a big issue here (I hope)
+ */
+static int phys_package_first_cpu(int cpu)
+{
+ int i;
+ int id = topology_physical_package_id(cpu);
+
+ for_each_online_cpu(i)
+ if (topology_physical_package_id(i) == id)
+ return i;
+ return 0;
+}
+
static int cpu_has_cpufreq(unsigned int cpu)
{
struct cpufreq_policy policy;
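The new helper above collapses all CPUs of a physical package onto a single per-cpu slot: every access goes through the first online CPU of that package. Below is a minimal user-space sketch of that mapping; the topology table, online mask and reduction_pctg_store array are hypothetical stand-ins for topology_physical_package_id(), for_each_online_cpu() and the DEFINE_PER_CPU variable.

#include <stdio.h>

#define NR_CPUS 8
/* Hypothetical topology: CPUs 0-3 in package 0, CPUs 4-7 in package 1. */
static const int package_id[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };
static const int cpu_online[NR_CPUS] = { 1, 1, 1, 1, 0, 1, 1, 1 };

static unsigned int reduction_pctg_store[NR_CPUS];	/* mimics the per-cpu variable */

/* First online CPU of cpu's package; falls back to 0 if none is found. */
static int phys_package_first_cpu(int cpu)
{
	int i, id = package_id[cpu];

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online[i] && package_id[i] == id)
			return i;
	return 0;
}

#define reduction_pctg(cpu) reduction_pctg_store[phys_package_first_cpu(cpu)]

int main(void)
{
	reduction_pctg(6) = 2;			/* write through CPU 6 (package 1) */
	printf("%u\n", reduction_pctg(7));	/* read through CPU 7: prints 2 */
	return 0;
}

As the comment in the patch notes, the mapping is only as stable as the online mask: if the CPU currently acting as "first in package" is unplugged, later accesses land on a different slot and the previous state is forgotten until it is set again.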
@@ -76,7 +97,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
max_freq = (
policy->cpuinfo.max_freq *
- (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+ (100 - reduction_pctg(policy->cpu) * 20)
) / 100;
cpufreq_verify_within_limits(policy, 0, max_freq);
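Each cooling state removes another 20% of the CPU's cpuinfo.max_freq, so the clamp enforced by the notifier is max_freq = cpuinfo.max_freq * (100 - state * 20) / 100. A quick sketch of the resulting limits, assuming a hypothetical 3 GHz (3000000 kHz) CPU and states 0 through 3:

#include <stdio.h>

int main(void)
{
	unsigned int cpuinfo_max_freq = 3000000;	/* kHz, hypothetical CPU */
	int state;

	/* Same arithmetic as the notifier: each state trims another 20%. */
	for (state = 0; state <= 3; state++)
		printf("state %d -> max_freq %u kHz\n", state,
		       cpuinfo_max_freq * (100 - state * 20) / 100);
	return 0;
}

This prints 3000000, 2400000, 1800000 and 1200000 kHz. With the per-package macro in place, the clamp now derives from the package-wide state rather than from a value stored per CPU.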
@@ -102,16 +123,28 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
- return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
+ return reduction_pctg(cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
+ int i;
+
if (!cpu_has_cpufreq(cpu))
return 0;
- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
- cpufreq_update_policy(cpu);
+ reduction_pctg(cpu) = state;
+
+ /*
+ * Update all the CPUs in the same package because they all
+ * contribute to the temperature and often share the same
+ * frequency.
+ */
+ for_each_online_cpu(i) {
+ if (topology_physical_package_id(i) ==
+ topology_physical_package_id(cpu))
+ cpufreq_update_policy(i);
+ }
return 0;
}
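Because the throttling state is now per package, cpufreq_set_cur_state() refreshes the policy of every sibling so that each one re-runs the notifier and picks up the new clamp. A self-contained sketch of that loop, with a hypothetical topology table and a print in place of cpufreq_update_policy():

#include <stdio.h>

#define NR_CPUS 8
/* Hypothetical topology: CPUs 0-3 in package 0, CPUs 4-7 in package 1. */
static const int package_id[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Stand-in for cpufreq_update_policy(): just report which CPU is refreshed. */
static void update_policy(int cpu)
{
	printf("refresh policy of CPU %d\n", cpu);
}

/* Mirrors the loop added to cpufreq_set_cur_state(). */
static void refresh_package(int cpu)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (package_id[i] == package_id[cpu])
			update_policy(i);
}

int main(void)
{
	refresh_package(5);	/* refreshes CPUs 4, 5, 6 and 7 */
	return 0;
}

Touching every sibling matters because, as the patch comment says, the cores share the package's thermal budget and often its frequency domain; clamping only the CPU that received the request would leave the others at the old limit.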
@@ -119,10 +152,6 @@ void acpi_thermal_cpufreq_init(void)
{
int i;
- for (i = 0; i < nr_cpu_ids; i++)
- if (cpu_present(i))
- per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
-
i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
if (!i)
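The last hunk drops the explicit zeroing loop from acpi_thermal_cpufreq_init(). DEFINE_PER_CPU data declared without an initializer starts out at zero like any other static storage, so the loop added nothing. A trivial illustration of that guarantee (the array name is made up for the example):

#include <stdio.h>

static unsigned int pctg[8];	/* static storage is zero-initialized */

int main(void)
{
	printf("%u\n", pctg[3]);	/* prints 0 with no explicit init loop */
	return 0;
}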