author		Rickard Andersson <rickard.andersson@stericsson.com>	2012-12-27 14:55:38 +0000
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-02 00:01:13 +0100
commit		2abfa876f1117b0ab45f191fb1f82c41b1cbc8fe (patch)
tree		929f7c181cead27bc5c1d320c53ba0e99a9f0544 /drivers/cpufreq
parent		88b62b915b0b7e25870eb0604ed9a92ba4bfc9f7 (diff)
cpufreq: handle SW coordinated CPUs
This patch fixes a bug that occurred when there was load on a secondary CPU while the primary CPU was sleeping. Only one sampling timer was spawned, and it was spawned as a deferred timer on the primary CPU, so a change in load on a secondary CPU was not detected by the cpufreq governor (both ondemand and conservative). This patch makes sure that deferred timers run on all CPUs in the case of software-coordinated CPUs that operate at the same frequency.

Signed-off-by: Rickard Andersson <rickard.andersson@stericsson.com>
Signed-off-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
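For readers skimming the diff, here is a minimal, self-contained sketch of the scheme the patch implements, written against only generic cpumask/workqueue APIs. The names sampling_fn, per_cpu_sampling_work, start_sampling and SAMPLING_DELAY are illustrative stand-ins, not the governor's real cpu_dbs_common_info/gov_dbs_timer plumbing shown in the diff below.

/*
 * Minimal sketch of the scheme introduced by this patch (illustrative
 * names only; the real code lives in drivers/cpufreq/cpufreq_governor.c).
 */
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

#define SAMPLING_DELAY	msecs_to_jiffies(10)	/* illustrative sampling rate */

static DEFINE_PER_CPU(struct delayed_work, per_cpu_sampling_work);

/* More than one CPU in the policy means the CPUs are SW coordinated. */
static bool sw_coordinated_cpus(struct cpufreq_policy *policy)
{
	return cpumask_weight(policy->cpus) > 1;
}

static void sampling_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... evaluate the load of the CPU this work ran on ... */

	/* Re-arm on the CPU that actually ran the work. */
	schedule_delayed_work_on(smp_processor_id(), dwork, SAMPLING_DELAY);
}

static void start_sampling(struct cpufreq_policy *policy, int policy_cpu)
{
	int j;

	if (!sw_coordinated_cpus(policy)) {
		/* Single-CPU policy: one deferrable timer is enough. */
		struct delayed_work *dwork =
			&per_cpu(per_cpu_sampling_work, policy_cpu);

		INIT_DEFERRABLE_WORK(dwork, sampling_fn);
		schedule_delayed_work_on(policy_cpu, dwork, SAMPLING_DELAY);
		return;
	}

	/*
	 * SW-coordinated policy: arm one deferrable timer per governed
	 * CPU, so a busy secondary CPU is still sampled while the
	 * primary (policy) CPU sleeps.
	 */
	for_each_cpu(j, policy->cpus) {
		struct delayed_work *dwork = &per_cpu(per_cpu_sampling_work, j);

		INIT_DEFERRABLE_WORK(dwork, sampling_fn);
		schedule_delayed_work_on(j, dwork, SAMPLING_DELAY);
	}
}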
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	 3
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	44
-rw-r--r--	drivers/cpufreq/cpufreq_governor.h	 1
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	 3
4 files changed, 43 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e7..b9d7f14d7d3 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -122,7 +122,8 @@ static void cs_dbs_timer(struct work_struct *work)
dbs_check_cpu(&cs_dbs_data, cpu);
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ schedule_delayed_work_on(smp_processor_id(), &dbs_info->cdbs.work,
+ delay);
mutex_unlock(&dbs_info->cdbs.timer_mutex);
}
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cd..b0e4506f2ca 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,13 +161,23 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
+bool dbs_sw_coordinated_cpus(struct cpu_dbs_common_info *cdbs)
+{
+ struct cpufreq_policy *policy = cdbs->cur_policy;
+
+ return cpumask_weight(policy->cpus) > 1;
+}
+EXPORT_SYMBOL_GPL(dbs_sw_coordinated_cpus);
+
static inline void dbs_timer_init(struct dbs_data *dbs_data,
- struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+ struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate,
+ int cpu)
{
int delay = delay_for_sampling_rate(sampling_rate);
+ struct cpu_dbs_common_info *cdbs_local = dbs_data->get_cpu_cdbs(cpu);
- INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
- schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+ schedule_delayed_work_on(cpu, &cdbs_local->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
@@ -217,6 +227,10 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work,
+ dbs_data->gov_dbs_timer);
}
/*
@@ -275,15 +289,33 @@ second_time:
}
mutex_unlock(&dbs_data->mutex);
- mutex_init(&cpu_cdbs->timer_mutex);
- dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+ if (dbs_sw_coordinated_cpus(cpu_cdbs)) {
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+ dbs_timer_init(dbs_data, j_cdbs,
+ *sampling_rate, j);
+ }
+ } else {
+ dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate, cpu);
+ }
break;
case CPUFREQ_GOV_STOP:
if (dbs_data->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
- dbs_timer_exit(cpu_cdbs);
+ if (dbs_sw_coordinated_cpus(cpu_cdbs)) {
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_common_info *j_cdbs;
+
+ j_cdbs = dbs_data->get_cpu_cdbs(j);
+ dbs_timer_exit(j_cdbs);
+ }
+ } else {
+ dbs_timer_exit(cpu_cdbs);
+ }
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index f6616540c53..5bf6fb8023e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -171,6 +171,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool dbs_sw_coordinated_cpus(struct cpu_dbs_common_info *cdbs);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7731f7c7e79..93bb56d14cf 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -243,7 +243,8 @@ static void od_dbs_timer(struct work_struct *work)
}
}
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
+ schedule_delayed_work_on(smp_processor_id(), &dbs_info->cdbs.work,
+ delay);
mutex_unlock(&dbs_info->cdbs.timer_mutex);
}