Diffstat (limited to 'drivers/cpufreq/cpufreq_governor.c'):
 drivers/cpufreq/cpufreq_governor.c | 38 +++++++++++++++++---------------------
 1 file changed, 17 insertions(+), 21 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8db2a..0806c31e576 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -16,15 +16,9 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
-#include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
 
 #include "cpufreq_governor.h"
 
@@ -47,13 +41,13 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 	unsigned int j;
 
 	if (dbs_data->cdata->governor == GOV_ONDEMAND)
-		ignore_nice = od_tuners->ignore_nice;
+		ignore_nice = od_tuners->ignore_nice_load;
 	else
-		ignore_nice = cs_tuners->ignore_nice;
+		ignore_nice = cs_tuners->ignore_nice_load;
 
 	policy = cdbs->cur_policy;
 
-	/* Get Absolute Load (in terms of freq for ondemand gov) */
+	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_common_info *j_cdbs;
 		u64 cur_wall_time, cur_idle_time;
@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
-		if (dbs_data->cdata->governor == GOV_ONDEMAND) {
-			int freq_avg = __cpufreq_driver_getavg(policy, j);
-			if (freq_avg <= 0)
-				freq_avg = policy->cur;
-
-			load *= freq_avg;
-		}
-
 		if (load > max_load)
 			max_load = load;
 	}
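
With the __cpufreq_driver_getavg() block gone, "load" is a plain busy percentage over the sampling window for both governors, no longer scaled by the average frequency for ondemand. For context, a minimal sketch of how that percentage is produced for each CPU in the loop, assuming the prev_cpu_wall/prev_cpu_idle fields cached in j_cdbs from the previous sample (an illustration of the calculation, not the exact upstream body):

	/* Sketch: busy percentage from wall/idle deltas since last sample */
	u64 cur_wall_time, cur_idle_time;
	unsigned int wall_time, idle_time, load;

	cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

	wall_time = (unsigned int)(cur_wall_time - j_cdbs->prev_cpu_wall);
	j_cdbs->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int)(cur_idle_time - j_cdbs->prev_cpu_idle);
	j_cdbs->prev_cpu_idle = cur_idle_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		continue;	/* first sample or clock skew: skip this CPU */

	load = 100 * (wall_time - idle_time) / wall_time;	/* 0..100 */
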
@@ -133,8 +119,18 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
+	if (!policy->governor_enabled)
+		return;
+
 	if (!all_cpus) {
-		__gov_queue_work(smp_processor_id(), dbs_data, delay);
+		/*
+		 * Use raw_smp_processor_id() to avoid preemptible warnings.
+		 * We know that this is only called with all_cpus == false from
+		 * works that have been queued with *_work_on() functions and
+		 * those works are canceled during CPU_DOWN_PREPARE so they
+		 * can't possibly run on any other CPU.
+		 */
+		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
 	} else {
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
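
The switch to raw_smp_processor_id() silences the "BUG: using smp_processor_id() in preemptible" check that CONFIG_DEBUG_PREEMPT raises when the calling task could migrate between reading the CPU id and using it; as the new comment argues, migration is ruled out here because the works are queued per CPU and canceled in CPU_DOWN_PREPARE. For reference, a hedged sketch of the helper being called, assuming it re-arms a delayed work embedded in the per-CPU dbs info via the kernel's mod_delayed_work_on() (the helper's body is not part of this diff):

	/* Sketch: (re)queue the governor's sampling work on a given CPU */
	static void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
				     unsigned int delay)
	{
		struct cpu_dbs_common_info *cdbs =
			dbs_data->cdata->get_cpu_cdbs(cpu);

		mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
	}
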
@@ -244,7 +240,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		policy->governor_data = dbs_data;
 
-		/* policy latency is in nS. Convert it to uS first */
+		/* policy latency is in ns. Convert it to us first */
 		latency = policy->cpuinfo.transition_latency / 1000;
 		if (latency == 0)
 			latency = 1;
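
transition_latency comes from the driver in nanoseconds while the governors' sampling-rate tunables are in microseconds, hence the divide by 1000; clamping to 1 keeps a zero-latency driver from collapsing the rate calculations that follow. A hedged sketch of how the value is typically used next, with min_sampling_rate and MIN_LATENCY_MULTIPLIER as in the common governor code (treat the exact expression as an assumption):

	/* Sketch: turn hardware transition latency into a sampling-rate floor (us) */
	unsigned int latency = policy->cpuinfo.transition_latency / 1000;

	if (latency == 0)
		latency = 1;	/* keep the floor non-zero */

	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
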
@@ -298,12 +294,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		cs_tuners = dbs_data->tuners;
 		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
 		sampling_rate = cs_tuners->sampling_rate;
-		ignore_nice = cs_tuners->ignore_nice;
+		ignore_nice = cs_tuners->ignore_nice_load;
 	} else {
 		od_tuners = dbs_data->tuners;
 		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
 		sampling_rate = od_tuners->sampling_rate;
-		ignore_nice = od_tuners->ignore_nice;
+		ignore_nice = od_tuners->ignore_nice_load;
 		od_ops = dbs_data->cdata->gov_ops;
 		io_busy = od_tuners->io_is_busy;
 	}
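
The ignore_nice to ignore_nice_load rename is purely cosmetic and matches the sysfs attribute name; the behavior is unchanged. When the tunable is set, time spent in niced tasks is counted as idle, so background nice load does not push the frequency up. A hedged sketch of that accounting inside the per-CPU loop, assuming a prev_cpu_nice field caches the previous CPUTIME_NICE reading as in the governor's per-CPU dbs info:

	/* Sketch: fold niced CPU time into idle when ignore_nice_load is set */
	if (ignore_nice) {
		u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
				j_cdbs->prev_cpu_nice;
		unsigned long cur_nice_jiffies =
			(unsigned long)cputime64_to_jiffies64(cur_nice);

		j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		idle_time += jiffies_to_usecs(cur_nice_jiffies);
	}
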