Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c              |  33
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c |  25
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c     | 147
-rw-r--r--   drivers/cpufreq/cpufreq_performance.c  |   4
-rw-r--r--   drivers/cpufreq/cpufreq_powersave.c    |   4
-rw-r--r--   drivers/cpufreq/cpufreq_userspace.c    |   4
6 files changed, 152 insertions(+), 65 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d6a3ff0267..31d6f535a79 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -825,6 +825,9 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	policy->user_policy.min = policy->cpuinfo.min_freq;
 	policy->user_policy.max = policy->cpuinfo.max_freq;
 
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				     CPUFREQ_START, policy);
+
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1464,25 +1467,27 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation)
 {
-	int ret;
+	int ret = -EINVAL;
 
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
-		return -EINVAL;
+		goto no_policy;
 
 	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-		return -EINVAL;
+		goto fail;
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
 	unlock_policy_rwsem_write(policy->cpu);
 
+fail:
 	cpufreq_cpu_put(policy);
+no_policy:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
@@ -1490,8 +1495,8 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
 	if (!policy)
 		return -EINVAL;
 
-	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
-		ret = cpufreq_driver->getavg(policy->cpu);
+	if (cpu_online(cpu) && cpufreq_driver->getavg)
+		ret = cpufreq_driver->getavg(policy, cpu);
 
 	cpufreq_cpu_put(policy);
 	return ret;
@@ -1714,13 +1719,17 @@ int cpufreq_update_policy(unsigned int cpu)
 {
 	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
 	struct cpufreq_policy policy;
-	int ret = 0;
+	int ret;
 
-	if (!data)
-		return -ENODEV;
+	if (!data) {
+		ret = -ENODEV;
+		goto no_policy;
+	}
 
-	if (unlikely(lock_policy_rwsem_write(cpu)))
-		return -EINVAL;
+	if (unlikely(lock_policy_rwsem_write(cpu))) {
+		ret = -EINVAL;
+		goto fail;
+	}
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1747,7 +1756,9 @@ int cpufreq_update_policy(unsigned int cpu)
 
 	unlock_policy_rwsem_write(cpu);
 
+fail:
 	cpufreq_cpu_put(data);
+no_policy:
 	return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index fe565ee4375..e2657837d95 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -333,7 +333,7 @@ static void dbs_check_cpu(int cpu)
 {
 	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
 	unsigned int tmp_idle_ticks, total_idle_ticks;
-	unsigned int freq_step;
+	unsigned int freq_target;
 	unsigned int freq_down_sampling_rate;
 	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy;
@@ -383,13 +383,13 @@ static void dbs_check_cpu(int cpu)
 		if (this_dbs_info->requested_freq == policy->max)
 			return;
 
-		freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_step == 0))
-			freq_step = 5;
+		if (unlikely(freq_target == 0))
+			freq_target = 5;
 
-		this_dbs_info->requested_freq += freq_step;
+		this_dbs_info->requested_freq += freq_target;
 		if (this_dbs_info->requested_freq > policy->max)
 			this_dbs_info->requested_freq = policy->max;
 
@@ -425,19 +425,19 @@ static void dbs_check_cpu(int cpu)
 	/*
 	 * if we are already at the lowest speed then break out early
 	 * or if we 'cannot' reduce the speed as the user might want
-	 * freq_step to be zero
+	 * freq_target to be zero
	 */
 	if (this_dbs_info->requested_freq == policy->min
 		|| dbs_tuners_ins.freq_step == 0)
 		return;
 
-	freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+	freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 	/* max freq cannot be less than 100. But who knows.... */
-	if (unlikely(freq_step == 0))
-		freq_step = 5;
+	if (unlikely(freq_target == 0))
+		freq_target = 5;
 
-	this_dbs_info->requested_freq -= freq_step;
+	this_dbs_info->requested_freq -= freq_target;
 	if (this_dbs_info->requested_freq < policy->min)
 		this_dbs_info->requested_freq = policy->min;
 
@@ -460,6 +460,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 static inline void dbs_timer_init(void)
 {
+	init_timer_deferrable(&dbs_work.timer);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
@@ -575,13 +576,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_conservative = {
 	.name			= "conservative",
 	.governor		= cpufreq_governor_dbs,
 	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
 	.owner			= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_conservative);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 33855cb3cf1..2ab3c12b88a 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,13 +18,19 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
  */
 
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
+#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
@@ -57,6 +63,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
+	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	struct cpufreq_frequency_table *freq_table;
@@ -86,21 +93,24 @@ static struct workqueue_struct	*kondemand_wq;
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
+	unsigned int down_differential;
 	unsigned int ignore_nice;
 	unsigned int powersave_bias;
 } dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
 	.ignore_nice = 0,
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+						  cputime64_t *wall)
 {
 	cputime64_t idle_time;
-	cputime64_t cur_jiffies;
+	cputime64_t cur_wall_time;
 	cputime64_t busy_time;
 
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
 				  kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +123,37 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 				kstat_cpu(cpu).cpustat.nice);
 	}
 
-	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
+	return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+	if (idle_time == -1ULL)
+		return get_cpu_idle_time_jiffy(cpu, wall);
+
+	if (dbs_tuners_ins.ignore_nice) {
+		cputime64_t cur_nice;
+		unsigned long cur_nice_jiffies;
+		struct cpu_dbs_info_s *dbs_info;
+
+		dbs_info = &per_cpu(cpu_dbs_info, cpu);
+		cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
+					 dbs_info->prev_cpu_nice);
+		/*
+		 * Assumption: nice time between sampling periods will be
+		 * less than 2^32 jiffies for 32 bit sys
+		 */
+		cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
+		dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
+		return idle_time + jiffies_to_usecs(cur_nice_jiffies);
+	}
 
 	return idle_time;
 }
@@ -277,8 +317,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		dbs_info->prev_cpu_wall = get_jiffies_64();
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -334,9 +374,7 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-	unsigned int idle_ticks, total_ticks;
-	unsigned int load = 0;
-	cputime64_t cur_jiffies;
+	unsigned int max_load_freq;
 
 	struct cpufreq_policy *policy;
 	unsigned int j;
@@ -346,13 +384,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	this_dbs_info->freq_lo = 0;
 	policy = this_dbs_info->cur_policy;
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-			this_dbs_info->prev_cpu_wall);
-	this_dbs_info->prev_cpu_wall = get_jiffies_64();
 
-	if (!total_ticks)
-		return;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
@@ -365,27 +397,44 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	 * 5% (default) of current frequency
	 */
 
-	/* Get Idle Time */
-	idle_ticks = UINT_MAX;
+	/* Get Absolute Load - in terms of freq */
+	max_load_freq = 0;
+
 	for_each_cpu_mask_nr(j, policy->cpus) {
-		cputime64_t total_idle_ticks;
-		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
+		cputime64_t cur_wall_time, cur_idle_time;
+		unsigned int idle_time, wall_time;
+		unsigned int load, load_freq;
+		int freq_avg;
 
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		total_idle_ticks = get_cpu_idle_time(j);
-		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+				j_dbs_info->prev_cpu_wall);
+		j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
 				j_dbs_info->prev_cpu_idle);
-		j_dbs_info->prev_cpu_idle = total_idle_ticks;
+		j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+		if (unlikely(!wall_time || wall_time < idle_time))
+			continue;
+
+		load = 100 * (wall_time - idle_time) / wall_time;
+
+		freq_avg = __cpufreq_driver_getavg(policy, j);
+		if (freq_avg <= 0)
+			freq_avg = policy->cur;
 
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
+		load_freq = load * freq_avg;
+		if (load_freq > max_load_freq)
+			max_load_freq = load_freq;
 	}
-	if (likely(total_ticks > idle_ticks))
-		load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
 	/* Check for frequency increase */
-	if (load > dbs_tuners_ins.up_threshold) {
+	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
 		/* if we are already at full speed then break out early */
 		if (!dbs_tuners_ins.powersave_bias) {
 			if (policy->cur == policy->max)
@@ -412,15 +461,13 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	 * can support the current CPU usage without triggering the up
 	 * policy. To be safe, we focus 10 points under the threshold.
	 */
-	if (load < (dbs_tuners_ins.up_threshold - 10)) {
-		unsigned int freq_next, freq_cur;
-
-		freq_cur = __cpufreq_driver_getavg(policy);
-		if (!freq_cur)
-			freq_cur = policy->cur;
-
-		freq_next = (freq_cur * load) /
-			(dbs_tuners_ins.up_threshold - 10);
+	if (max_load_freq <
+	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+	     policy->cur) {
+		unsigned int freq_next;
+		freq_next = max_load_freq /
+				(dbs_tuners_ins.up_threshold -
+				 dbs_tuners_ins.down_differential);
 
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
@@ -526,8 +573,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		j_dbs_info->cur_policy = policy;
 
-		j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		j_dbs_info->prev_cpu_wall = get_jiffies_64();
+		j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
 	}
 	this_dbs_info->cpu = cpu;
 	/*
@@ -579,22 +626,42 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
 struct cpufreq_governor cpufreq_gov_ondemand = {
 	.name			= "ondemand",
 	.governor		= cpufreq_governor_dbs,
 	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
 	.owner			= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_ondemand);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+	int err;
+	cputime64_t wall;
+	u64 idle_time;
+	int cpu = get_cpu();
+
+	idle_time = get_cpu_idle_time_us(cpu, &wall);
+	put_cpu();
+	if (idle_time != -1ULL) {
+		/* Idle micro accounting is supported. Use finer thresholds */
+		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+		dbs_tuners_ins.down_differential =
+					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+	}
+
 	kondemand_wq = create_workqueue("kondemand");
 	if (!kondemand_wq) {
 		printk(KERN_ERR "Creation of kondemand failed\n");
 		return -EFAULT;
 	}
-	return cpufreq_register_governor(&cpufreq_gov_ondemand);
+	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
+	if (err)
+		destroy_workqueue(kondemand_wq);
+
+	return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index e8e1451ef1c..7e2e515087f 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -36,12 +36,14 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
 	return 0;
 }
 
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_performance = {
 	.name		= "performance",
 	.governor	= cpufreq_governor_performance,
 	.owner		= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_performance);
 
 
 static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 88d2f44fba4..e6db5faf3eb 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -35,12 +35,14 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
 	return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_powersave = {
 	.name		= "powersave",
 	.governor	= cpufreq_governor_powersave,
 	.owner		= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_powersave);
 
 static int __init cpufreq_gov_powersave_init(void)
 {
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 32244aa7cc0..1442bbada05 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -187,6 +187,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 }
 
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_userspace = {
 	.name		= "userspace",
 	.governor	= cpufreq_governor_userspace,
@@ -194,7 +197,6 @@ struct cpufreq_governor cpufreq_gov_userspace = {
 	.show_setspeed	= show_speed,
 	.owner		= THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_userspace);
 
 static int __init cpufreq_gov_userspace_init(void)
 {
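
The core of the ondemand change above: instead of comparing a bare load percentage against up_threshold, dbs_check_cpu() now weights each CPU's load by the average frequency over the sampling window (load * freq_avg) and compares the busiest CPU's product against thresholds scaled by the current frequency. What follows is a minimal user-space C sketch of that arithmetic, not kernel code; the cpu_sample struct, the window values, and main() are hypothetical illustrations, and only the formulas mirror the diff.

/*
 * Sketch of the patched decision logic, under assumed sample values.
 * Compile with any C99 compiler; prints which branch the governor
 * would take for a made-up two-CPU policy.
 */
#include <stdio.h>

struct cpu_sample {			/* hypothetical per-CPU window */
	unsigned int wall_time;		/* usecs elapsed in the window */
	unsigned int idle_time;		/* usecs idle in the window */
	unsigned int freq_avg;		/* avg freq over the window, kHz */
};

int main(void)
{
	/* assumed policy state, kHz */
	unsigned int cur = 1600000, max = 2400000;
	/*
	 * DEF_FREQUENCY_* defaults; the patch switches to 95/3 at init
	 * time when idle micro-accounting (get_cpu_idle_time_us) works.
	 */
	unsigned int up_threshold = 80, down_differential = 10;

	struct cpu_sample cpus[] = {
		{ 10000, 1500, 1600000 },	/* 85% busy */
		{ 10000, 7000, 1600000 },	/* 30% busy */
	};
	unsigned int max_load_freq = 0;

	for (int j = 0; j < 2; j++) {
		unsigned int load, load_freq;

		/* skip bogus windows, as the patch does */
		if (!cpus[j].wall_time || cpus[j].wall_time < cpus[j].idle_time)
			continue;
		load = 100 * (cpus[j].wall_time - cpus[j].idle_time) /
			cpus[j].wall_time;
		/* weight load by the frequency it was measured at */
		load_freq = load * cpus[j].freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	if (max_load_freq > up_threshold * cur)
		printf("busiest CPU above threshold: ramp to %u kHz\n", max);
	else if (max_load_freq < (up_threshold - down_differential) * cur)
		printf("step down to ~%u kHz\n",
		       max_load_freq / (up_threshold - down_differential));
	else
		printf("hold at %u kHz\n", cur);
	return 0;
}

Weighting load by freq_avg makes the comparison express absolute work rather than utilization at whatever speed the CPU happened to be running, so 80% busy at half speed no longer looks as loaded as 80% busy at full speed. On the way down, freq_next = max_load_freq / (up_threshold - down_differential) picks the frequency at which the same work would land just under the down threshold (10 points below up_threshold by default, 3 when micro-accounting is available); the governor then clamps that through the frequency table and powersave_bias logic shown in the diff.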