Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c               |  82
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  | 133
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |   4
3 files changed, 183 insertions(+), 36 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index af93a8175c5..67bc2ece7b4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
@@ -647,6 +647,21 @@ static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
return policy->governor->show_setspeed(policy, buf);
}
+/**
+ * show_bios_limit - show the current cpufreq HW/BIOS limitation
+ */
+static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int limit;
+ int ret;
+ if (cpufreq_driver->bios_limit) {
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
+ }
+ return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
+}
+
#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
@@ -666,6 +681,7 @@ define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
+define_one_ro(bios_limit);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
@@ -767,17 +783,20 @@ static struct kobj_type ktype_cpufreq = {
* 0: Success
* Positive: When we have a managed CPU and the sysfs got symlinked
*/
-int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+static int cpufreq_add_dev_policy(unsigned int cpu,
+ struct cpufreq_policy *policy,
+ struct sys_device *sys_dev)
{
int ret = 0;
#ifdef CONFIG_SMP
unsigned long flags;
unsigned int j;
-
#ifdef CONFIG_HOTPLUG_CPU
- if (per_cpu(cpufreq_cpu_governor, cpu)) {
- policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+ struct cpufreq_governor *gov;
+
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
dprintk("Restoring governor %s for cpu %d\n",
policy->governor->name, cpu);
}
@@ -840,7 +859,8 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
/* symlink affected CPUs */
-int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(unsigned int cpu,
+ struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
@@ -867,8 +887,9 @@ int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
return ret;
}
-int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+static int cpufreq_add_dev_interface(unsigned int cpu,
+ struct cpufreq_policy *policy,
+ struct sys_device *sys_dev)
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
@@ -900,6 +921,11 @@ int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
if (ret)
goto err_out_kobj_put;
}
+ if (cpufreq_driver->bios_limit) {
+ ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
+ if (ret)
+ goto err_out_kobj_put;
+ }
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
@@ -949,10 +975,13 @@ err_out_kobj_put:
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
- int ret = 0;
+ int ret = 0, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
unsigned int j;
+#ifdef CONFIG_HOTPLUG_CPU
+ int sibling;
+#endif
if (cpu_is_offline(cpu))
return 0;
@@ -999,7 +1028,19 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
INIT_WORK(&policy->update, handle_update);
/* Set governor before ->init, so that driver could check it */
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+#ifdef CONFIG_HOTPLUG_CPU
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cp->governor &&
+ (cpumask_test_cpu(cpu, cp->related_cpus))) {
+ policy->governor = cp->governor;
+ found = 1;
+ break;
+ }
+ }
+#endif
+ if (!found)
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
@@ -1111,7 +1152,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
#ifdef CONFIG_SMP
#ifdef CONFIG_HOTPLUG_CPU
- per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+ CPUFREQ_NAME_LEN);
#endif
/* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1177,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
continue;
dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
- per_cpu(cpufreq_cpu_governor, j) = data->governor;
+ strncpy(per_cpu(cpufreq_cpu_governor, j),
+ data->governor->name, CPUFREQ_NAME_LEN);
#endif
cpu_sys_dev = get_cpu_sysdev(j);
sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1649,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
+#endif
+
if (!governor)
return;
+#ifdef CONFIG_HOTPLUG_CPU
+ for_each_present_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+ strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+ }
+#endif
+
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
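Among the cpufreq.c changes above, the new read-only bios_limit attribute is only created when the low-level driver provides a ->bios_limit callback; if the callback later fails, show_bios_limit() falls back to cpuinfo.max_freq. A minimal userspace sketch for reading it, assuming the conventional per-CPU sysfs layout (the path and the choice of cpu0 are illustrative, not part of the patch):

/* Hypothetical reader for the new attribute; prints the BIOS/HW limit in kHz. */
#include <stdio.h>

int main(void)
{
        unsigned int khz;
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/bios_limit", "r");

        if (!f)
                return 1;       /* driver has no ->bios_limit, file not exported */
        if (fscanf(f, "%u", &khz) == 1)
                printf("BIOS/HW limit: %u kHz\n", khz);
        fclose(f);
        return 0;
}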
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bc33ddc9c97..599a40b25cb 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall)
- *wall = cur_wall_time;
+ *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return idle_time;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -164,20 +164,22 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
"sysfs file is deprecated - used by: %s\n", current->comm);
return sprintf(buf, "%u\n", -1U);
}
-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
#define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
@@ -186,7 +188,7 @@ define_one_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object) \
static ssize_t show_##file_name \
-(struct cpufreq_policy *unused, char *buf) \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}
@@ -197,8 +199,40 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+/*** delete after deprecation time ***/
+#define DEPRECATION_MSG(file_name) \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n");
+
+#define show_one_old(file_name) \
+static ssize_t show_##file_name##_old \
+(struct cpufreq_policy *unused, char *buf) \
+{ \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n"); \
+ return show_##file_name(NULL, NULL, buf); \
+}
+show_one_old(sampling_rate);
+show_one_old(sampling_down_factor);
+show_one_old(up_threshold);
+show_one_old(down_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(freq_step);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+
+#define define_one_ro_old(object, _name) \
+static struct freq_attr object = \
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+
+/*** delete after deprecation time ***/
+
+static ssize_t store_sampling_down_factor(struct kobject *a,
+ struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -214,8 +248,8 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -231,8 +265,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -251,8 +285,8 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
- const char *buf, size_t count)
+static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -272,8 +306,8 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -308,8 +342,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
return count;
}
-static ssize_t store_freq_step(struct cpufreq_policy *policy,
- const char *buf, size_t count)
+static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
@@ -331,7 +365,7 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
}
#define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
@@ -358,6 +392,53 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
+/*** delete after deprecation time ***/
+
+#define write_one_old(file_name) \
+static ssize_t store_##file_name##_old \
+(struct cpufreq_policy *unused, const char *buf, size_t count) \
+{ \
+ printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
+ "interface is deprecated - " #file_name "\n"); \
+ return store_##file_name(NULL, NULL, buf, count); \
+}
+write_one_old(sampling_rate);
+write_one_old(sampling_down_factor);
+write_one_old(up_threshold);
+write_one_old(down_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(freq_step);
+
+#define define_one_rw_old(object, _name) \
+static struct freq_attr object = \
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(sampling_down_factor_old, sampling_down_factor);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(down_threshold_old, down_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(freq_step_old, freq_step);
+
+static struct attribute *dbs_attributes_old[] = {
+ &sampling_rate_max_old.attr,
+ &sampling_rate_min_old.attr,
+ &sampling_rate_old.attr,
+ &sampling_down_factor_old.attr,
+ &up_threshold_old.attr,
+ &down_threshold_old.attr,
+ &ignore_nice_load_old.attr,
+ &freq_step_old.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group_old = {
+ .attrs = dbs_attributes_old,
+ .name = "conservative",
+};
+
+/*** delete after deprecation time ***/
+
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -530,7 +611,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
if (rc) {
mutex_unlock(&dbs_mutex);
return rc;
@@ -564,6 +645,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+
/*
* conservative does not implement micro like ondemand
* governor, thus we are bound to jiffies/HZ
@@ -591,7 +679,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
@@ -605,6 +693,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
CPUFREQ_TRANSITION_NOTIFIER);
mutex_unlock(&dbs_mutex);
+ if (!dbs_enable)
+ sysfs_remove_group(cpufreq_global_kobject,
+ &dbs_attr_group);
break;
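With this change the conservative tunables are registered once on cpufreq_global_kobject, i.e. a single conservative/ directory shared by all CPUs, while the old per-policy group survives only as a deprecated alias that emits a printk_once() warning when touched. A hedged userspace sketch that prefers the new global location and falls back to the deprecated per-policy file (the paths and the 200000 usec value are illustrative assumptions about the usual sysfs layout, not part of the patch):

/* Hypothetical helper: writes a conservative sampling_rate in usecs. */
#include <stdio.h>

static int write_rate(const char *path, unsigned int usecs)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%u\n", usecs);
        return fclose(f);
}

int main(void)
{
        if (!write_rate("/sys/devices/system/cpu/cpufreq/conservative/sampling_rate", 200000))
                return 0;
        /* fall back to the deprecated per-policy location */
        return write_rate("/sys/devices/system/cpu/cpu0/cpufreq/conservative/sampling_rate", 200000) ? 1 : 0;
}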
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 071699de50e..4b34ade2332 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
idle_time = cputime64_sub(cur_wall_time, busy_time);
if (wall)
- *wall = cur_wall_time;
+ *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return idle_time;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
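In both governors the jiffy-based fallback now reports wall and idle time in microseconds rather than raw jiffies, matching the microsecond-based accounting used on the tickless path. The downstream load estimate is a pure ratio, so the unit change drops out; roughly (an illustrative sketch with made-up names, not the governors' exact code):

/* Illustrative: per-CPU load from two samples of wall/idle counters,
 * both expressed in usecs after this patch. */
static unsigned int compute_load(unsigned long long prev_wall,
                                 unsigned long long cur_wall,
                                 unsigned long long prev_idle,
                                 unsigned long long cur_idle)
{
        unsigned long long wall = cur_wall - prev_wall;
        unsigned long long idle = cur_idle - prev_idle;

        if (!wall || wall < idle)
                return 0;
        return (unsigned int)(100 * (wall - idle) / wall);
}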