From 731e0cc639364646d36981d90ab0b6af12b8face Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Wed, 11 Aug 2010 17:02:43 -0700 Subject: cpufreq: OMAP: cleanup for multi-SoC support, move into drivers/cpufreq Move OMAP cpufreq driver from arch/arm/mach-omap2 into drivers/cpufreq, along with a few cleanups: - generalize support for better handling of different SoCs in the OMAP - use OPP layer instead of OMAP clock internals for frequency table init Signed-off-by: Santosh Shilimkar [khilman@ti.com: move to drivers] Signed-off-by: Kevin Hilman --- drivers/cpufreq/Makefile | 1 + drivers/cpufreq/omap-cpufreq.c | 188 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 189 insertions(+) create mode 100644 drivers/cpufreq/omap-cpufreq.c (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index a48bc02cd76..ce75fcbcca4 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o +obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c new file mode 100644 index 00000000000..a6b2be7ea5a --- /dev/null +++ b/drivers/cpufreq/omap-cpufreq.c @@ -0,0 +1,188 @@ +/* + * CPU frequency scaling for OMAP + * + * Copyright (C) 2005 Nokia Corporation + * Written by Tony Lindgren + * + * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King + * + * Copyright (C) 2007-2011 Texas Instruments, Inc. + * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#define VERY_HI_RATE 900000000 + +static struct cpufreq_frequency_table *freq_table; +static struct clk *mpu_clk; + +static int omap_verify_speed(struct cpufreq_policy *policy) +{ + if (freq_table) + return cpufreq_frequency_table_verify(policy, freq_table); + + if (policy->cpu) + return -EINVAL; + + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + + policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000; + policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000; + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static unsigned int omap_getspeed(unsigned int cpu) +{ + unsigned long rate; + + if (cpu) + return 0; + + rate = clk_get_rate(mpu_clk) / 1000; + return rate; +} + +static int omap_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + int ret = 0; + struct cpufreq_freqs freqs; + + /* Ensure desired rate is within allowed range. Some govenors + * (ondemand) will just pass target_freq=0 to get the minimum. 
*/ + if (target_freq < policy->min) + target_freq = policy->min; + if (target_freq > policy->max) + target_freq = policy->max; + + freqs.old = omap_getspeed(0); + freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; + freqs.cpu = 0; + + if (freqs.old == freqs.new) + return ret; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + +#ifdef CONFIG_CPU_FREQ_DEBUG + pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new); +#endif + + ret = clk_set_rate(mpu_clk, freqs.new * 1000); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return ret; +} + +static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + struct device *mpu_dev; + + if (cpu_is_omap24xx()) + mpu_clk = clk_get(NULL, "virt_prcm_set"); + else if (cpu_is_omap34xx()) + mpu_clk = clk_get(NULL, "dpll1_ck"); + else if (cpu_is_omap44xx()) + mpu_clk = clk_get(NULL, "dpll_mpu_ck"); + + if (IS_ERR(mpu_clk)) + return PTR_ERR(mpu_clk); + + if (policy->cpu != 0) + return -EINVAL; + + policy->cur = policy->min = policy->max = omap_getspeed(0); + + mpu_dev = omap2_get_mpuss_device(); + if (!mpu_dev) { + pr_warning("%s: unable to get the mpu device\n", __func__); + return -EINVAL; + } + opp_init_cpufreq_table(mpu_dev, &freq_table); + + if (freq_table) { + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (!result) + cpufreq_frequency_table_get_attr(freq_table, + policy->cpu); + } else { + policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000; + policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, + VERY_HI_RATE) / 1000; + } + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + policy->cur = omap_getspeed(0); + + /* FIXME: what's the actual transition time? */ + policy->cpuinfo.transition_latency = 300 * 1000; + + return 0; +} + +static int omap_cpu_exit(struct cpufreq_policy *policy) +{ + clk_exit_cpufreq_table(&freq_table); + clk_put(mpu_clk); + return 0; +} + +static struct freq_attr *omap_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver omap_driver = { + .flags = CPUFREQ_STICKY, + .verify = omap_verify_speed, + .target = omap_target, + .get = omap_getspeed, + .init = omap_cpu_init, + .exit = omap_cpu_exit, + .name = "omap", + .attr = omap_cpufreq_attr, +}; + +static int __init omap_cpufreq_init(void) +{ + return cpufreq_register_driver(&omap_driver); +} + +static void __exit omap_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&omap_driver); +} + +MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs"); +MODULE_LICENSE("GPL"); +module_init(omap_cpufreq_init); +module_exit(omap_cpufreq_exit); -- cgit v1.2.3-70-g09d2 From 46c12216c81b470b957d7fdefd8630efc2edddd0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 21 Sep 2011 16:53:00 -0700 Subject: cpufreq: OMAP: Add SMP support for OMAP4+ On OMAP SMP configuartion, both processors share the voltage and clock. So both CPUs needs to be scaled together and hence needs software co-ordination. Also, update lpj with reference value to avoid progressive error. Adjust _both_ the per-cpu loops_per_jiffy and global lpj. Calibrate them with with reference to the initial values to avoid a progressively bigger and bigger error in the value over time. While at this, re-use the notifiers for UP/SMP since on UP machine or UP_ON_SMP policy->cpus mask would contain only the boot CPU. Based on initial SMP support by Santosh Shilimkar. 
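As an illustration only (a user-space sketch, not kernel code; the frequencies and the starting lpj value below are made up), this shows why the per-CPU and global values are rescaled from a saved reference rather than from the previous, already-rounded result. The helper mirrors the integer math of cpufreq_scale(old, div, mult):

  #include <stdio.h>

  /* roughly the integer math of the kernel's cpufreq_scale(old, div, mult) */
  static unsigned long scale(unsigned long old, unsigned int div, unsigned int mult)
  {
      return (unsigned long)((unsigned long long)old * mult / div);
  }

  int main(void)
  {
      /* hypothetical MPU frequencies in kHz and a hypothetical boot-time lpj */
      unsigned int freq[] = { 600000, 550000, 600000, 550000, 600000 };
      unsigned long lpj_chained = 4997120;   /* rescaled from the previous value */
      const unsigned long lpj_ref = 4997120; /* always rescaled from the reference */
      const unsigned int ref_freq = 600000;

      for (int i = 1; i < 5; i++) {
          lpj_chained = scale(lpj_chained, freq[i - 1], freq[i]);
          printf("at %u kHz: chained=%lu from-ref=%lu\n", freq[i],
                 lpj_chained, scale(lpj_ref, ref_freq, freq[i]));
      }
      return 0; /* the chained value drifts low; the reference-based one does not */
  }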
Signed-off-by: Russell King Signed-off-by: Santosh Shilimkar [khilman@ti.com: due to overlap/rework, combined original Santosh patch and Russell's rework] Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 81 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 71 insertions(+), 10 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index a6b2be7ea5a..1953f9d082a 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -23,9 +23,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -35,6 +37,16 @@ #define VERY_HI_RATE 900000000 +#ifdef CONFIG_SMP +struct lpj_info { + unsigned long ref; + unsigned int freq; +}; + +static DEFINE_PER_CPU(struct lpj_info, lpj_ref); +static struct lpj_info global_lpj_ref; +#endif + static struct cpufreq_frequency_table *freq_table; static struct clk *mpu_clk; @@ -60,7 +72,7 @@ static unsigned int omap_getspeed(unsigned int cpu) { unsigned long rate; - if (cpu) + if (cpu >= NR_CPUS) return 0; rate = clk_get_rate(mpu_clk) / 1000; @@ -71,7 +83,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - int ret = 0; + int i, ret = 0; struct cpufreq_freqs freqs; /* Ensure desired rate is within allowed range. Some govenors @@ -81,22 +93,57 @@ static int omap_target(struct cpufreq_policy *policy, if (target_freq > policy->max) target_freq = policy->max; - freqs.old = omap_getspeed(0); + freqs.old = omap_getspeed(policy->cpu); freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; - freqs.cpu = 0; + freqs.cpu = policy->cpu; if (freqs.old == freqs.new) return ret; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } #ifdef CONFIG_CPU_FREQ_DEBUG pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new); #endif ret = clk_set_rate(mpu_clk, freqs.new * 1000); + freqs.new = omap_getspeed(policy->cpu); + +#ifdef CONFIG_SMP + /* + * Note that loops_per_jiffy is not updated on SMP systems in + * cpufreq driver. So, update the per-CPU loops_per_jiffy value + * on frequency transition. We need to update all dependent CPUs. 
+ */ + for_each_cpu(i, policy->cpus) { + struct lpj_info *lpj = &per_cpu(lpj_ref, i); + if (!lpj->freq) { + lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy; + lpj->freq = freqs.old; + } + + per_cpu(cpu_data, i).loops_per_jiffy = + cpufreq_scale(lpj->ref, lpj->freq, freqs.new); + } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + /* And don't forget to adjust the global one */ + if (!global_lpj_ref.freq) { + global_lpj_ref.ref = loops_per_jiffy; + global_lpj_ref.freq = freqs.old; + } + loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq, + freqs.new); +#endif + + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } return ret; } @@ -105,6 +152,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) { int result = 0; struct device *mpu_dev; + static cpumask_var_t cpumask; if (cpu_is_omap24xx()) mpu_clk = clk_get(NULL, "virt_prcm_set"); @@ -116,12 +164,12 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) if (IS_ERR(mpu_clk)) return PTR_ERR(mpu_clk); - if (policy->cpu != 0) + if (policy->cpu >= NR_CPUS) return -EINVAL; - policy->cur = policy->min = policy->max = omap_getspeed(0); - + policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); mpu_dev = omap2_get_mpuss_device(); + if (!mpu_dev) { pr_warning("%s: unable to get the mpu device\n", __func__); return -EINVAL; @@ -141,7 +189,20 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; - policy->cur = omap_getspeed(0); + policy->cur = omap_getspeed(policy->cpu); + + /* + * On OMAP SMP configuartion, both processors share the voltage + * and clock. So both CPUs needs to be scaled together and hence + * needs software co-ordination. Use cpufreq affected_cpus + * interface to handle this scenario. Additional is_smp() check + * is to keep SMP_ON_UP build working. + */ + if (is_smp()) { + policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; + cpumask_or(cpumask, cpumask_of(policy->cpu), cpumask); + cpumask_copy(policy->cpus, cpumask); + } /* FIXME: what's the actual transition time? */ policy->cpuinfo.transition_latency = 300 * 1000; -- cgit v1.2.3-70-g09d2 From ed8ce00c52fb49aca299b79513bbfcee975442bc Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Tue, 7 Jun 2011 13:57:52 -0700 Subject: cpufreq: OMAP: Enable all CPUs in shared policy mask Enable all CPUs in the shared policy in the CPU init callback. Otherwise, the governor CPUFREQ_GOV_START event is invoked with a policy that only includes the first CPU, leaving other CPUs uninitialized by the governor. 
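For illustration, a toy user-space model (not the cpufreq core) of why the mask matters: a governor's START path only walks the CPUs present in policy->cpus, so a mask containing just the first CPU leaves the second one without per-CPU governor state:

  #include <stdio.h>

  #define NR_CPUS 2

  /* toy governor START handler: it can only see the CPUs in the policy mask */
  static void governor_start(unsigned int cpus_mask, int ready[NR_CPUS])
  {
      for (int cpu = 0; cpu < NR_CPUS; cpu++)
          if (cpus_mask & (1u << cpu))
              ready[cpu] = 1; /* e.g. sample idle time, arm the sampling timer */
  }

  int main(void)
  {
      int ready[NR_CPUS] = { 0 };

      governor_start(1u << 0, ready);               /* only CPU0 in the mask */
      printf("cpu0=%d cpu1=%d  <- CPU1 left out\n", ready[0], ready[1]);

      governor_start((1u << 0) | (1u << 1), ready); /* cpumask_setall() case */
      printf("cpu0=%d cpu1=%d\n", ready[0], ready[1]);
      return 0;
  }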
Signed-off-by: Todd Poynor Acked-by: Santosh Shilimkar Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 1953f9d082a..3f5a816a64b 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -152,7 +152,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) { int result = 0; struct device *mpu_dev; - static cpumask_var_t cpumask; if (cpu_is_omap24xx()) mpu_clk = clk_get(NULL, "virt_prcm_set"); @@ -200,8 +199,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) */ if (is_smp()) { policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; - cpumask_or(cpumask, cpumask_of(policy->cpu), cpumask); - cpumask_copy(policy->cpus, cpumask); + cpumask_setall(policy->cpus); } /* FIXME: what's the actual transition time? */ -- cgit v1.2.3-70-g09d2 From 022ac03b45d6899219539894cff3c7ce5bd990f9 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Mon, 6 Jun 2011 21:05:29 -0500 Subject: cpufreq: OMAP: notify even with bad boot frequency Sometimes, bootloaders starts up with a frequency which is not in the OPP table. At cpu_init, policy->cur contains the frequency we pick at boot. It is possible that system might have fixed it's boot frequency later on as part of power initialization. After this condition, the first call to omap_target results in the following: omap_getspeed(actual device frequency) != policy->cur(frequency that cpufreq thinks that the system is at), and it is possible that freqs.old == freqs.new (because the governor requested a scale down). We exit without triggering the notifiers in the current code, which does'nt let code which depends on cpufreq_notify_transition to have accurate information as to what the system frequency is. Instead, we do a normal transition if policy->cur is wrong, then, freqs.old will be the actual cpu frequency, freqs.new will be the actual new cpu frequency and all required notifiers have the accurate information. Acked-by: Nishanth Menon Signed-off-by: Colin Cross Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 3f5a816a64b..0a5d95c4f8e 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -97,7 +97,7 @@ static int omap_target(struct cpufreq_policy *policy, freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; freqs.cpu = policy->cpu; - if (freqs.old == freqs.new) + if (freqs.old == freqs.new && policy->cur == freqs.new) return ret; /* notifiers */ -- cgit v1.2.3-70-g09d2 From 08ca3e3b8ddf0e75f734d46b31518b97256d2c17 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Wed, 25 May 2011 16:38:46 -0700 Subject: cpufreq: OMAP: move clk name decision to init Clk name does'nt need to dynamically detected during clk init. move them off to driver initialization, if we dont have a clk name, there is no point in registering the driver anyways. The actual clk get and put is left at cpu_init and exit functions. 
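To make the intended flow concrete, a minimal sketch in plain C (the enum stands in for the cpu_is_omap*() checks; the clock names are the ones used by this driver): the name is resolved once when the driver registers, and registration is refused for unknown silicon, while clk_get()/clk_put() stay in the per-CPU init and exit paths:

  #include <stdio.h>

  enum soc { SOC_OMAP24XX, SOC_OMAP34XX, SOC_OMAP44XX, SOC_UNKNOWN };

  static const char *mpu_clk_name_for(enum soc soc)
  {
      switch (soc) {
      case SOC_OMAP24XX: return "virt_prcm_set";
      case SOC_OMAP34XX: return "dpll1_ck";
      case SOC_OMAP44XX: return "dpll_mpu_ck";
      default:           return NULL;
      }
  }

  static int driver_init(enum soc soc)
  {
      const char *name = mpu_clk_name_for(soc);

      if (!name) {
          fprintf(stderr, "unsupported silicon, not registering\n");
          return -1; /* analogous to returning -EINVAL before registration */
      }
      printf("registering cpufreq driver, MPU clock \"%s\"\n", name);
      return 0;
  }

  int main(void)
  {
      driver_init(SOC_OMAP34XX);
      driver_init(SOC_UNKNOWN);
      return 0;
  }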
Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 0a5d95c4f8e..3651825e7fb 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -49,6 +49,7 @@ static struct lpj_info global_lpj_ref; static struct cpufreq_frequency_table *freq_table; static struct clk *mpu_clk; +static char *mpu_clk_name; static int omap_verify_speed(struct cpufreq_policy *policy) { @@ -153,13 +154,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) int result = 0; struct device *mpu_dev; - if (cpu_is_omap24xx()) - mpu_clk = clk_get(NULL, "virt_prcm_set"); - else if (cpu_is_omap34xx()) - mpu_clk = clk_get(NULL, "dpll1_ck"); - else if (cpu_is_omap44xx()) - mpu_clk = clk_get(NULL, "dpll_mpu_ck"); - + mpu_clk = clk_get(NULL, mpu_clk_name); if (IS_ERR(mpu_clk)) return PTR_ERR(mpu_clk); @@ -233,6 +228,17 @@ static struct cpufreq_driver omap_driver = { static int __init omap_cpufreq_init(void) { + if (cpu_is_omap24xx()) + mpu_clk_name = "virt_prcm_set"; + else if (cpu_is_omap34xx()) + mpu_clk_name = "dpll1_ck"; + else if (cpu_is_omap44xx()) + mpu_clk_name = "dpll_mpu_ck"; + + if (!mpu_clk_name) { + pr_err("%s: unsupported Silicon?\n", __func__); + return -EINVAL; + } return cpufreq_register_driver(&omap_driver); } -- cgit v1.2.3-70-g09d2 From a820ffa8fdbcaa4f5fe32e88db58acca27abbc76 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Wed, 25 May 2011 16:38:47 -0700 Subject: cpufreq: OMAP: deny initialization if no mpudev if we do not have mpu_dev we normally fail in cpu_init. It is better to fail driver registration if the devices are not available. 
Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 3651825e7fb..dda32fd0343 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -50,6 +50,7 @@ static struct lpj_info global_lpj_ref; static struct cpufreq_frequency_table *freq_table; static struct clk *mpu_clk; static char *mpu_clk_name; +static struct device *mpu_dev; static int omap_verify_speed(struct cpufreq_policy *policy) { @@ -152,7 +153,6 @@ static int omap_target(struct cpufreq_policy *policy, static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) { int result = 0; - struct device *mpu_dev; mpu_clk = clk_get(NULL, mpu_clk_name); if (IS_ERR(mpu_clk)) @@ -162,12 +162,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) return -EINVAL; policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); - mpu_dev = omap2_get_mpuss_device(); - - if (!mpu_dev) { - pr_warning("%s: unable to get the mpu device\n", __func__); - return -EINVAL; - } opp_init_cpufreq_table(mpu_dev, &freq_table); if (freq_table) { @@ -239,6 +233,13 @@ static int __init omap_cpufreq_init(void) pr_err("%s: unsupported Silicon?\n", __func__); return -EINVAL; } + + mpu_dev = omap2_get_mpuss_device(); + if (!mpu_dev) { + pr_warning("%s: unable to get the mpu device\n", __func__); + return -EINVAL; + } + return cpufreq_register_driver(&omap_driver); } -- cgit v1.2.3-70-g09d2 From bf2a359d504bca3ef71a65e8759d51af4b17055a Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Thu, 26 May 2011 19:39:17 -0700 Subject: cpufreq: OMAP: dont support !freq_table OMAP2+ all have frequency tables, hence the hacks we had for older silicon do not need to be carried forward. As part of this change, use cpufreq_frequency_table_target to find the best match for frequency requested. Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 67 +++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 34 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index dda32fd0343..eecb0961c6b 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -35,8 +35,6 @@ #include -#define VERY_HI_RATE 900000000 - #ifdef CONFIG_SMP struct lpj_info { unsigned long ref; @@ -54,20 +52,9 @@ static struct device *mpu_dev; static int omap_verify_speed(struct cpufreq_policy *policy) { - if (freq_table) - return cpufreq_frequency_table_verify(policy, freq_table); - - if (policy->cpu) + if (!freq_table) return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - - policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000; - policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000; - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, - policy->cpuinfo.max_freq); - return 0; + return cpufreq_frequency_table_verify(policy, freq_table); } static unsigned int omap_getspeed(unsigned int cpu) @@ -85,18 +72,31 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - int i, ret = 0; + unsigned int i; + int ret = 0; struct cpufreq_freqs freqs; - /* Ensure desired rate is within allowed range. 
Some govenors - * (ondemand) will just pass target_freq=0 to get the minimum. */ - if (target_freq < policy->min) - target_freq = policy->min; - if (target_freq > policy->max) - target_freq = policy->max; + if (!freq_table) { + dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__, + policy->cpu); + return -EINVAL; + } + + ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, + relation, &i); + if (ret) { + dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n", + __func__, policy->cpu, target_freq, ret); + return ret; + } + freqs.new = freq_table[i].frequency; + if (!freqs.new) { + dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__, + policy->cpu, target_freq); + return -EINVAL; + } freqs.old = omap_getspeed(policy->cpu); - freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; freqs.cpu = policy->cpu; if (freqs.old == freqs.new && policy->cur == freqs.new) @@ -162,19 +162,18 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) return -EINVAL; policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); - opp_init_cpufreq_table(mpu_dev, &freq_table); - - if (freq_table) { - result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, - policy->cpu); - } else { - policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000; - policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, - VERY_HI_RATE) / 1000; + result = opp_init_cpufreq_table(mpu_dev, &freq_table); + + if (result) { + dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", + __func__, policy->cpu, result); + return result; } + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (!result) + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; policy->cur = omap_getspeed(policy->cpu); -- cgit v1.2.3-70-g09d2 From ffe4f0f115420e3843aa0d8dc1baf31ea5b6fcf2 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Thu, 26 May 2011 19:39:18 -0700 Subject: cpufreq: OMAP: only supports OPP library OMAP2 is the only family using clk_[init|exit]_cpufreq_table, however, the cpufreq code does not currently use clk_init_cpufreq_table. As a result, it is unusuable for OMAP2 and only usable only on platforms using OPP library. Remove the unbalanced clk_exit_cpufreq_table(). Any platforms where OPPs are not availble will fail on init because a freq table will not be properly initialized. 
Signed-off-by: Nishanth Menon [khilman@ti.com: changelog edits, and graceful failure mode changes] Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index eecb0961c6b..8f778b9dbb4 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -1,5 +1,5 @@ /* - * CPU frequency scaling for OMAP + * CPU frequency scaling for OMAP using OPP information * * Copyright (C) 2005 Nokia Corporation * Written by Tony Lindgren @@ -198,7 +198,6 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) static int omap_cpu_exit(struct cpufreq_policy *policy) { - clk_exit_cpufreq_table(&freq_table); clk_put(mpu_clk); return 0; } -- cgit v1.2.3-70-g09d2 From 11e04fdd98f0fd6edf1ad6eccb0db4d2f965c392 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Thu, 26 May 2011 19:39:19 -0700 Subject: cpufreq: OMAP: put clk if cpu_init failed Release the mpu_clk in fail paths. Reported-by: Todd Poynor Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 8f778b9dbb4..8c5419201ac 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -158,8 +158,10 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) if (IS_ERR(mpu_clk)) return PTR_ERR(mpu_clk); - if (policy->cpu >= NR_CPUS) - return -EINVAL; + if (policy->cpu >= NR_CPUS) { + result = -EINVAL; + goto fail_ck; + } policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); result = opp_init_cpufreq_table(mpu_dev, &freq_table); @@ -167,12 +169,14 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) if (result) { dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", __func__, policy->cpu, result); - return result; + goto fail_ck; } result = cpufreq_frequency_table_cpuinfo(policy, freq_table); if (!result) cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + else + goto fail_ck; policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; @@ -194,6 +198,10 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = 300 * 1000; return 0; + +fail_ck: + clk_put(mpu_clk); + return result; } static int omap_cpu_exit(struct cpufreq_policy *policy) -- cgit v1.2.3-70-g09d2 From 1c78217fc8c0983f5768a2d1c17c022f1079751e Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Thu, 26 May 2011 19:39:20 -0700 Subject: cpufreq: OMAP: fix freq_table leak We use a single frequency table for multiple CPUs. But, with OMAP4, since we have multiple CPUs, the cpu_init call for CPU1 causes freq_table previously allocated for CPU0 to be overwritten. In addition, we dont free the table on exit path. We solve this by maintaining an atomic type counter to ensure just a single table exists at a given time. 
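The counter implements the usual first-user-allocates, last-user-frees pattern. A minimal user-space sketch with C11 atomics (malloc/free stand in for opp_init_cpufreq_table()/opp_free_cpufreq_table()):

  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  static atomic_int users;
  static int *shared_table; /* stands in for the shared freq_table */

  static void cpu_init_path(void)
  {
      /* only the first caller builds the table; later CPUs reuse it */
      if (atomic_fetch_add(&users, 1) == 0)
          shared_table = malloc(16 * sizeof(*shared_table));
  }

  static void cpu_exit_path(void)
  {
      /* only the last caller frees it */
      if (atomic_fetch_sub(&users, 1) == 1) {
          free(shared_table);
          shared_table = NULL;
      }
  }

  int main(void)
  {
      cpu_init_path();  /* CPU0 allocates */
      cpu_init_path();  /* CPU1 reuses it: no overwrite, no leak */
      cpu_exit_path();
      cpu_exit_path();  /* table freed exactly once, here */
      printf("table=%p\n", (void *)shared_table);
      return 0;
  }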
Signed-off-by: Nishanth Menon Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 8c5419201ac..ad94b4f2892 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -46,6 +46,7 @@ static struct lpj_info global_lpj_ref; #endif static struct cpufreq_frequency_table *freq_table; +static atomic_t freq_table_users = ATOMIC_INIT(0); static struct clk *mpu_clk; static char *mpu_clk_name; static struct device *mpu_dev; @@ -150,6 +151,12 @@ static int omap_target(struct cpufreq_policy *policy, return ret; } +static inline void freq_table_free(void) +{ + if (atomic_dec_and_test(&freq_table_users)) + opp_free_cpufreq_table(mpu_dev, &freq_table); +} + static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) { int result = 0; @@ -164,7 +171,9 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) } policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); - result = opp_init_cpufreq_table(mpu_dev, &freq_table); + + if (atomic_inc_return(&freq_table_users) == 1) + result = opp_init_cpufreq_table(mpu_dev, &freq_table); if (result) { dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", @@ -173,10 +182,10 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) } result = cpufreq_frequency_table_cpuinfo(policy, freq_table); - if (!result) - cpufreq_frequency_table_get_attr(freq_table, policy->cpu); - else - goto fail_ck; + if (result) + goto fail_table; + + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; @@ -199,6 +208,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) return 0; +fail_table: + freq_table_free(); fail_ck: clk_put(mpu_clk); return result; @@ -206,6 +217,7 @@ fail_ck: static int omap_cpu_exit(struct cpufreq_policy *policy) { + freq_table_free(); clk_put(mpu_clk); return 0; } -- cgit v1.2.3-70-g09d2 From c1b547bc222f4027d9394b6bd8f4a6bb0bd7b1b4 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Fri, 30 Sep 2011 10:41:26 -0700 Subject: cpufreq: OMAP: fixup for omap_device changes, include Minor fixups to work starting with v3.2: - use the new omap_device API for getting a device by name. - need to include Signed-off-by: Kevin Hilman --- drivers/cpufreq/omap-cpufreq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index ad94b4f2892..5d04c57aae3 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,7 @@ #include #include #include +#include #include @@ -252,7 +254,7 @@ static int __init omap_cpufreq_init(void) return -EINVAL; } - mpu_dev = omap2_get_mpuss_device(); + mpu_dev = omap_device_get_by_hwmod_name("mpu"); if (!mpu_dev) { pr_warning("%s: unable to get the mpu device\n", __func__); return -EINVAL; -- cgit v1.2.3-70-g09d2 From 3292beb340c76884427faa1f5d6085719477d889 Mon Sep 17 00:00:00 2001 From: Glauber Costa Date: Mon, 28 Nov 2011 14:45:17 -0200 Subject: sched/accounting: Change cpustat fields to an array This patch changes fields in cpustat from a structure, to an u64 array. Math gets easier, and the code is more flexible. 
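A small stand-alone sketch of what the array form buys (the enum follows the patch; the counts are arbitrary): accumulation becomes plain u64 addition and can be written as a loop over indices instead of one cputime64_add() per named field:

  #include <stdint.h>
  #include <stdio.h>

  enum cpu_usage_stat {
      CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM, CPUTIME_SOFTIRQ,
      CPUTIME_IRQ, CPUTIME_IDLE, CPUTIME_IOWAIT, CPUTIME_STEAL,
      CPUTIME_GUEST, CPUTIME_GUEST_NICE, NR_STATS,
  };

  struct kernel_cpustat {
      uint64_t cpustat[NR_STATS];
  };

  int main(void)
  {
      /* two fake CPUs with arbitrary per-state counts */
      struct kernel_cpustat cpu[2] = {
          { .cpustat = { [CPUTIME_USER] = 100, [CPUTIME_SYSTEM] = 40, [CPUTIME_IDLE] = 900 } },
          { .cpustat = { [CPUTIME_USER] = 120, [CPUTIME_SYSTEM] = 35, [CPUTIME_IDLE] = 880 } },
      };
      uint64_t total[NR_STATS] = { 0 };

      /* system-wide totals: a loop replaces ten hand-written additions */
      for (int c = 0; c < 2; c++)
          for (int i = 0; i < NR_STATS; i++)
              total[i] += cpu[c].cpustat[i];

      printf("user=%llu system=%llu idle=%llu\n",
             (unsigned long long)total[CPUTIME_USER],
             (unsigned long long)total[CPUTIME_SYSTEM],
             (unsigned long long)total[CPUTIME_IDLE]);
      return 0;
  }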
Signed-off-by: Glauber Costa Reviewed-by: KAMEZAWA Hiroyuki Cc: Linus Torvalds Cc: Andrew Morton Cc: Paul Tuner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1322498719-2255-2-git-send-email-glommer@parallels.com Signed-off-by: Ingo Molnar --- arch/s390/appldata/appldata_os.c | 16 +++---- arch/x86/include/asm/i387.h | 2 +- drivers/cpufreq/cpufreq_conservative.c | 38 ++++++++--------- drivers/cpufreq/cpufreq_ondemand.c | 38 ++++++++--------- drivers/macintosh/rack-meter.c | 8 ++-- fs/proc/stat.c | 63 +++++++++++++-------------- fs/proc/uptime.c | 4 +- include/linux/kernel_stat.h | 36 ++++++++++------ kernel/sched/core.c | 78 +++++++++++++++++----------------- 9 files changed, 142 insertions(+), 141 deletions(-) (limited to 'drivers/cpufreq') diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 92f1cb745d6..4de031d6b76 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data) j = 0; for_each_online_cpu(i) { os_data->os_cpu[j].per_cpu_user = - cputime_to_jiffies(kstat_cpu(i).cpustat.user); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]); os_data->os_cpu[j].per_cpu_nice = - cputime_to_jiffies(kstat_cpu(i).cpustat.nice); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]); os_data->os_cpu[j].per_cpu_system = - cputime_to_jiffies(kstat_cpu(i).cpustat.system); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]); os_data->os_cpu[j].per_cpu_idle = - cputime_to_jiffies(kstat_cpu(i).cpustat.idle); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]); os_data->os_cpu[j].per_cpu_irq = - cputime_to_jiffies(kstat_cpu(i).cpustat.irq); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]); os_data->os_cpu[j].per_cpu_softirq = - cputime_to_jiffies(kstat_cpu(i).cpustat.softirq); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]); os_data->os_cpu[j].per_cpu_iowait = - cputime_to_jiffies(kstat_cpu(i).cpustat.iowait); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]); os_data->os_cpu[j].per_cpu_steal = - cputime_to_jiffies(kstat_cpu(i).cpustat.steal); + cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]); os_data->os_cpu[j].cpu_id = i; j++; } diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h index c9e09ea0564..6919e936345 100644 --- a/arch/x86/include/asm/i387.h +++ b/arch/x86/include/asm/i387.h @@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu) #ifdef CONFIG_SMP #define safe_address (__per_cpu_offset[0]) #else -#define safe_address (kstat_cpu(0).cpustat.user) +#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER]) #endif /* diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c97b468ee9f..118bff73fed 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -95,27 +95,26 @@ static struct dbs_tuners { .freq_step = 5, }; -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { - cputime64_t idle_time; + u64 idle_time; cputime64_t cur_wall_time; - cputime64_t busy_time; + u64 busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] + + kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time = 
cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; idle_time = cputime64_sub(cur_wall_time, busy_time); if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + *wall = jiffies_to_usecs(cur_wall_time); - return (cputime64_t)jiffies_to_usecs(idle_time); + return jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; } @@ -362,11 +361,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) j_dbs_info->prev_cpu_idle = cur_idle_time; if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; + u64 cur_nice; unsigned long cur_nice_jiffies; - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys @@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; idle_time += jiffies_to_usecs(cur_nice_jiffies); } @@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { + if (dbs_tuners_ins.ignore_nice) j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } this_dbs_info->down_skip = 0; this_dbs_info->requested_freq = policy->cur; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index fa8af4ebb1d..f3d327cee43 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -119,27 +119,26 @@ static struct dbs_tuners { .powersave_bias = 0, }; -static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, - cputime64_t *wall) +static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) { - cputime64_t idle_time; + u64 idle_time; cputime64_t cur_wall_time; - cputime64_t busy_time; + u64 busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] + + kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + 
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; idle_time = cputime64_sub(cur_wall_time, busy_time); if (wall) - *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + *wall = jiffies_to_usecs(cur_wall_time); - return (cputime64_t)jiffies_to_usecs(idle_time); + return jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) - dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } return count; @@ -455,11 +454,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) j_dbs_info->prev_cpu_iowait = cur_iowait_time; if (dbs_tuners_ins.ignore_nice) { - cputime64_t cur_nice; + u64 cur_nice; unsigned long cur_nice_jiffies; - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); + cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - + j_dbs_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys @@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); - j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; + j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; idle_time += jiffies_to_usecs(cur_nice_jiffies); } @@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &j_dbs_info->prev_cpu_wall); - if (dbs_tuners_ins.ignore_nice) { + if (dbs_tuners_ins.ignore_nice) j_dbs_info->prev_cpu_nice = - kstat_cpu(j).cpustat.nice; - } + kcpustat_cpu(j).cpustat[CPUTIME_NICE]; } this_dbs_info->cpu = cpu; this_dbs_info->rate_mult = 1; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 2637c139777..66d7f1c7baa 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -81,13 +81,13 @@ static int rackmeter_ignore_nice; */ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) { - cputime64_t retval; + u64 retval; - retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, - kstat_cpu(cpu).cpustat.iowait); + retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] + + kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; if (rackmeter_ignore_nice) - retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); + retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; return retval; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 42b274da92c..8a6ab666e9f 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -22,29 +22,27 @@ #define arch_idle_time(cpu) 0 #endif -static cputime64_t get_idle_time(int cpu) +static u64 get_idle_time(int cpu) { - u64 idle_time = get_cpu_idle_time_us(cpu, NULL); - cputime64_t idle; + u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); if (idle_time == -1ULL) { /* !NO_HZ so we can rely on cpustat.idle */ - idle = kstat_cpu(cpu).cpustat.idle; - idle = cputime64_add(idle, arch_idle_time(cpu)); + idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; + idle += arch_idle_time(cpu); } else idle = usecs_to_cputime(idle_time); return idle; } -static cputime64_t get_iowait_time(int cpu) +static u64 get_iowait_time(int cpu) { - u64 iowait_time 
= get_cpu_iowait_time_us(cpu, NULL); - cputime64_t iowait; + u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); if (iowait_time == -1ULL) /* !NO_HZ so we can rely on cpustat.iowait */ - iowait = kstat_cpu(cpu).cpustat.iowait; + iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; else iowait = usecs_to_cputime(iowait_time); @@ -55,33 +53,30 @@ static int show_stat(struct seq_file *p, void *v) { int i, j; unsigned long jif; - cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; - cputime64_t guest, guest_nice; + u64 user, nice, system, idle, iowait, irq, softirq, steal; + u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec boottime; user = nice = system = idle = iowait = - irq = softirq = steal = cputime64_zero; - guest = guest_nice = cputime64_zero; + irq = softirq = steal = 0; + guest = guest_nice = 0; getboottime(&boottime); jif = boottime.tv_sec; for_each_possible_cpu(i) { - user = cputime64_add(user, kstat_cpu(i).cpustat.user); - nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); - system = cputime64_add(system, kstat_cpu(i).cpustat.system); - idle = cputime64_add(idle, get_idle_time(i)); - iowait = cputime64_add(iowait, get_iowait_time(i)); - irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); - softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); - steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); - guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); - guest_nice = cputime64_add(guest_nice, - kstat_cpu(i).cpustat.guest_nice); - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; + nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(i); + iowait += get_iowait_time(i); + irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; + softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; + steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; + guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; + guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -106,16 +101,16 @@ static int show_stat(struct seq_file *p, void *v) (unsigned long long)cputime64_to_clock_t(guest_nice)); for_each_online_cpu(i) { /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = kstat_cpu(i).cpustat.user; - nice = kstat_cpu(i).cpustat.nice; - system = kstat_cpu(i).cpustat.system; + user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; + nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; + system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; idle = get_idle_time(i); iowait = get_iowait_time(i); - irq = kstat_cpu(i).cpustat.irq; - softirq = kstat_cpu(i).cpustat.softirq; - steal = kstat_cpu(i).cpustat.steal; - guest = kstat_cpu(i).cpustat.guest; - guest_nice = kstat_cpu(i).cpustat.guest_nice; + irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; + softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; + steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; + guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; + guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu " "%llu\n", diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 766b1d45605..0fb22e464e7 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v) struct timespec uptime; struct timespec idle; int i; - cputime_t idletime 
= cputime_zero; + u64 idletime = 0; for_each_possible_cpu(i) - idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); + idletime += kcpustat_cpu(i).cpustat[CPUTIME_IDLE]; do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0cce2db580c..2fbd9053c2d 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -15,21 +16,25 @@ * used by rstatd/perfmeter */ -struct cpu_usage_stat { - cputime64_t user; - cputime64_t nice; - cputime64_t system; - cputime64_t softirq; - cputime64_t irq; - cputime64_t idle; - cputime64_t iowait; - cputime64_t steal; - cputime64_t guest; - cputime64_t guest_nice; +enum cpu_usage_stat { + CPUTIME_USER, + CPUTIME_NICE, + CPUTIME_SYSTEM, + CPUTIME_SOFTIRQ, + CPUTIME_IRQ, + CPUTIME_IDLE, + CPUTIME_IOWAIT, + CPUTIME_STEAL, + CPUTIME_GUEST, + CPUTIME_GUEST_NICE, + NR_STATS, +}; + +struct kernel_cpustat { + u64 cpustat[NR_STATS]; }; struct kernel_stat { - struct cpu_usage_stat cpustat; #ifndef CONFIG_GENERIC_HARDIRQS unsigned int irqs[NR_IRQS]; #endif @@ -38,10 +43,13 @@ struct kernel_stat { }; DECLARE_PER_CPU(struct kernel_stat, kstat); +DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); -#define kstat_cpu(cpu) per_cpu(kstat, cpu) /* Must have preemption disabled for this to be meaningful. */ -#define kstat_this_cpu __get_cpu_var(kstat) +#define kstat_this_cpu (&__get_cpu_var(kstat)) +#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) +#define kstat_cpu(cpu) per_cpu(kstat, cpu) +#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) extern unsigned long long nr_context_switches(void); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 699ff1499a8..dbbe35ff93f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -896,14 +896,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) #ifdef CONFIG_IRQ_TIME_ACCOUNTING static int irqtime_account_hi_update(void) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + u64 *cpustat = kcpustat_this_cpu->cpustat; unsigned long flags; u64 latest_ns; int ret = 0; local_irq_save(flags); latest_ns = this_cpu_read(cpu_hardirq_time); - if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) + if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ])) ret = 1; local_irq_restore(flags); return ret; @@ -911,14 +911,14 @@ static int irqtime_account_hi_update(void) static int irqtime_account_si_update(void) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + u64 *cpustat = kcpustat_this_cpu->cpustat; unsigned long flags; u64 latest_ns; int ret = 0; local_irq_save(flags); latest_ns = this_cpu_read(cpu_softirq_time); - if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) + if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ])) ret = 1; local_irq_restore(flags); return ret; @@ -2500,8 +2500,10 @@ unlock: #endif DEFINE_PER_CPU(struct kernel_stat, kstat); +DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); EXPORT_PER_CPU_SYMBOL(kstat); +EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* * Return any ns on the sched_clock that have not yet been accounted in @@ -2563,8 +2565,9 @@ unsigned long long task_sched_runtime(struct task_struct *p) void account_user_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t tmp; + u64 *cpustat = 
kcpustat_this_cpu->cpustat; + u64 tmp; + int index; /* Add user time to process. */ p->utime = cputime_add(p->utime, cputime); @@ -2573,10 +2576,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime, /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); - if (TASK_NICE(p) > 0) - cpustat->nice = cputime64_add(cpustat->nice, tmp); - else - cpustat->user = cputime64_add(cpustat->user, tmp); + + index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; + cpustat[index] += tmp; cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ @@ -2592,8 +2594,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime, static void account_guest_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { - cputime64_t tmp; - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + u64 tmp; + u64 *cpustat = kcpustat_this_cpu->cpustat; tmp = cputime_to_cputime64(cputime); @@ -2605,11 +2607,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, /* Add guest time to cpustat. */ if (TASK_NICE(p) > 0) { - cpustat->nice = cputime64_add(cpustat->nice, tmp); - cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); + cpustat[CPUTIME_NICE] += tmp; + cpustat[CPUTIME_GUEST_NICE] += tmp; } else { - cpustat->user = cputime64_add(cpustat->user, tmp); - cpustat->guest = cputime64_add(cpustat->guest, tmp); + cpustat[CPUTIME_USER] += tmp; + cpustat[CPUTIME_GUEST] += tmp; } } @@ -2622,9 +2624,10 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime, */ static inline void __account_system_time(struct task_struct *p, cputime_t cputime, - cputime_t cputime_scaled, cputime64_t *target_cputime64) + cputime_t cputime_scaled, int index) { - cputime64_t tmp = cputime_to_cputime64(cputime); + u64 tmp = cputime_to_cputime64(cputime); + u64 *cpustat = kcpustat_this_cpu->cpustat; /* Add system time to process. */ p->stime = cputime_add(p->stime, cputime); @@ -2632,7 +2635,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, account_group_system_time(p, cputime); /* Add system time to cpustat. 
*/ - *target_cputime64 = cputime64_add(*target_cputime64, tmp); + cpustat[index] += tmp; cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); /* Account for system time used */ @@ -2649,8 +2652,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, void account_system_time(struct task_struct *p, int hardirq_offset, cputime_t cputime, cputime_t cputime_scaled) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t *target_cputime64; + int index; if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { account_guest_time(p, cputime, cputime_scaled); @@ -2658,13 +2660,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset, } if (hardirq_count() - hardirq_offset) - target_cputime64 = &cpustat->irq; + index = CPUTIME_IRQ; else if (in_serving_softirq()) - target_cputime64 = &cpustat->softirq; + index = CPUTIME_SOFTIRQ; else - target_cputime64 = &cpustat->system; + index = CPUTIME_SYSTEM; - __account_system_time(p, cputime, cputime_scaled, target_cputime64); + __account_system_time(p, cputime, cputime_scaled, index); } /* @@ -2673,10 +2675,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset, */ void account_steal_time(cputime_t cputime) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t cputime64 = cputime_to_cputime64(cputime); + u64 *cpustat = kcpustat_this_cpu->cpustat; + u64 cputime64 = cputime_to_cputime64(cputime); - cpustat->steal = cputime64_add(cpustat->steal, cputime64); + cpustat[CPUTIME_STEAL] += cputime64; } /* @@ -2685,14 +2687,14 @@ void account_steal_time(cputime_t cputime) */ void account_idle_time(cputime_t cputime) { - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t cputime64 = cputime_to_cputime64(cputime); + u64 *cpustat = kcpustat_this_cpu->cpustat; + u64 cputime64 = cputime_to_cputime64(cputime); struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) - cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); + cpustat[CPUTIME_IOWAIT] += cputime64; else - cpustat->idle = cputime64_add(cpustat->idle, cputime64); + cpustat[CPUTIME_IDLE] += cputime64; } static __always_inline bool steal_account_process_tick(void) @@ -2742,16 +2744,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, struct rq *rq) { cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); - cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); - struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + u64 tmp = cputime_to_cputime64(cputime_one_jiffy); + u64 *cpustat = kcpustat_this_cpu->cpustat; if (steal_account_process_tick()) return; if (irqtime_account_hi_update()) { - cpustat->irq = cputime64_add(cpustat->irq, tmp); + cpustat[CPUTIME_IRQ] += tmp; } else if (irqtime_account_si_update()) { - cpustat->softirq = cputime64_add(cpustat->softirq, tmp); + cpustat[CPUTIME_SOFTIRQ] += tmp; } else if (this_cpu_ksoftirqd() == p) { /* * ksoftirqd time do not get accounted in cpu_softirq_time. @@ -2759,7 +2761,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, * Also, p->stime needs to be updated for ksoftirqd. 
*/ __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - &cpustat->softirq); + CPUTIME_SOFTIRQ); } else if (user_tick) { account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); } else if (p == rq->idle) { @@ -2768,7 +2770,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); } else { __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - &cpustat->system); + CPUTIME_SYSTEM); } } -- cgit v1.2.3-70-g09d2 From a6a434124457fe64bb3980ceb2170505207db6e5 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 5 Dec 2011 18:22:01 +0000 Subject: [CPUFREQ] s3c64xx: Use pr_fmt() for consistent log messages They're already consistent but it saves remembering to do so. Signed-off-by: Mark Brown Signed-off-by: Dave Jones --- drivers/cpufreq/s3c64xx-cpufreq.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index 3475f65aeec..a5e72cb5f53 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -8,6 +8,8 @@ * published by the Free Software Foundation. */ +#define pr_fmt(fmt) "cpufreq: " fmt + #include #include #include @@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, if (freqs.old == freqs.new) return 0; - pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); + pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { - pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", + pr_err("Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err; } @@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, ret = clk_set_rate(armclk, freqs.new * 1000); if (ret < 0) { - pr_err("cpufreq: Failed to set rate %dkHz: %d\n", + pr_err("Failed to set rate %dkHz: %d\n", freqs.new, ret); goto err; } @@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, dvfs->vddarm_min, dvfs->vddarm_max); if (ret != 0) { - pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", + pr_err("Failed to set VDDARM for %dkHz: %d\n", freqs.new, ret); goto err_clk; } } #endif - pr_debug("cpufreq: Set actual frequency %lukHz\n", + pr_debug("Set actual frequency %lukHz\n", clk_get_rate(armclk) / 1000); return 0; @@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void) count = regulator_count_voltages(vddarm); if (count < 0) { - pr_err("cpufreq: Unable to check supported voltages\n"); + pr_err("Unable to check supported voltages\n"); } freq = s3c64xx_freq_table; @@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void) } if (!found) { - pr_debug("cpufreq: %dkHz unsupported by regulator\n", + pr_debug("%dkHz unsupported by regulator\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } @@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) return -EINVAL; if (s3c64xx_freq_table == NULL) { - pr_err("cpufreq: No frequency information for this CPU\n"); + pr_err("No frequency information for this CPU\n"); return -ENODEV; } armclk = clk_get(NULL, "armclk"); if (IS_ERR(armclk)) { - pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n", + pr_err("Unable to obtain 
ARMCLK: %ld\n", PTR_ERR(armclk)); return PTR_ERR(armclk); } @@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) vddarm = regulator_get(NULL, "vddarm"); if (IS_ERR(vddarm)) { ret = PTR_ERR(vddarm); - pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret); - pr_err("cpufreq: Only frequency scaling available\n"); + pr_err("Failed to obtain VDDARM: %d\n", ret); + pr_err("Only frequency scaling available\n"); vddarm = NULL; } else { s3c64xx_cpufreq_config_regulator(); } + + vddint = regulator_get(NULL, "vddint"); + if (IS_ERR(vddint)) { + ret = PTR_ERR(vddint); + pr_err("Failed to obtain VDDINT: %d\n", ret); + vddint = NULL; + } #endif freq = s3c64xx_freq_table; @@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) r = clk_round_rate(armclk, freq->frequency * 1000); r /= 1000; if (r != freq->frequency) { - pr_debug("cpufreq: %dkHz unsupported by clock\n", + pr_debug("%dkHz unsupported by clock\n", freq->frequency); freq->frequency = CPUFREQ_ENTRY_INVALID; } @@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); if (ret != 0) { - pr_err("cpufreq: Failed to configure frequency table: %d\n", + pr_err("Failed to configure frequency table: %d\n", ret); regulator_put(vddarm); clk_put(armclk); -- cgit v1.2.3-70-g09d2 From c8c430e2f65adf124b3a2b6cfffa4dfc8a6e49c2 Mon Sep 17 00:00:00 2001 From: Jaecheol Lee Date: Wed, 7 Dec 2011 11:43:42 +0900 Subject: [CPUFREQ] EXYNOS4210: Remove code about bus on cpufreq This patch removes code for bus on cpufreq because the code for bus frequency changing moves to busfreq driver. So code about bus on cpufreq is not necessary. Signed-off-by: Jaecheol Lee Signed-off-by: Jongpill Lee Signed-off-by: Kukjin Kim Signed-off-by: Dave Jones --- drivers/cpufreq/exynos4210-cpufreq.c | 174 +---------------------------------- 1 file changed, 1 insertion(+), 173 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index ab9741fab92..578956c5ee7 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -33,21 +33,13 @@ static struct clk *mout_mpll; static struct clk *mout_apll; static struct regulator *arm_regulator; -static struct regulator *int_regulator; static struct cpufreq_freqs freqs; -static unsigned int memtype; static unsigned int locking_frequency; static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); -enum exynos4_memory_type { - DDR2 = 4, - LPDDR2, - DDR3, -}; - enum cpufreq_level_index { L0, L1, L2, L3, CPUFREQ_LEVEL_END, }; @@ -99,87 +91,24 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { { 3, 0 }, }; -static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = { - /* - * Clock divider value for following - * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD - * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS } - */ - - /* DMC L0: 400MHz */ - { 3, 1, 1, 1, 1, 1, 3, 1 }, - - /* DMC L1: 400MHz */ - { 3, 1, 1, 1, 1, 1, 3, 1 }, - - /* DMC L2: 266.7MHz */ - { 7, 1, 1, 2, 1, 1, 3, 1 }, - - /* DMC L3: 200MHz */ - { 7, 1, 1, 3, 1, 1, 3, 1 }, -}; - -static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = { - /* - * Clock divider value for following - * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND } - */ - - /* ACLK200 L0: 200MHz */ - { 3, 7, 4, 5, 1 }, - - /* ACLK200 L1: 200MHz */ - { 3, 7, 4, 5, 1 }, - - /* ACLK200 L2: 160MHz */ - { 4, 7, 5, 7, 1 }, - - /* ACLK200 L3: 
133.3MHz */ - { 5, 7, 7, 7, 1 }, -}; - -static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = { - /* - * Clock divider value for following - * { DIVGDL/R, DIVGPL/R } - */ - - /* ACLK_GDL/R L0: 200MHz */ - { 3, 1 }, - - /* ACLK_GDL/R L1: 200MHz */ - { 3, 1 }, - - /* ACLK_GDL/R L2: 160MHz */ - { 4, 1 }, - - /* ACLK_GDL/R L3: 133.3MHz */ - { 5, 1 }, -}; - struct cpufreq_voltage_table { unsigned int index; /* any */ unsigned int arm_volt; /* uV */ - unsigned int int_volt; }; static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { { .index = L0, .arm_volt = 1200000, - .int_volt = 1100000, }, { .index = L1, .arm_volt = 1100000, - .int_volt = 1100000, }, { .index = L2, .arm_volt = 1000000, - .int_volt = 1000000, }, { .index = L3, .arm_volt = 900000, - .int_volt = 1000000, }, }; @@ -248,80 +177,6 @@ static void exynos4_set_clkdiv(unsigned int div_index) do { tmp = __raw_readl(S5P_CLKDIV_STATCPU1); } while (tmp & 0x11); - - /* Change Divider - DMC0 */ - - tmp = __raw_readl(S5P_CLKDIV_DMC0); - - tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK | - S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK | - S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK | - S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK); - - tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) | - (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) | - (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) | - (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) | - (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) | - (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) | - (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) | - (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_DMC0); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0); - } while (tmp & 0x11111111); - - /* Change Divider - TOP */ - - tmp = __raw_readl(S5P_CLKDIV_TOP); - - tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK | - S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK | - S5P_CLKDIV_TOP_ONENAND_MASK); - - tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) | - (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) | - (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) | - (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) | - (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_TOP); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_TOP); - } while (tmp & 0x11111); - - /* Change Divider - LEFTBUS */ - - tmp = __raw_readl(S5P_CLKDIV_LEFTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) | - (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_LEFTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS); - } while (tmp & 0x11); - - /* Change Divider - RIGHTBUS */ - - tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS); - - tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK); - - tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) | - (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT)); - - __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS); - - do { - tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS); - } while (tmp & 0x11); } static void exynos4_set_apll(unsigned int index) @@ -410,7 +265,7 @@ static int exynos4_target(struct cpufreq_policy *policy, unsigned int relation) { unsigned 
int index, old_index; - unsigned int arm_volt, int_volt; + unsigned int arm_volt; int err = -EINVAL; freqs.old = exynos4_getspeed(policy->cpu); @@ -440,7 +295,6 @@ static int exynos4_target(struct cpufreq_policy *policy, /* get the voltage value */ arm_volt = exynos4_volt_table[index].arm_volt; - int_volt = exynos4_volt_table[index].int_volt; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); @@ -448,7 +302,6 @@ static int exynos4_target(struct cpufreq_policy *policy, if (freqs.new > freqs.old) { /* Voltage up */ regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - regulator_set_voltage(int_regulator, int_volt, int_volt); } /* Clock Configuration Procedure */ @@ -458,7 +311,6 @@ static int exynos4_target(struct cpufreq_policy *policy, if (freqs.new < freqs.old) { /* Voltage down */ regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - regulator_set_voltage(int_regulator, int_volt, int_volt); } cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); @@ -636,27 +488,6 @@ static int __init exynos4_cpufreq_init(void) goto out; } - int_regulator = regulator_get(NULL, "vdd_int"); - if (IS_ERR(int_regulator)) { - printk(KERN_ERR "failed to get resource %s\n", "vdd_int"); - goto out; - } - - /* - * Check DRAM type. - * Because DVFS level is different according to DRAM type. - */ - memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET); - memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT); - memtype &= S5P_DMC0_MEMTYPE_MASK; - - if ((memtype < DDR2) && (memtype > DDR3)) { - printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype); - goto out; - } else { - printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype); - } - register_pm_notifier(&exynos4_cpufreq_nb); return cpufreq_register_driver(&exynos4_driver); @@ -677,9 +508,6 @@ out: if (!IS_ERR(arm_regulator)) regulator_put(arm_regulator); - if (!IS_ERR(int_regulator)) - regulator_put(int_regulator); - printk(KERN_ERR "%s: failed initialization\n", __func__); return -EINVAL; -- cgit v1.2.3-70-g09d2 From ba9d78031116a216d0e53aa629a584932e813375 Mon Sep 17 00:00:00 2001 From: Jaecheol Lee Date: Wed, 7 Dec 2011 11:43:56 +0900 Subject: [CPUFREQ] EXYNOS4210: Update frequency table for cpu divider This patch changes the frequency table for the CPU divider to achieve stable frequencies.
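The new exynos4_apll_pms_table entries in the patch below pack the PLL settings as (MDIV << 16) | (PDIV << 8) | SDIV. Assuming the usual S5P/Exynos4210 APLL relation FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)) with a 24 MHz reference clock (an assumption, not stated in the patch), the five words decode to exactly the 1200/1000/800/500/200 MHz levels, and the L1/L3 and L2/L4 pairs share MDIV/PDIV and differ only in SDIV, which is what later allows those transitions to change only the S divider. A minimal, illustrative user-space sketch of that decoding (not kernel code; field widths are assumed):

#include <stdio.h>

/*
 * Illustrative sketch only: decodes the (M << 16 | P << 8 | S) words used in
 * exynos4_apll_pms_table, assuming FIN = 24 MHz and
 * FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)).
 */
static unsigned long apll_fout_khz(unsigned int pms)
{
	unsigned int mdiv = (pms >> 16) & 0x3ff;
	unsigned int pdiv = (pms >> 8) & 0x3f;
	unsigned int sdiv = pms & 0x7;

	return (24000UL * mdiv / pdiv) >> (sdiv - 1);
}

int main(void)
{
	/* The five PMS words from the table below, L0..L4 */
	static const unsigned int pms[] = {
		(150 << 16) | (3 << 8) | 1,	/* expect 1200000 kHz */
		(250 << 16) | (6 << 8) | 1,	/* expect 1000000 kHz */
		(200 << 16) | (6 << 8) | 1,	/* expect  800000 kHz */
		(250 << 16) | (6 << 8) | 2,	/* expect  500000 kHz */
		(200 << 16) | (6 << 8) | 3,	/* expect  200000 kHz */
	};
	unsigned int i;

	for (i = 0; i < sizeof(pms) / sizeof(pms[0]); i++)
		printf("L%u: %lu kHz\n", i, apll_fout_khz(pms[i]));
	return 0;
}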
Signed-off-by: Jaecheol Lee Signed-off-by: Jongpill Lee Signed-off-by: Kukjin Kim Signed-off-by: Dave Jones --- drivers/cpufreq/exynos4210-cpufreq.c | 69 +++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 28 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 578956c5ee7..ba579e0a5c3 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -41,14 +41,15 @@ static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); enum cpufreq_level_index { - L0, L1, L2, L3, CPUFREQ_LEVEL_END, + L0, L1, L2, L3, L4, CPUFREQ_LEVEL_END, }; static struct cpufreq_frequency_table exynos4_freq_table[] = { - {L0, 1000*1000}, - {L1, 800*1000}, - {L2, 400*1000}, - {L3, 100*1000}, + {L0, 1200*1000}, + {L1, 1000*1000}, + {L2, 800*1000}, + {L3, 500*1000}, + {L4, 200*1000}, {0, CPUFREQ_TABLE_END}, }; @@ -59,17 +60,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = { * DIVATB, DIVPCLK_DBG, DIVAPLL } */ - /* ARM L0: 1000MHz */ - { 0, 3, 7, 3, 3, 0, 1 }, + /* ARM L0: 1200MHz */ + { 0, 3, 7, 3, 4, 1, 7 }, - /* ARM L1: 800MHz */ - { 0, 3, 7, 3, 3, 0, 1 }, + /* ARM L1: 1000MHz */ + { 0, 3, 7, 3, 4, 1, 7 }, - /* ARM L2: 400MHz */ - { 0, 1, 3, 1, 3, 0, 1 }, + /* ARM L2: 800MHz */ + { 0, 3, 7, 3, 3, 1, 7 }, - /* ARM L3: 100MHz */ - { 0, 0, 1, 0, 3, 1, 1 }, + /* ARM L3: 500MHz */ + { 0, 3, 7, 3, 3, 1, 7 }, + + /* ARM L4: 200MHz */ + { 0, 1, 3, 1, 3, 1, 0 }, }; static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { @@ -78,16 +82,19 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { * { DIVCOPY, DIVHPM } */ - /* ARM L0: 1000MHz */ - { 3, 0 }, + /* ARM L0: 1200MHz */ + { 5, 0 }, + + /* ARM L1: 1000MHz */ + { 4, 0 }, - /* ARM L1: 800MHz */ + /* ARM L2: 800MHz */ { 3, 0 }, - /* ARM L2: 400MHz */ + /* ARM L3: 500MHz */ { 3, 0 }, - /* ARM L3: 100MHz */ + /* ARM L4: 200MHz */ { 3, 0 }, }; @@ -99,31 +106,37 @@ struct cpufreq_voltage_table { static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { { .index = L0, - .arm_volt = 1200000, + .arm_volt = 1350000, }, { .index = L1, - .arm_volt = 1100000, + .arm_volt = 1300000, }, { .index = L2, - .arm_volt = 1000000, + .arm_volt = 1200000, }, { .index = L3, - .arm_volt = 900000, + .arm_volt = 1100000, + }, { + .index = L4, + .arm_volt = 1050000, }, }; static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { - /* APLL FOUT L0: 1000MHz */ + /* APLL FOUT L0: 1200MHz */ + ((150 << 16) | (3 << 8) | 1), + + /* APLL FOUT L1: 1000MHz */ ((250 << 16) | (6 << 8) | 1), - /* APLL FOUT L1: 800MHz */ + /* APLL FOUT L2: 800MHz */ ((200 << 16) | (6 << 8) | 1), - /* APLL FOUT L2 : 400MHz */ - ((200 << 16) | (6 << 8) | 2), + /* APLL FOUT L3: 500MHz */ + ((250 << 16) | (6 << 8) | 2), - /* APLL FOUT L3: 100MHz */ - ((200 << 16) | (6 << 8) | 4), + /* APLL FOUT L4: 200MHz */ + ((200 << 16) | (6 << 8) | 3), }; static int exynos4_verify_speed(struct cpufreq_policy *policy) -- cgit v1.2.3-70-g09d2 From 27f805dcb058178444a9a4e380c7dcb2fe2d3a94 Mon Sep 17 00:00:00 2001 From: Jaecheol Lee Date: Wed, 7 Dec 2011 11:44:09 +0900 Subject: [CPUFREQ] EXYNOS4210: cpufreq code is changed for stable working This patch is modify code for stable working 1. Remove unused register access code 2. 
Change sequence for frequency changing Signed-off-by: Jaecheol Lee Signed-off-by: Jonghwan Choi Signed-off-by: Jongpill Lee Signed-off-by: Kukjin Kim Signed-off-by: Dave Jones --- drivers/cpufreq/exynos4210-cpufreq.c | 101 ++++++++++++++++++++++------------- 1 file changed, 65 insertions(+), 36 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index ba579e0a5c3..a0af2d4448a 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -36,6 +36,10 @@ static struct regulator *arm_regulator; static struct cpufreq_freqs freqs; +struct cpufreq_clkdiv { + unsigned int clkdiv; +}; + static unsigned int locking_frequency; static bool frequency_locked; static DEFINE_MUTEX(cpufreq_lock); @@ -44,6 +48,8 @@ enum cpufreq_level_index { L0, L1, L2, L3, L4, CPUFREQ_LEVEL_END, }; +static struct cpufreq_clkdiv exynos4_clkdiv_table[CPUFREQ_LEVEL_END]; + static struct cpufreq_frequency_table exynos4_freq_table[] = { {L0, 1200*1000}, {L1, 1000*1000}, @@ -155,20 +161,7 @@ static void exynos4_set_clkdiv(unsigned int div_index) /* Change Divider - CPU0 */ - tmp = __raw_readl(S5P_CLKDIV_CPU); - - tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK | - S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK | - S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK | - S5P_CLKDIV_CPU0_APLL_MASK); - - tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) | - (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) | - (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) | - (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) | - (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) | - (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) | - (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT)); + tmp = exynos4_clkdiv_table[div_index].clkdiv; __raw_writel(tmp, S5P_CLKDIV_CPU); @@ -233,14 +226,12 @@ static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index unsigned int tmp; if (old_index > new_index) { - /* The frequency changing to L0 needs to change apll */ - if (freqs.new == exynos4_freq_table[L0].frequency) { - /* 1. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); - - /* 2. Change the apll m,p,s value */ - exynos4_set_apll(new_index); - } else { + /* + * L1/L3, L2/L4 Level change require + * to only change s divider value + */ + if (((old_index == L3) && (new_index == L1)) || + ((old_index == L4) && (new_index == L2))) { /* 1. Change the system clock divider values */ exynos4_set_clkdiv(new_index); @@ -249,24 +240,32 @@ static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index tmp &= ~(0x7 << 0); tmp |= (exynos4_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); - } - } - - else if (old_index < new_index) { - /* The frequency changing from L0 needs to change apll */ - if (freqs.old == exynos4_freq_table[L0].frequency) { - /* 1. Change the apll m,p,s value */ - exynos4_set_apll(new_index); - - /* 2. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); } else { + /* Clock Configuration Procedure */ + /* 1. Change the system clock divider values */ + exynos4_set_clkdiv(new_index); + /* 2. 
Change the apll m,p,s value */ + exynos4_set_apll(new_index); + } + } else if (old_index < new_index) { + /* + * L1/L3, L2/L4 Level change require + * to only change s divider value + */ + if (((old_index == L1) && (new_index == L3)) || + ((old_index == L2) && (new_index == L4))) { /* 1. Change just s value in apll m,p,s value */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~(0x7 << 0); tmp |= (exynos4_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); + /* 2. Change the system clock divider values */ + exynos4_set_clkdiv(new_index); + } else { + /* Clock Configuration Procedure */ + /* 1. Change the apll m,p,s value */ + exynos4_set_apll(new_index); /* 2. Change the system clock divider values */ exynos4_set_clkdiv(new_index); } @@ -320,14 +319,14 @@ static int exynos4_target(struct cpufreq_policy *policy, /* Clock Configuration Procedure */ exynos4_set_frequency(old_index, index); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + /* control regulator */ if (freqs.new < freqs.old) { /* Voltage down */ regulator_set_voltage(arm_regulator, arm_volt, arm_volt); } - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - out: mutex_unlock(&cpufreq_lock); return err; @@ -438,7 +437,12 @@ static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy) * Each cpu is bound to the same speed. * So the affected cpu is all of the cpus. */ - cpumask_setall(policy->cpus); + if (!cpu_online(1)) { + cpumask_copy(policy->related_cpus, cpu_possible_mask); + cpumask_copy(policy->cpus, cpu_online_mask); + } else { + cpumask_setall(policy->cpus); + } ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table); if (ret) @@ -477,6 +481,9 @@ static struct cpufreq_driver exynos4_driver = { static int __init exynos4_cpufreq_init(void) { + int i; + unsigned int tmp; + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -503,6 +510,28 @@ static int __init exynos4_cpufreq_init(void) register_pm_notifier(&exynos4_cpufreq_nb); + tmp = __raw_readl(S5P_CLKDIV_CPU); + + for (i = L0; i < CPUFREQ_LEVEL_END; i++) { + tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | + S5P_CLKDIV_CPU0_COREM0_MASK | + S5P_CLKDIV_CPU0_COREM1_MASK | + S5P_CLKDIV_CPU0_PERIPH_MASK | + S5P_CLKDIV_CPU0_ATB_MASK | + S5P_CLKDIV_CPU0_PCLKDBG_MASK | + S5P_CLKDIV_CPU0_APLL_MASK); + + tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) | + (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) | + (clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) | + (clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) | + (clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) | + (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) | + (clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT)); + + exynos4_clkdiv_table[i].clkdiv = tmp; + } + return cpufreq_register_driver(&exynos4_driver); out: -- cgit v1.2.3-70-g09d2 From 21f2e3c86b3746aaa462f9a2734363f4f41a641c Mon Sep 17 00:00:00 2001 From: Kamalesh Babulal Date: Fri, 9 Dec 2011 16:18:42 +0530 Subject: [CPUFREQ] Remove wall variable from cpufreq_gov_dbs_init() CPUFREQ Remove wall variable from cpufreq_gov_dbs_init() Remove wall variable from cpufreq_gov_dbs_init() as get_cpu_idle_time_us() no longer updates the last_update_time unconditionally. Passing non-NULL last_update_time address will result in accounting additional idle time with update_ts_time_stats() before returning idle_sleeptime. 
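To illustrate the calling convention described above, here is a sketch using a stand-in stub (the real get_cpu_idle_time_us() lives in the kernel, so the stub and its values are made up): a caller that actually needs the last-update timestamp passes a non-NULL pointer, and that is the path which performs the extra update_ts_time_stats() accounting; a caller that only wants the accumulated idle time passes NULL, which is what the hunk below switches to.

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long u64;

/*
 * Stand-in stub for the kernel's get_cpu_idle_time_us(); the real helper
 * additionally refreshes the per-CPU idle statistics (update_ts_time_stats())
 * when last_update_time is non-NULL.
 */
static u64 get_cpu_idle_time_us_stub(int cpu, u64 *last_update_time)
{
	u64 now_us = 5000000ULL;	/* pretend current time */
	u64 idle_us = 1234567ULL;	/* pretend accumulated idle time */

	(void)cpu;
	if (last_update_time)
		*last_update_time = now_us;	/* the extra accounting would happen here */
	return idle_us;
}

int main(void)
{
	u64 idle;

	/* Only the idle time is wanted: pass NULL, no extra accounting. */
	idle = get_cpu_idle_time_us_stub(0, NULL);
	printf("idle = %llu us\n", idle);
	return 0;
}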
Signed-off-by: Kamalesh Babulal Signed-off-by: Dave Jones -- drivers/cpufreq/cpufreq_ondemand.c | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) --- drivers/cpufreq/cpufreq_ondemand.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index fa8af4ebb1d..53ad4c78074 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -715,11 +715,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, static int __init cpufreq_gov_dbs_init(void) { - cputime64_t wall; u64 idle_time; int cpu = get_cpu(); - idle_time = get_cpu_idle_time_us(cpu, &wall); + idle_time = get_cpu_idle_time_us(cpu, NULL); put_cpu(); if (idle_time != -1ULL) { /* Idle micro accounting is supported. Use finer thresholds */ -- cgit v1.2.3-70-g09d2 From 648616343cdbe904c585a6c12e323d3b3c72e46f Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 15 Dec 2011 14:56:09 +0100 Subject: [S390] cputime: add sparse checking and cleanup Make cputime_t and cputime64_t nocast to enable sparse checking to detect incorrect use of cputime. Drop the cputime macros for simple scalar operations. The conversion macros are still needed. Signed-off-by: Martin Schwidefsky --- arch/ia64/include/asm/cputime.h | 69 ++++++++-------- arch/powerpc/include/asm/cputime.h | 70 +++++++---------- arch/s390/include/asm/cputime.h | 140 +++++++++++++++------------------ drivers/cpufreq/cpufreq_conservative.c | 29 ++++--- drivers/cpufreq/cpufreq_ondemand.c | 33 ++++---- drivers/cpufreq/cpufreq_stats.c | 5 +- drivers/macintosh/rack-meter.c | 11 +-- fs/proc/array.c | 8 +- fs/proc/stat.c | 27 +++---- fs/proc/uptime.c | 4 +- include/asm-generic/cputime.h | 62 +++++++-------- include/linux/sched.h | 4 +- kernel/acct.c | 4 +- kernel/cpu.c | 3 +- kernel/exit.c | 22 ++---- kernel/fork.c | 14 ++-- kernel/itimer.c | 15 ++-- kernel/posix-cpu-timers.c | 132 ++++++++++++------------------- kernel/sched.c | 80 +++++++++---------- kernel/sched_stats.h | 6 +- kernel/signal.c | 6 +- kernel/sys.c | 6 +- kernel/tsacct.c | 2 +- 23 files changed, 323 insertions(+), 429 deletions(-) (limited to 'drivers/cpufreq') diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index 6073b187528..461e52f0277 100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -26,59 +26,51 @@ #include #include -typedef u64 cputime_t; -typedef u64 cputime64_t; +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; -#define cputime_zero ((cputime_t)0) #define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_max ((~((cputime_t)0) >> 1) - 1) -#define cputime_add(__a, __b) ((__a) + (__b)) -#define cputime_sub(__a, __b) ((__a) - (__b)) -#define cputime_div(__a, __n) ((__a) / (__n)) -#define cputime_halve(__a) ((__a) >> 1) -#define cputime_eq(__a, __b) ((__a) == (__b)) -#define cputime_gt(__a, __b) ((__a) > (__b)) -#define cputime_ge(__a, __b) ((__a) >= (__b)) -#define cputime_lt(__a, __b) ((__a) < (__b)) -#define cputime_le(__a, __b) ((__a) <= (__b)) - -#define cputime64_zero ((cputime64_t)0) -#define cputime64_add(__a, __b) ((__a) + (__b)) -#define cputime64_sub(__a, __b) ((__a) - (__b)) -#define cputime_to_cputime64(__ct) (__ct) /* * Convert cputime <-> jiffies (HZ) */ -#define cputime_to_jiffies(__ct) ((__ct) / (NSEC_PER_SEC / HZ)) -#define jiffies_to_cputime(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) -#define cputime64_to_jiffies64(__ct) ((__ct) / (NSEC_PER_SEC / 
HZ)) -#define jiffies64_to_cputime64(__jif) ((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime_to_jiffies(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) +#define jiffies_to_cputime(__jif) \ + (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ)) +#define cputime64_to_jiffies64(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / HZ)) +#define jiffies64_to_cputime64(__jif) \ + (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ)) /* * Convert cputime <-> microseconds */ -#define cputime_to_usecs(__ct) ((__ct) / NSEC_PER_USEC) -#define usecs_to_cputime(__usecs) ((__usecs) * NSEC_PER_USEC) +#define cputime_to_usecs(__ct) \ + ((__force u64)(__ct) / NSEC_PER_USEC) +#define usecs_to_cputime(__usecs) \ + (__force cputime_t)((__usecs) * NSEC_PER_USEC) /* * Convert cputime <-> seconds */ -#define cputime_to_secs(__ct) ((__ct) / NSEC_PER_SEC) -#define secs_to_cputime(__secs) ((__secs) * NSEC_PER_SEC) +#define cputime_to_secs(__ct) \ + ((__force u64)(__ct) / NSEC_PER_SEC) +#define secs_to_cputime(__secs) \ + (__force cputime_t)((__secs) * NSEC_PER_SEC) /* * Convert cputime <-> timespec (nsec) */ static inline cputime_t timespec_to_cputime(const struct timespec *val) { - cputime_t ret = val->tv_sec * NSEC_PER_SEC; - return (ret + val->tv_nsec); + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec; + return (__force cputime_t) ret; } static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) { - val->tv_sec = ct / NSEC_PER_SEC; - val->tv_nsec = ct % NSEC_PER_SEC; + val->tv_sec = (__force u64) ct / NSEC_PER_SEC; + val->tv_nsec = (__force u64) ct % NSEC_PER_SEC; } /* @@ -86,25 +78,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val) */ static inline cputime_t timeval_to_cputime(struct timeval *val) { - cputime_t ret = val->tv_sec * NSEC_PER_SEC; - return (ret + val->tv_usec * NSEC_PER_USEC); + u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC; + return (__force cputime_t) ret; } static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val) { - val->tv_sec = ct / NSEC_PER_SEC; - val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC; + val->tv_sec = (__force u64) ct / NSEC_PER_SEC; + val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC; } /* * Convert cputime <-> clock (USER_HZ) */ -#define cputime_to_clock_t(__ct) ((__ct) / (NSEC_PER_SEC / USER_HZ)) -#define clock_t_to_cputime(__x) ((__x) * (NSEC_PER_SEC / USER_HZ)) +#define cputime_to_clock_t(__ct) \ + ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ)) +#define clock_t_to_cputime(__x) \ + (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ)) /* * Convert cputime64 to clock. 
*/ -#define cputime64_to_clock_t(__ct) cputime_to_clock_t((cputime_t)__ct) +#define cputime64_to_clock_t(__ct) \ + cputime_to_clock_t((__force cputime_t)__ct) #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ #endif /* __IA64_CPUTIME_H */ diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 1cf20bdfbec..e94935c5201 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { } #include #include -typedef u64 cputime_t; -typedef u64 cputime64_t; - -#define cputime_zero ((cputime_t)0) -#define cputime_max ((~((cputime_t)0) >> 1) - 1) -#define cputime_add(__a, __b) ((__a) + (__b)) -#define cputime_sub(__a, __b) ((__a) - (__b)) -#define cputime_div(__a, __n) ((__a) / (__n)) -#define cputime_halve(__a) ((__a) >> 1) -#define cputime_eq(__a, __b) ((__a) == (__b)) -#define cputime_gt(__a, __b) ((__a) > (__b)) -#define cputime_ge(__a, __b) ((__a) >= (__b)) -#define cputime_lt(__a, __b) ((__a) < (__b)) -#define cputime_le(__a, __b) ((__a) <= (__b)) - -#define cputime64_zero ((cputime64_t)0) -#define cputime64_add(__a, __b) ((__a) + (__b)) -#define cputime64_sub(__a, __b) ((__a) - (__b)) -#define cputime_to_cputime64(__ct) (__ct) +typedef u64 __nocast cputime_t; +typedef u64 __nocast cputime64_t; #ifdef __KERNEL__ @@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta); static inline unsigned long cputime_to_jiffies(const cputime_t ct) { - return mulhdu(ct, __cputime_jiffies_factor); + return mulhdu((__force u64) ct, __cputime_jiffies_factor); } /* Estimate the scaled cputime by scaling the real cputime based on @@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct) { if (cpu_has_feature(CPU_FTR_SPURR) && __get_cpu_var(cputime_last_delta)) - return ct * __get_cpu_var(cputime_scaled_last_delta) / - __get_cpu_var(cputime_last_delta); + return (__force u64) ct * + __get_cpu_var(cputime_scaled_last_delta) / + __get_cpu_var(cputime_last_delta); return ct; } static inline cputime_t jiffies_to_cputime(const unsigned long jif) { - cputime_t ct; + u64 ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif) } if (sec) ct += (cputime_t) sec * tb_ticks_per_sec; - return ct; + return (__force cputime_t) ct; } static inline void setup_cputime_one_jiffy(void) @@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void) static inline cputime64_t jiffies64_to_cputime64(const u64 jif) { - cputime_t ct; + u64 ct; u64 sec; /* have to be a little careful about overflow */ @@ -114,13 +98,13 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif) do_div(ct, HZ); } if (sec) - ct += (cputime_t) sec * tb_ticks_per_sec; - return ct; + ct += (u64) sec * tb_ticks_per_sec; + return (__force cputime64_t) ct; } static inline u64 cputime64_to_jiffies64(const cputime_t ct) { - return mulhdu(ct, __cputime_jiffies_factor); + return mulhdu((__force u64) ct, __cputime_jiffies_factor); } /* @@ -130,12 +114,12 @@ extern u64 __cputime_msec_factor; static inline unsigned long cputime_to_usecs(const cputime_t ct) { - return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; + return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC; } static inline cputime_t usecs_to_cputime(const unsigned long us) { - cputime_t ct; + u64 ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -147,7 +131,7 @@ static inline cputime_t 
usecs_to_cputime(const unsigned long us) } if (sec) ct += (cputime_t) sec * tb_ticks_per_sec; - return ct; + return (__force cputime_t) ct; } /* @@ -157,12 +141,12 @@ extern u64 __cputime_sec_factor; static inline unsigned long cputime_to_secs(const cputime_t ct) { - return mulhdu(ct, __cputime_sec_factor); + return mulhdu((__force u64) ct, __cputime_sec_factor); } static inline cputime_t secs_to_cputime(const unsigned long sec) { - return (cputime_t) sec * tb_ticks_per_sec; + return (__force cputime_t)((u64) sec * tb_ticks_per_sec); } /* @@ -170,7 +154,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec) */ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) { - u64 x = ct; + u64 x = (__force u64) ct; unsigned int frac; frac = do_div(x, tb_ticks_per_sec); @@ -182,11 +166,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p) static inline cputime_t timespec_to_cputime(const struct timespec *p) { - cputime_t ct; + u64 ct; ct = (u64) p->tv_nsec * tb_ticks_per_sec; do_div(ct, 1000000000); - return ct + (u64) p->tv_sec * tb_ticks_per_sec; + return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); } /* @@ -194,7 +178,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p) */ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) { - u64 x = ct; + u64 x = (__force u64) ct; unsigned int frac; frac = do_div(x, tb_ticks_per_sec); @@ -206,11 +190,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p) static inline cputime_t timeval_to_cputime(const struct timeval *p) { - cputime_t ct; + u64 ct; ct = (u64) p->tv_usec * tb_ticks_per_sec; do_div(ct, 1000000); - return ct + (u64) p->tv_sec * tb_ticks_per_sec; + return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec); } /* @@ -220,12 +204,12 @@ extern u64 __cputime_clockt_factor; static inline unsigned long cputime_to_clock_t(const cputime_t ct) { - return mulhdu(ct, __cputime_clockt_factor); + return mulhdu((__force u64) ct, __cputime_clockt_factor); } static inline cputime_t clock_t_to_cputime(const unsigned long clk) { - cputime_t ct; + u64 ct; unsigned long sec; /* have to be a little careful about overflow */ @@ -236,8 +220,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk) do_div(ct, USER_HZ); } if (sec) - ct += (cputime_t) sec * tb_ticks_per_sec; - return ct; + ct += (u64) sec * tb_ticks_per_sec; + return (__force cputime_t) ct; } #define cputime64_to_clock_t(ct) cputime_to_clock_t((cputime_t)(ct)) diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 08143487829..0887a0463e3 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -16,114 +16,98 @@ /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. 
*/ -typedef unsigned long long cputime_t; -typedef unsigned long long cputime64_t; +typedef unsigned long long __nocast cputime_t; +typedef unsigned long long __nocast cputime64_t; -#ifndef __s390x__ - -static inline unsigned int -__div(unsigned long long n, unsigned int base) +static inline unsigned long __div(unsigned long long n, unsigned long base) { +#ifndef __s390x__ register_pair rp; rp.pair = n >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); return rp.subreg.odd; +#else /* __s390x__ */ + return n / base; +#endif /* __s390x__ */ } -#else /* __s390x__ */ +#define cputime_one_jiffy jiffies_to_cputime(1) -static inline unsigned int -__div(unsigned long long n, unsigned int base) +/* + * Convert cputime to jiffies and back. + */ +static inline unsigned long cputime_to_jiffies(const cputime_t cputime) { - return n / base; + return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); } -#endif /* __s390x__ */ +static inline cputime_t jiffies_to_cputime(const unsigned int jif) +{ + return (__force cputime_t)(jif * (4096000000ULL / HZ)); +} -#define cputime_zero (0ULL) -#define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_max ((~0UL >> 1) - 1) -#define cputime_add(__a, __b) ((__a) + (__b)) -#define cputime_sub(__a, __b) ((__a) - (__b)) -#define cputime_div(__a, __n) ({ \ - unsigned long long __div = (__a); \ - do_div(__div,__n); \ - __div; \ -}) -#define cputime_halve(__a) ((__a) >> 1) -#define cputime_eq(__a, __b) ((__a) == (__b)) -#define cputime_gt(__a, __b) ((__a) > (__b)) -#define cputime_ge(__a, __b) ((__a) >= (__b)) -#define cputime_lt(__a, __b) ((__a) < (__b)) -#define cputime_le(__a, __b) ((__a) <= (__b)) -#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ)) -#define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ)) - -#define cputime64_zero (0ULL) -#define cputime64_add(__a, __b) ((__a) + (__b)) -#define cputime_to_cputime64(__ct) (__ct) - -static inline u64 -cputime64_to_jiffies64(cputime64_t cputime) -{ - do_div(cputime, 4096000000ULL / HZ); - return cputime; +static inline u64 cputime64_to_jiffies64(cputime64_t cputime) +{ + unsigned long long jif = (__force unsigned long long) cputime; + do_div(jif, 4096000000ULL / HZ); + return jif; +} + +static inline cputime64_t jiffies64_to_cputime64(const u64 jif) +{ + return (__force cputime64_t)(jif * (4096000000ULL / HZ)); } /* * Convert cputime to microseconds and back. */ -static inline unsigned int -cputime_to_usecs(const cputime_t cputime) +static inline unsigned int cputime_to_usecs(const cputime_t cputime) { - return cputime_div(cputime, 4096); + return (__force unsigned long long) cputime >> 12; } -static inline cputime_t -usecs_to_cputime(const unsigned int m) +static inline cputime_t usecs_to_cputime(const unsigned int m) { - return (cputime_t) m * 4096; + return (__force cputime_t)(m * 4096ULL); } /* * Convert cputime to milliseconds and back. */ -static inline unsigned int -cputime_to_secs(const cputime_t cputime) +static inline unsigned int cputime_to_secs(const cputime_t cputime) { - return __div(cputime, 2048000000) >> 1; + return __div((__force unsigned long long) cputime, 2048000000) >> 1; } -static inline cputime_t -secs_to_cputime(const unsigned int s) +static inline cputime_t secs_to_cputime(const unsigned int s) { - return (cputime_t) s * 4096000000ULL; + return (__force cputime_t)(s * 4096000000ULL); } /* * Convert cputime to timespec and back. 
*/ -static inline cputime_t -timespec_to_cputime(const struct timespec *value) +static inline cputime_t timespec_to_cputime(const struct timespec *value) { - return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL; + unsigned long long ret = value->tv_sec * 4096000000ULL; + return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); } -static inline void -cputime_to_timespec(const cputime_t cputime, struct timespec *value) +static inline void cputime_to_timespec(const cputime_t cputime, + struct timespec *value) { + unsigned long long __cputime = (__force unsigned long long) cputime; #ifndef __s390x__ register_pair rp; - rp.pair = cputime >> 1; + rp.pair = __cputime >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); value->tv_nsec = rp.subreg.even * 1000 / 4096; value->tv_sec = rp.subreg.odd; #else - value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096; - value->tv_sec = cputime / 4096000000ULL; + value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096; + value->tv_sec = __cputime / 4096000000ULL; #endif } @@ -132,50 +116,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value) * Since cputime and timeval have the same resolution (microseconds) * this is easy. */ -static inline cputime_t -timeval_to_cputime(const struct timeval *value) +static inline cputime_t timeval_to_cputime(const struct timeval *value) { - return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL; + unsigned long long ret = value->tv_sec * 4096000000ULL; + return (__force cputime_t)(ret + value->tv_usec * 4096ULL); } -static inline void -cputime_to_timeval(const cputime_t cputime, struct timeval *value) +static inline void cputime_to_timeval(const cputime_t cputime, + struct timeval *value) { + unsigned long long __cputime = (__force unsigned long long) cputime; #ifndef __s390x__ register_pair rp; - rp.pair = cputime >> 1; + rp.pair = __cputime >> 1; asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); value->tv_usec = rp.subreg.even / 4096; value->tv_sec = rp.subreg.odd; #else - value->tv_usec = (cputime % 4096000000ULL) / 4096; - value->tv_sec = cputime / 4096000000ULL; + value->tv_usec = (__cputime % 4096000000ULL) / 4096; + value->tv_sec = __cputime / 4096000000ULL; #endif } /* * Convert cputime to clock and back. */ -static inline clock_t -cputime_to_clock_t(cputime_t cputime) +static inline clock_t cputime_to_clock_t(cputime_t cputime) { - return cputime_div(cputime, 4096000000ULL / USER_HZ); + unsigned long long clock = (__force unsigned long long) cputime; + do_div(clock, 4096000000ULL / USER_HZ); + return clock; } -static inline cputime_t -clock_t_to_cputime(unsigned long x) +static inline cputime_t clock_t_to_cputime(unsigned long x) { - return (cputime_t) x * (4096000000ULL / USER_HZ); + return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); } /* * Convert cputime64 to clock. 
*/ -static inline clock_t -cputime64_to_clock_t(cputime64_t cputime) +static inline clock_t cputime64_to_clock_t(cputime64_t cputime) { - return cputime_div(cputime, 4096000000ULL / USER_HZ); + unsigned long long clock = (__force unsigned long long) cputime; + do_div(clock, 4096000000ULL / USER_HZ); + return clock; } struct s390_idle_data { diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c97b468ee9f..7f31a031c0b 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -103,15 +103,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, cputime64_t busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); + busy_time = kstat_cpu(cpu).cpustat.user; + busy_time += kstat_cpu(cpu).cpustat.system; + busy_time += kstat_cpu(cpu).cpustat.irq; + busy_time += kstat_cpu(cpu).cpustat.softirq; + busy_time += kstat_cpu(cpu).cpustat.steal; + busy_time += kstat_cpu(cpu).cpustat.nice; + + idle_time = cur_wall_time - busy_time; if (wall) *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); @@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; if (dbs_tuners_ins.ignore_nice) { cputime64_t cur_nice; unsigned long cur_nice_jiffies; - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); + cur_nice = kstat_cpu(j).cpustat.nice - + j_dbs_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index fa8af4ebb1d..07cffe2f6cf 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -127,15 +127,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, cputime64_t busy_time; cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); - busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, - kstat_cpu(cpu).cpustat.system); - - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); - busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice); - - idle_time = cputime64_sub(cur_wall_time, busy_time); + busy_time = kstat_cpu(cpu).cpustat.user; + busy_time += kstat_cpu(cpu).cpustat.system; + busy_time += kstat_cpu(cpu).cpustat.irq; + busy_time += kstat_cpu(cpu).cpustat.softirq; + busy_time += kstat_cpu(cpu).cpustat.steal; + busy_time += kstat_cpu(cpu).cpustat.nice; + + idle_time = cur_wall_time - busy_time; if 
(wall) *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); @@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time); - wall_time = (unsigned int) cputime64_sub(cur_wall_time, - j_dbs_info->prev_cpu_wall); + wall_time = (unsigned int) + (cur_wall_time - j_dbs_info->prev_cpu_wall); j_dbs_info->prev_cpu_wall = cur_wall_time; - idle_time = (unsigned int) cputime64_sub(cur_idle_time, - j_dbs_info->prev_cpu_idle); + idle_time = (unsigned int) + (cur_idle_time - j_dbs_info->prev_cpu_idle); j_dbs_info->prev_cpu_idle = cur_idle_time; - iowait_time = (unsigned int) cputime64_sub(cur_iowait_time, - j_dbs_info->prev_cpu_iowait); + iowait_time = (unsigned int) + (cur_iowait_time - j_dbs_info->prev_cpu_iowait); j_dbs_info->prev_cpu_iowait = cur_iowait_time; if (dbs_tuners_ins.ignore_nice) { cputime64_t cur_nice; unsigned long cur_nice_jiffies; - cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice, - j_dbs_info->prev_cpu_nice); + cur_nice = kstat_cpu(j).cpustat.nice - + j_dbs_info->prev_cpu_nice; /* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index c5072a91e84..2a508edd768 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu) spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); if (stat->time_in_state) - stat->time_in_state[stat->last_index] = - cputime64_add(stat->time_in_state[stat->last_index], - cputime_sub(cur_time, stat->last_time)); + stat->time_in_state[stat->last_index] += + cur_time - stat->last_time; stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); return 0; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 2637c139777..909908ebf16 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -83,11 +83,10 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu) { cputime64_t retval; - retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, - kstat_cpu(cpu).cpustat.iowait); + retval = kstat_cpu(cpu).cpustat.idle + kstat_cpu(cpu).cpustat.iowait; if (rackmeter_ignore_nice) - retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); + retval += kstat_cpu(cpu).cpustat.nice; return retval; } @@ -220,13 +219,11 @@ static void rackmeter_do_timer(struct work_struct *work) int i, offset, load, cumm, pause; cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); - total_ticks = (unsigned int)cputime64_sub(cur_jiffies, - rcpu->prev_wall); + total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall); rcpu->prev_wall = cur_jiffies; total_idle_ticks = get_cpu_idle_time(cpu); - idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, - rcpu->prev_idle); + idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle); rcpu->prev_idle = total_idle_ticks; /* We do a very dumb calculation to update the LEDs for now, diff --git a/fs/proc/array.c b/fs/proc/array.c index 3a1dafd228d..8c344f037bd 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, sigemptyset(&sigign); sigemptyset(&sigcatch); - cutime = cstime = utime = stime = cputime_zero; - cgtime = gtime = cputime_zero; + cutime = cstime = utime = stime = 0; + cgtime = gtime = 0; if (lock_task_sighand(task, 
&flags)) { struct signal_struct *sig = task->signal; @@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, do { min_flt += t->min_flt; maj_flt += t->maj_flt; - gtime = cputime_add(gtime, t->gtime); + gtime += t->gtime; t = next_thread(t); } while (t != task); min_flt += sig->min_flt; maj_flt += sig->maj_flt; thread_group_times(task, &utime, &stime); - gtime = cputime_add(gtime, sig->gtime); + gtime += sig->gtime; } sid = task_session_nr_ns(task, ns); diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 2a30d67dd6b..714d5d131e7 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -30,7 +30,7 @@ static cputime64_t get_idle_time(int cpu) if (idle_time == -1ULL) { /* !NO_HZ so we can rely on cpustat.idle */ idle = kstat_cpu(cpu).cpustat.idle; - idle = cputime64_add(idle, arch_idle_time(cpu)); + idle += arch_idle_time(cpu); } else idle = nsecs_to_jiffies64(1000 * idle_time); @@ -63,23 +63,22 @@ static int show_stat(struct seq_file *p, void *v) struct timespec boottime; user = nice = system = idle = iowait = - irq = softirq = steal = cputime64_zero; - guest = guest_nice = cputime64_zero; + irq = softirq = steal = 0; + guest = guest_nice = 0; getboottime(&boottime); jif = boottime.tv_sec; for_each_possible_cpu(i) { - user = cputime64_add(user, kstat_cpu(i).cpustat.user); - nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); - system = cputime64_add(system, kstat_cpu(i).cpustat.system); - idle = cputime64_add(idle, get_idle_time(i)); - iowait = cputime64_add(iowait, get_iowait_time(i)); - irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); - softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); - steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); - guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); - guest_nice = cputime64_add(guest_nice, - kstat_cpu(i).cpustat.guest_nice); + user += kstat_cpu(i).cpustat.user; + nice += kstat_cpu(i).cpustat.nice; + system += kstat_cpu(i).cpustat.system; + idle += get_idle_time(i); + iowait += get_iowait_time(i); + irq += kstat_cpu(i).cpustat.irq; + softirq += kstat_cpu(i).cpustat.softirq; + steal += kstat_cpu(i).cpustat.steal; + guest += kstat_cpu(i).cpustat.guest; + guest_nice += kstat_cpu(i).cpustat.guest_nice; sum += kstat_cpu_irqs_sum(i); sum += arch_irq_stat_cpu(i); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 766b1d45605..ac5243657da 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v) struct timespec uptime; struct timespec idle; int i; - cputime_t idletime = cputime_zero; + cputime_t idletime = 0; for_each_possible_cpu(i) - idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); + idletime += kstat_cpu(i).cpustat.idle; do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h index 62ce6823c0f..77202e2c9fc 100644 --- a/include/asm-generic/cputime.h +++ b/include/asm-generic/cputime.h @@ -4,70 +4,64 @@ #include #include -typedef unsigned long cputime_t; +typedef unsigned long __nocast cputime_t; -#define cputime_zero (0UL) #define cputime_one_jiffy jiffies_to_cputime(1) -#define cputime_max ((~0UL >> 1) - 1) -#define cputime_add(__a, __b) ((__a) + (__b)) -#define cputime_sub(__a, __b) ((__a) - (__b)) -#define cputime_div(__a, __n) ((__a) / (__n)) -#define cputime_halve(__a) ((__a) >> 1) -#define cputime_eq(__a, __b) ((__a) == (__b)) -#define cputime_gt(__a, __b) ((__a) > (__b)) -#define cputime_ge(__a, __b) 
((__a) >= (__b)) -#define cputime_lt(__a, __b) ((__a) < (__b)) -#define cputime_le(__a, __b) ((__a) <= (__b)) -#define cputime_to_jiffies(__ct) (__ct) +#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct) #define cputime_to_scaled(__ct) (__ct) -#define jiffies_to_cputime(__hz) (__hz) +#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz) -typedef u64 cputime64_t; +typedef u64 __nocast cputime64_t; -#define cputime64_zero (0ULL) -#define cputime64_add(__a, __b) ((__a) + (__b)) -#define cputime64_sub(__a, __b) ((__a) - (__b)) -#define cputime64_to_jiffies64(__ct) (__ct) -#define jiffies64_to_cputime64(__jif) (__jif) -#define cputime_to_cputime64(__ct) ((u64) __ct) -#define cputime64_gt(__a, __b) ((__a) > (__b)) +#define cputime64_to_jiffies64(__ct) (__force u64)(__ct) +#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif) -#define nsecs_to_cputime64(__ct) nsecs_to_jiffies64(__ct) +#define nsecs_to_cputime64(__ct) \ + jiffies64_to_cputime64(nsecs_to_jiffies64(__ct)) /* * Convert cputime to microseconds and back. */ -#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct) -#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs) +#define cputime_to_usecs(__ct) \ + jiffies_to_usecs(cputime_to_jiffies(__ct)); +#define usecs_to_cputime(__msecs) \ + jiffies_to_cputime(usecs_to_jiffies(__msecs)); /* * Convert cputime to seconds and back. */ -#define cputime_to_secs(jif) ((jif) / HZ) -#define secs_to_cputime(sec) ((sec) * HZ) +#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ) +#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ) /* * Convert cputime to timespec and back. */ -#define timespec_to_cputime(__val) timespec_to_jiffies(__val) -#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val) +#define timespec_to_cputime(__val) \ + jiffies_to_cputime(timespec_to_jiffies(__val)) +#define cputime_to_timespec(__ct,__val) \ + jiffies_to_timespec(cputime_to_jiffies(__ct),__val) /* * Convert cputime to timeval and back. */ -#define timeval_to_cputime(__val) timeval_to_jiffies(__val) -#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val) +#define timeval_to_cputime(__val) \ + jiffies_to_cputime(timeval_to_jiffies(__val)) +#define cputime_to_timeval(__ct,__val) \ + jiffies_to_timeval(cputime_to_jiffies(__ct),__val) /* * Convert cputime to clock and back. */ -#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct) -#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x) +#define cputime_to_clock_t(__ct) \ + jiffies_to_clock_t(cputime_to_jiffies(__ct)) +#define clock_t_to_cputime(__x) \ + jiffies_to_cputime(clock_t_to_jiffies(__x)) /* * Convert cputime64 to clock. 
*/ -#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct) +#define cputime64_to_clock_t(__ct) \ + jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct)) #endif diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c4f3e9b9bc..5649032d73f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -483,8 +483,8 @@ struct task_cputime { #define INIT_CPUTIME \ (struct task_cputime) { \ - .utime = cputime_zero, \ - .stime = cputime_zero, \ + .utime = 0, \ + .stime = 0, \ .sum_exec_runtime = 0, \ } diff --git a/kernel/acct.c b/kernel/acct.c index fa7eb3de2dd..203dfead2e0 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -613,8 +613,8 @@ void acct_collect(long exitcode, int group_dead) pacct->ac_flag |= ACORE; if (current->flags & PF_SIGNALED) pacct->ac_flag |= AXSIG; - pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime); - pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime); + pacct->ac_utime += current->utime; + pacct->ac_stime += current->stime; pacct->ac_minflt += current->min_flt; pacct->ac_majflt += current->maj_flt; spin_unlock_irq(¤t->sighand->siglock); diff --git a/kernel/cpu.c b/kernel/cpu.c index 563f1360947..3f8ee8a138c 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -178,8 +178,7 @@ static inline void check_for_tasks(int cpu) write_lock_irq(&tasklist_lock); for_each_process(p) { if (task_cpu(p) == cpu && p->state == TASK_RUNNING && - (!cputime_eq(p->utime, cputime_zero) || - !cputime_eq(p->stime, cputime_zero))) + (p->utime || p->stime)) printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " "(state = %ld, flags = %x)\n", p->comm, task_pid_nr(p), cpu, diff --git a/kernel/exit.c b/kernel/exit.c index d0b7d988f87..5e0d1f4c696 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -121,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk) * We won't ever get here for the group leader, since it * will have been the last reference on the signal_struct. 
*/ - sig->utime = cputime_add(sig->utime, tsk->utime); - sig->stime = cputime_add(sig->stime, tsk->stime); - sig->gtime = cputime_add(sig->gtime, tsk->gtime); + sig->utime += tsk->utime; + sig->stime += tsk->stime; + sig->gtime += tsk->gtime; sig->min_flt += tsk->min_flt; sig->maj_flt += tsk->maj_flt; sig->nvcsw += tsk->nvcsw; @@ -1255,19 +1255,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) spin_lock_irq(&p->real_parent->sighand->siglock); psig = p->real_parent->signal; sig = p->signal; - psig->cutime = - cputime_add(psig->cutime, - cputime_add(tgutime, - sig->cutime)); - psig->cstime = - cputime_add(psig->cstime, - cputime_add(tgstime, - sig->cstime)); - psig->cgtime = - cputime_add(psig->cgtime, - cputime_add(p->gtime, - cputime_add(sig->gtime, - sig->cgtime))); + psig->cutime += tgutime + sig->cutime; + psig->cstime += tgstime + sig->cstime; + psig->cgtime += p->gtime + sig->gtime + sig->cgtime; psig->cmin_flt += p->min_flt + sig->min_flt + sig->cmin_flt; psig->cmaj_flt += diff --git a/kernel/fork.c b/kernel/fork.c index da4a6a10d08..b058c5820ec 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1023,8 +1023,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { - tsk->cputime_expires.prof_exp = cputime_zero; - tsk->cputime_expires.virt_exp = cputime_zero; + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; INIT_LIST_HEAD(&tsk->cpu_timers[0]); INIT_LIST_HEAD(&tsk->cpu_timers[1]); @@ -1132,14 +1132,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, init_sigpending(&p->pending); - p->utime = cputime_zero; - p->stime = cputime_zero; - p->gtime = cputime_zero; - p->utimescaled = cputime_zero; - p->stimescaled = cputime_zero; + p->utime = p->stime = p->gtime = 0; + p->utimescaled = p->stimescaled = 0; #ifndef CONFIG_VIRT_CPU_ACCOUNTING - p->prev_utime = cputime_zero; - p->prev_stime = cputime_zero; + p->prev_utime = p->prev_stime = 0; #endif #if defined(SPLIT_RSS_COUNTING) memset(&p->rss_stat, 0, sizeof(p->rss_stat)); diff --git a/kernel/itimer.c b/kernel/itimer.c index d802883153d..22000c3db0d 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; - if (!cputime_eq(cval, cputime_zero)) { + if (cval) { struct task_cputime cputime; cputime_t t; thread_group_cputimer(tsk, &cputime); if (clock_id == CPUCLOCK_PROF) - t = cputime_add(cputime.utime, cputime.stime); + t = cputime.utime + cputime.stime; else /* CPUCLOCK_VIRT */ t = cputime.utime; - if (cputime_le(cval, t)) + if (cval < t) /* about to fire */ cval = cputime_one_jiffy; else - cval = cputime_sub(cval, t); + cval = cval - t; } spin_unlock_irq(&tsk->sighand->siglock); @@ -161,10 +161,9 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id, cval = it->expires; cinterval = it->incr; - if (!cputime_eq(cval, cputime_zero) || - !cputime_eq(nval, cputime_zero)) { - if (cputime_gt(nval, cputime_zero)) - nval = cputime_add(nval, cputime_one_jiffy); + if (cval || nval) { + if (nval > 0) + nval += cputime_one_jiffy; set_process_cpu_timer(tsk, clock_id, &nval, &cval); } it->expires = nval; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index e7cb76dc18f..125cb67daa2 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -78,7 +78,7 @@ static inline int 
cpu_time_before(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { return now.sched < then.sched; } else { - return cputime_lt(now.cpu, then.cpu); + return now.cpu < then.cpu; } } static inline void cpu_time_add(const clockid_t which_clock, @@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { acc->sched += val.sched; } else { - acc->cpu = cputime_add(acc->cpu, val.cpu); + acc->cpu += val.cpu; } } static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, @@ -98,24 +98,11 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { a.sched -= b.sched; } else { - a.cpu = cputime_sub(a.cpu, b.cpu); + a.cpu -= b.cpu; } return a; } -/* - * Divide and limit the result to res >= 1 - * - * This is necessary to prevent signal delivery starvation, when the result of - * the division would be rounded down to 0. - */ -static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) -{ - cputime_t res = cputime_div(time, div); - - return max_t(cputime_t, res, 1); -} - /* * Update expiry time from increment, and increase overrun count, * given the current clock sample. @@ -148,28 +135,26 @@ static void bump_cpu_timer(struct k_itimer *timer, } else { cputime_t delta, incr; - if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu)) + if (now.cpu < timer->it.cpu.expires.cpu) return; incr = timer->it.cpu.incr.cpu; - delta = cputime_sub(cputime_add(now.cpu, incr), - timer->it.cpu.expires.cpu); + delta = now.cpu + incr - timer->it.cpu.expires.cpu; /* Don't use (incr*2 < delta), incr*2 might overflow. */ - for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++) - incr = cputime_add(incr, incr); - for (; i >= 0; incr = cputime_halve(incr), i--) { - if (cputime_lt(delta, incr)) + for (i = 0; incr < delta - incr; i++) + incr += incr; + for (; i >= 0; incr = incr >> 1, i--) { + if (delta < incr) continue; - timer->it.cpu.expires.cpu = - cputime_add(timer->it.cpu.expires.cpu, incr); + timer->it.cpu.expires.cpu += incr; timer->it_overrun += 1 << i; - delta = cputime_sub(delta, incr); + delta -= incr; } } } static inline cputime_t prof_ticks(struct task_struct *p) { - return cputime_add(p->utime, p->stime); + return p->utime + p->stime; } static inline cputime_t virt_ticks(struct task_struct *p) { @@ -248,8 +233,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) t = tsk; do { - times->utime = cputime_add(times->utime, t->utime); - times->stime = cputime_add(times->stime, t->stime); + times->utime += t->utime; + times->stime += t->stime; times->sum_exec_runtime += task_sched_runtime(t); } while_each_thread(tsk, t); out: @@ -258,10 +243,10 @@ out: static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) { - if (cputime_gt(b->utime, a->utime)) + if (b->utime > a->utime) a->utime = b->utime; - if (cputime_gt(b->stime, a->stime)) + if (b->stime > a->stime) a->stime = b->stime; if (b->sum_exec_runtime > a->sum_exec_runtime) @@ -306,7 +291,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock, return -EINVAL; case CPUCLOCK_PROF: thread_group_cputime(p, &cputime); - cpu->cpu = cputime_add(cputime.utime, cputime.stime); + cpu->cpu = cputime.utime + cputime.stime; break; case CPUCLOCK_VIRT: thread_group_cputime(p, &cputime); @@ -470,26 +455,24 @@ static void cleanup_timers(struct list_head *head, unsigned long long sum_exec_runtime) { struct 
cpu_timer_list *timer, *next; - cputime_t ptime = cputime_add(utime, stime); + cputime_t ptime = utime + stime; list_for_each_entry_safe(timer, next, head, entry) { list_del_init(&timer->entry); - if (cputime_lt(timer->expires.cpu, ptime)) { - timer->expires.cpu = cputime_zero; + if (timer->expires.cpu < ptime) { + timer->expires.cpu = 0; } else { - timer->expires.cpu = cputime_sub(timer->expires.cpu, - ptime); + timer->expires.cpu -= ptime; } } ++head; list_for_each_entry_safe(timer, next, head, entry) { list_del_init(&timer->entry); - if (cputime_lt(timer->expires.cpu, utime)) { - timer->expires.cpu = cputime_zero; + if (timer->expires.cpu < utime) { + timer->expires.cpu = 0; } else { - timer->expires.cpu = cputime_sub(timer->expires.cpu, - utime); + timer->expires.cpu -= utime; } } @@ -520,8 +503,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) struct signal_struct *const sig = tsk->signal; cleanup_timers(tsk->signal->cpu_timers, - cputime_add(tsk->utime, sig->utime), - cputime_add(tsk->stime, sig->stime), + tsk->utime + sig->utime, tsk->stime + sig->stime, tsk->se.sum_exec_runtime + sig->sum_sched_runtime); } @@ -540,8 +522,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) static inline int expires_gt(cputime_t expires, cputime_t new_exp) { - return cputime_eq(expires, cputime_zero) || - cputime_gt(expires, new_exp); + return expires == 0 || expires > new_exp; } /* @@ -651,7 +632,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, default: return -EINVAL; case CPUCLOCK_PROF: - cpu->cpu = cputime_add(cputime.utime, cputime.stime); + cpu->cpu = cputime.utime + cputime.stime; break; case CPUCLOCK_VIRT: cpu->cpu = cputime.utime; @@ -918,12 +899,12 @@ static void check_thread_timers(struct task_struct *tsk, unsigned long soft; maxfire = 20; - tsk->cputime_expires.prof_exp = cputime_zero; + tsk->cputime_expires.prof_exp = 0; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { + if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) { tsk->cputime_expires.prof_exp = t->expires.cpu; break; } @@ -933,12 +914,12 @@ static void check_thread_timers(struct task_struct *tsk, ++timers; maxfire = 20; - tsk->cputime_expires.virt_exp = cputime_zero; + tsk->cputime_expires.virt_exp = 0; while (!list_empty(timers)) { struct cpu_timer_list *t = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { + if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) { tsk->cputime_expires.virt_exp = t->expires.cpu; break; } @@ -1009,20 +990,19 @@ static u32 onecputick; static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, cputime_t *expires, cputime_t cur_time, int signo) { - if (cputime_eq(it->expires, cputime_zero)) + if (!it->expires) return; - if (cputime_ge(cur_time, it->expires)) { - if (!cputime_eq(it->incr, cputime_zero)) { - it->expires = cputime_add(it->expires, it->incr); + if (cur_time >= it->expires) { + if (it->incr) { + it->expires += it->incr; it->error += it->incr_error; if (it->error >= onecputick) { - it->expires = cputime_sub(it->expires, - cputime_one_jiffy); + it->expires -= cputime_one_jiffy; it->error -= onecputick; } } else { - it->expires = cputime_zero; + it->expires = 0; } trace_itimer_expire(signo == SIGPROF ? 
@@ -1031,9 +1011,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); } - if (!cputime_eq(it->expires, cputime_zero) && - (cputime_eq(*expires, cputime_zero) || - cputime_lt(it->expires, *expires))) { + if (it->expires && (!*expires || it->expires < *expires)) { *expires = it->expires; } } @@ -1048,9 +1026,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, */ static inline int task_cputime_zero(const struct task_cputime *cputime) { - if (cputime_eq(cputime->utime, cputime_zero) && - cputime_eq(cputime->stime, cputime_zero) && - cputime->sum_exec_runtime == 0) + if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) return 1; return 0; } @@ -1076,15 +1052,15 @@ static void check_process_timers(struct task_struct *tsk, */ thread_group_cputimer(tsk, &cputime); utime = cputime.utime; - ptime = cputime_add(utime, cputime.stime); + ptime = utime + cputime.stime; sum_sched_runtime = cputime.sum_exec_runtime; maxfire = 20; - prof_expires = cputime_zero; + prof_expires = 0; while (!list_empty(timers)) { struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) { + if (!--maxfire || ptime < tl->expires.cpu) { prof_expires = tl->expires.cpu; break; } @@ -1094,12 +1070,12 @@ static void check_process_timers(struct task_struct *tsk, ++timers; maxfire = 20; - virt_expires = cputime_zero; + virt_expires = 0; while (!list_empty(timers)) { struct cpu_timer_list *tl = list_first_entry(timers, struct cpu_timer_list, entry); - if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) { + if (!--maxfire || utime < tl->expires.cpu) { virt_expires = tl->expires.cpu; break; } @@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk, } } x = secs_to_cputime(soft); - if (cputime_eq(prof_expires, cputime_zero) || - cputime_lt(x, prof_expires)) { + if (!prof_expires || x < prof_expires) { prof_expires = x; } } @@ -1249,12 +1224,9 @@ out: static inline int task_cputime_expired(const struct task_cputime *sample, const struct task_cputime *expires) { - if (!cputime_eq(expires->utime, cputime_zero) && - cputime_ge(sample->utime, expires->utime)) + if (expires->utime && sample->utime >= expires->utime) return 1; - if (!cputime_eq(expires->stime, cputime_zero) && - cputime_ge(cputime_add(sample->utime, sample->stime), - expires->stime)) + if (expires->stime && sample->utime + sample->stime >= expires->stime) return 1; if (expires->sum_exec_runtime != 0 && sample->sum_exec_runtime >= expires->sum_exec_runtime) @@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, * it to be relative, *newval argument is relative and we update * it to be absolute. */ - if (!cputime_eq(*oldval, cputime_zero)) { - if (cputime_le(*oldval, now.cpu)) { + if (*oldval) { + if (*oldval <= now.cpu) { /* Just about to fire. 
*/ *oldval = cputime_one_jiffy; } else { - *oldval = cputime_sub(*oldval, now.cpu); + *oldval -= now.cpu; } } - if (cputime_eq(*newval, cputime_zero)) + if (!*newval) return; - *newval = cputime_add(*newval, now.cpu); + *newval += now.cpu; } /* diff --git a/kernel/sched.c b/kernel/sched.c index d6b149ccf92..18cad4467e6 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2166,7 +2166,7 @@ static int irqtime_account_hi_update(void) local_irq_save(flags); latest_ns = this_cpu_read(cpu_hardirq_time); - if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) + if (nsecs_to_cputime64(latest_ns) > cpustat->irq) ret = 1; local_irq_restore(flags); return ret; @@ -2181,7 +2181,7 @@ static int irqtime_account_si_update(void) local_irq_save(flags); latest_ns = this_cpu_read(cpu_softirq_time); - if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) + if (nsecs_to_cputime64(latest_ns) > cpustat->softirq) ret = 1; local_irq_restore(flags); return ret; @@ -3868,19 +3868,17 @@ void account_user_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t tmp; /* Add user time to process. */ - p->utime = cputime_add(p->utime, cputime); - p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); + p->utime += cputime; + p->utimescaled += cputime_scaled; account_group_user_time(p, cputime); /* Add user time to cpustat. */ - tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) - cpustat->nice = cputime64_add(cpustat->nice, tmp); + cpustat->nice += (__force cputime64_t) cputime; else - cpustat->user = cputime64_add(cpustat->user, tmp); + cpustat->user += (__force cputime64_t) cputime; cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); /* Account for user time used */ @@ -3896,24 +3894,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime, static void account_guest_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled) { - cputime64_t tmp; struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - tmp = cputime_to_cputime64(cputime); - /* Add guest time to process. */ - p->utime = cputime_add(p->utime, cputime); - p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); + p->utime += cputime; + p->utimescaled += cputime_scaled; account_group_user_time(p, cputime); - p->gtime = cputime_add(p->gtime, cputime); + p->gtime += cputime; /* Add guest time to cpustat. */ if (TASK_NICE(p) > 0) { - cpustat->nice = cputime64_add(cpustat->nice, tmp); - cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); + cpustat->nice += (__force cputime64_t) cputime; + cpustat->guest_nice += (__force cputime64_t) cputime; } else { - cpustat->user = cputime64_add(cpustat->user, tmp); - cpustat->guest = cputime64_add(cpustat->guest, tmp); + cpustat->user += (__force cputime64_t) cputime; + cpustat->guest += (__force cputime64_t) cputime; } } @@ -3928,15 +3923,13 @@ static inline void __account_system_time(struct task_struct *p, cputime_t cputime, cputime_t cputime_scaled, cputime64_t *target_cputime64) { - cputime64_t tmp = cputime_to_cputime64(cputime); - /* Add system time to process. */ - p->stime = cputime_add(p->stime, cputime); - p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); + p->stime += cputime; + p->stimescaled += cputime_scaled; account_group_system_time(p, cputime); /* Add system time to cpustat. 
*/ - *target_cputime64 = cputime64_add(*target_cputime64, tmp); + *target_cputime64 += (__force cputime64_t) cputime; cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); /* Account for system time used */ @@ -3978,9 +3971,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, void account_steal_time(cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t cputime64 = cputime_to_cputime64(cputime); - cpustat->steal = cputime64_add(cpustat->steal, cputime64); + cpustat->steal += (__force cputime64_t) cputime; } /* @@ -3990,13 +3982,12 @@ void account_steal_time(cputime_t cputime) void account_idle_time(cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; - cputime64_t cputime64 = cputime_to_cputime64(cputime); struct rq *rq = this_rq(); if (atomic_read(&rq->nr_iowait) > 0) - cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); + cpustat->iowait += (__force cputime64_t) cputime; else - cpustat->idle = cputime64_add(cpustat->idle, cputime64); + cpustat->idle += (__force cputime64_t) cputime; } static __always_inline bool steal_account_process_tick(void) @@ -4046,16 +4037,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, struct rq *rq) { cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); - cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; if (steal_account_process_tick()) return; if (irqtime_account_hi_update()) { - cpustat->irq = cputime64_add(cpustat->irq, tmp); + cpustat->irq += (__force cputime64_t) cputime_one_jiffy; } else if (irqtime_account_si_update()) { - cpustat->softirq = cputime64_add(cpustat->softirq, tmp); + cpustat->softirq += (__force cputime64_t) cputime_one_jiffy; } else if (this_cpu_ksoftirqd() == p) { /* * ksoftirqd time do not get accounted in cpu_softirq_time. 
@@ -4171,7 +4161,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) { - cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); + cputime_t rtime, utime = p->utime, total = utime + p->stime; /* * Use CFS's precise accounting: @@ -4179,11 +4169,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) rtime = nsecs_to_cputime(p->se.sum_exec_runtime); if (total) { - u64 temp = rtime; + u64 temp = (__force u64) rtime; - temp *= utime; - do_div(temp, total); - utime = (cputime_t)temp; + temp *= (__force u64) utime; + do_div(temp, (__force u32) total); + utime = (__force cputime_t) temp; } else utime = rtime; @@ -4191,7 +4181,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) * Compare with previous values, to keep monotonicity: */ p->prev_utime = max(p->prev_utime, utime); - p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); + p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); *ut = p->prev_utime; *st = p->prev_stime; @@ -4208,21 +4198,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) thread_group_cputime(p, &cputime); - total = cputime_add(cputime.utime, cputime.stime); + total = cputime.utime + cputime.stime; rtime = nsecs_to_cputime(cputime.sum_exec_runtime); if (total) { - u64 temp = rtime; + u64 temp = (__force u64) rtime; - temp *= cputime.utime; - do_div(temp, total); - utime = (cputime_t)temp; + temp *= (__force u64) cputime.utime; + do_div(temp, (__force u32) total); + utime = (__force cputime_t) temp; } else utime = rtime; sig->prev_utime = max(sig->prev_utime, utime); - sig->prev_stime = max(sig->prev_stime, - cputime_sub(rtime, sig->prev_utime)); + sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); *ut = sig->prev_utime; *st = sig->prev_stime; @@ -9769,7 +9758,8 @@ static void cpuacct_update_stats(struct task_struct *tsk, ca = task_ca(tsk); do { - __percpu_counter_add(&ca->cpustat[idx], val, batch); + __percpu_counter_add(&ca->cpustat[idx], + (__force s64) val, batch); ca = ca->parent; } while (ca); rcu_read_unlock(); diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 87f9e36ea56..4b71dbef271 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -283,8 +283,7 @@ static inline void account_group_user_time(struct task_struct *tsk, return; raw_spin_lock(&cputimer->lock); - cputimer->cputime.utime = - cputime_add(cputimer->cputime.utime, cputime); + cputimer->cputime.utime += cputime; raw_spin_unlock(&cputimer->lock); } @@ -307,8 +306,7 @@ static inline void account_group_system_time(struct task_struct *tsk, return; raw_spin_lock(&cputimer->lock); - cputimer->cputime.stime = - cputime_add(cputimer->cputime.stime, cputime); + cputimer->cputime.stime += cputime; raw_spin_unlock(&cputimer->lock); } diff --git a/kernel/signal.c b/kernel/signal.c index b3f78d09a10..739ef2bf105 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1629,10 +1629,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig) info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, - tsk->signal->utime)); - info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, - tsk->signal->stime)); + info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); + info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); info.si_status = tsk->exit_code & 0x7f; if (tsk->exit_code & 0x80) diff --git 
a/kernel/sys.c b/kernel/sys.c index 481611fbd07..ddf8155bf3f 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) unsigned long maxrss = 0; memset((char *) r, 0, sizeof *r); - utime = stime = cputime_zero; + utime = stime = 0; if (who == RUSAGE_THREAD) { task_times(current, &utime, &stime); @@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) case RUSAGE_SELF: thread_group_times(p, &tgutime, &tgstime); - utime = cputime_add(utime, tgutime); - stime = cputime_add(stime, tgstime); + utime += tgutime; + stime += tgstime; r->ru_nvcsw += p->signal->nvcsw; r->ru_nivcsw += p->signal->nivcsw; r->ru_minflt += p->signal->min_flt; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 5bbfac85866..23b4d784ebd 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk) local_irq_save(flags); time = tsk->stime + tsk->utime; - dtime = cputime_sub(time, tsk->acct_timexpd); + dtime = time - tsk->acct_timexpd; jiffies_to_timeval(cputime_to_jiffies(dtime), &value); delta = value.tv_sec; delta = delta * USEC_PER_SEC + value.tv_usec; -- cgit v1.2.3-70-g09d2 From 8a25a2fd126c621f44f3aeaef80d51f00fc11639 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Wed, 21 Dec 2011 14:29:42 -0800 Subject: cpu: convert 'cpu' and 'machinecheck' sysdev_class to a regular subsystem This moves the 'cpu sysdev_class' over to a regular 'cpu' subsystem and converts the devices to regular devices. The sysdev drivers are implemented as subsystem interfaces now. After all sysdev classes are ported to regular driver core entities, the sysdev implementation will be entirely removed from the kernel. Userspace relies on events and generic sysfs subsystem infrastructure from sysdev devices, which are made available with this conversion. Cc: Haavard Skinnemoen Cc: Hans-Christian Egtvedt Cc: Tony Luck Cc: Fenghua Yu Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Paul Mundt Cc: "David S. Miller" Cc: Chris Metcalf Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Borislav Petkov Cc: Tigran Aivazian Cc: Len Brown Cc: Zhang Rui Cc: Dave Jones Cc: Peter Zijlstra Cc: Russell King Cc: Andrew Morton Cc: Arjan van de Ven Cc: "Rafael J. Wysocki" Cc: "Srivatsa S. 
Bhat" Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- arch/avr32/kernel/cpu.c | 74 +++---- arch/ia64/kernel/err_inject.c | 52 ++--- arch/ia64/kernel/topology.c | 10 +- arch/powerpc/include/asm/spu.h | 12 +- arch/powerpc/include/asm/topology.h | 10 +- arch/powerpc/kernel/cacheinfo.c | 10 +- arch/powerpc/kernel/smp.c | 2 +- arch/powerpc/kernel/sysfs.c | 257 ++++++++++++------------ arch/powerpc/mm/numa.c | 8 +- arch/powerpc/platforms/cell/cbe_thermal.c | 144 ++++++------- arch/powerpc/platforms/cell/spu_base.c | 61 +++--- arch/powerpc/platforms/pseries/pseries_energy.c | 71 ++++--- arch/powerpc/sysdev/ppc4xx_cpm.c | 6 +- arch/s390/kernel/smp.c | 76 +++---- arch/s390/kernel/topology.c | 6 +- arch/sh/kernel/cpu/sh4/sq.c | 24 ++- arch/sparc/kernel/sysfs.c | 122 +++++------ arch/tile/kernel/sysfs.c | 61 +++--- arch/x86/include/asm/mce.h | 2 +- arch/x86/kernel/cpu/intel_cacheinfo.c | 25 ++- arch/x86/kernel/cpu/mcheck/mce-internal.h | 4 +- arch/x86/kernel/cpu/mcheck/mce.c | 128 ++++++------ arch/x86/kernel/cpu/mcheck/mce_amd.c | 11 +- arch/x86/kernel/cpu/mcheck/therm_throt.c | 63 +++--- arch/x86/kernel/microcode_core.c | 58 +++--- drivers/acpi/processor_driver.c | 6 +- drivers/acpi/processor_thermal.c | 1 - drivers/base/cpu.c | 146 +++++++------- drivers/base/node.c | 8 +- drivers/base/topology.c | 51 +++-- drivers/cpufreq/cpufreq.c | 79 ++++---- drivers/cpufreq/cpufreq_stats.c | 1 - drivers/cpuidle/cpuidle.c | 12 +- drivers/cpuidle/cpuidle.h | 10 +- drivers/cpuidle/sysfs.c | 74 ++++--- drivers/s390/char/sclp_config.c | 8 +- include/linux/cpu.h | 18 +- kernel/sched.c | 40 ++-- 38 files changed, 874 insertions(+), 877 deletions(-) (limited to 'drivers/cpufreq') diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c index e84faffbbec..2233be71e2e 100644 --- a/arch/avr32/kernel/cpu.c +++ b/arch/avr32/kernel/cpu.c @@ -6,7 +6,7 @@ * published by the Free Software Foundation. */ #include -#include +#include #include #include #include @@ -26,16 +26,16 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); * XXX: If/when a SMP-capable implementation of AVR32 will ever be * made, we must make sure that the code executes on the correct CPU. 
*/ -static ssize_t show_pc0event(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pc0event(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f); } -static ssize_t store_pc0event(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_pc0event(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -48,16 +48,16 @@ static ssize_t store_pc0event(struct sys_device *dev, sysreg_write(PCCR, val); return count; } -static ssize_t show_pc0count(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pc0count(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pcnt0; pcnt0 = sysreg_read(PCNT0); return sprintf(buf, "%lu\n", pcnt0); } -static ssize_t store_pc0count(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t store_pc0count(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -71,16 +71,16 @@ static ssize_t store_pc0count(struct sys_device *dev, return count; } -static ssize_t show_pc1event(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pc1event(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f); } -static ssize_t store_pc1event(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_pc1event(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -93,16 +93,16 @@ static ssize_t store_pc1event(struct sys_device *dev, sysreg_write(PCCR, val); return count; } -static ssize_t show_pc1count(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pc1count(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pcnt1; pcnt1 = sysreg_read(PCNT1); return sprintf(buf, "%lu\n", pcnt1); } -static ssize_t store_pc1count(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_pc1count(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -116,16 +116,16 @@ static ssize_t store_pc1count(struct sys_device *dev, return count; } -static ssize_t show_pccycles(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pccycles(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pccnt; pccnt = sysreg_read(PCCNT); return sprintf(buf, "%lu\n", pccnt); } -static ssize_t store_pccycles(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_pccycles(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -139,16 +139,16 @@ static ssize_t store_pccycles(struct sys_device *dev, return count; } -static ssize_t show_pcenable(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_pcenable(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "%c\n", (pccr & 1)?'1':'0'); } -static ssize_t store_pcenable(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, +static ssize_t 
store_pcenable(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long pccr, val; @@ -167,12 +167,12 @@ static ssize_t store_pcenable(struct sys_device *dev, return count; } -static SYSDEV_ATTR(pc0event, 0600, show_pc0event, store_pc0event); -static SYSDEV_ATTR(pc0count, 0600, show_pc0count, store_pc0count); -static SYSDEV_ATTR(pc1event, 0600, show_pc1event, store_pc1event); -static SYSDEV_ATTR(pc1count, 0600, show_pc1count, store_pc1count); -static SYSDEV_ATTR(pccycles, 0600, show_pccycles, store_pccycles); -static SYSDEV_ATTR(pcenable, 0600, show_pcenable, store_pcenable); +static DEVICE_ATTR(pc0event, 0600, show_pc0event, store_pc0event); +static DEVICE_ATTR(pc0count, 0600, show_pc0count, store_pc0count); +static DEVICE_ATTR(pc1event, 0600, show_pc1event, store_pc1event); +static DEVICE_ATTR(pc1count, 0600, show_pc1count, store_pc1count); +static DEVICE_ATTR(pccycles, 0600, show_pccycles, store_pccycles); +static DEVICE_ATTR(pcenable, 0600, show_pcenable, store_pcenable); #endif /* CONFIG_PERFORMANCE_COUNTERS */ @@ -186,12 +186,12 @@ static int __init topology_init(void) register_cpu(c, cpu); #ifdef CONFIG_PERFORMANCE_COUNTERS - sysdev_create_file(&c->sysdev, &attr_pc0event); - sysdev_create_file(&c->sysdev, &attr_pc0count); - sysdev_create_file(&c->sysdev, &attr_pc1event); - sysdev_create_file(&c->sysdev, &attr_pc1count); - sysdev_create_file(&c->sysdev, &attr_pccycles); - sysdev_create_file(&c->sysdev, &attr_pcenable); + device_create_file(&c->dev, &dev_attr_pc0event); + device_create_file(&c->dev, &dev_attr_pc0count); + device_create_file(&c->dev, &dev_attr_pc1event); + device_create_file(&c->dev, &dev_attr_pc1count); + device_create_file(&c->dev, &dev_attr_pccycles); + device_create_file(&c->dev, &dev_attr_pcenable); #endif } diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c index c539c689493..2d67317a1ec 100644 --- a/arch/ia64/kernel/err_inject.c +++ b/arch/ia64/kernel/err_inject.c @@ -24,7 +24,7 @@ * Copyright (C) 2006, Intel Corp. All rights reserved. * */ -#include +#include #include #include #include @@ -35,10 +35,10 @@ #define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte; #define define_one_ro(name) \ -static SYSDEV_ATTR(name, 0444, show_##name, NULL) +static DEVICE_ATTR(name, 0444, show_##name, NULL) #define define_one_rw(name) \ -static SYSDEV_ATTR(name, 0644, show_##name, store_##name) +static DEVICE_ATTR(name, 0644, show_##name, store_##name) static u64 call_start[NR_CPUS]; static u64 phys_addr[NR_CPUS]; @@ -55,7 +55,7 @@ static u64 resources[NR_CPUS]; #define show(name) \ static ssize_t \ -show_##name(struct sys_device *dev, struct sysdev_attribute *attr, \ +show_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ u32 cpu=dev->id; \ @@ -64,7 +64,7 @@ show_##name(struct sys_device *dev, struct sysdev_attribute *attr, \ #define store(name) \ static ssize_t \ -store_##name(struct sys_device *dev, struct sysdev_attribute *attr, \ +store_##name(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ unsigned int cpu=dev->id; \ @@ -78,7 +78,7 @@ show(call_start) * processor. The cpu number in driver is only used for storing data. 
*/ static ssize_t -store_call_start(struct sys_device *dev, struct sysdev_attribute *attr, +store_call_start(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; @@ -127,7 +127,7 @@ show(err_type_info) store(err_type_info) static ssize_t -show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr, +show_virtual_to_phys(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; @@ -135,7 +135,7 @@ show_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr, } static ssize_t -store_virtual_to_phys(struct sys_device *dev, struct sysdev_attribute *attr, +store_virtual_to_phys(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; @@ -159,8 +159,8 @@ show(err_struct_info) store(err_struct_info) static ssize_t -show_err_data_buffer(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +show_err_data_buffer(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; @@ -171,8 +171,8 @@ show_err_data_buffer(struct sys_device *dev, } static ssize_t -store_err_data_buffer(struct sys_device *dev, - struct sysdev_attribute *attr, +store_err_data_buffer(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; @@ -209,14 +209,14 @@ define_one_ro(capabilities); define_one_ro(resources); static struct attribute *default_attrs[] = { - &attr_call_start.attr, - &attr_virtual_to_phys.attr, - &attr_err_type_info.attr, - &attr_err_struct_info.attr, - &attr_err_data_buffer.attr, - &attr_status.attr, - &attr_capabilities.attr, - &attr_resources.attr, + &dev_attr_call_start.attr, + &dev_attr_virtual_to_phys.attr, + &dev_attr_err_type_info.attr, + &dev_attr_err_struct_info.attr, + &dev_attr_err_data_buffer.attr, + &dev_attr_status.attr, + &dev_attr_capabilities.attr, + &dev_attr_resources.attr, NULL }; @@ -225,12 +225,12 @@ static struct attribute_group err_inject_attr_group = { .name = "err_inject" }; /* Add/Remove err_inject interface for CPU device */ -static int __cpuinit err_inject_add_dev(struct sys_device * sys_dev) +static int __cpuinit err_inject_add_dev(struct device * sys_dev) { return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group); } -static int __cpuinit err_inject_remove_dev(struct sys_device * sys_dev) +static int __cpuinit err_inject_remove_dev(struct device * sys_dev) { sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); return 0; @@ -239,9 +239,9 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *sys_dev; - sys_dev = get_cpu_sysdev(cpu); + sys_dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: @@ -283,13 +283,13 @@ static void __exit err_inject_exit(void) { int i; - struct sys_device *sys_dev; + struct device *sys_dev; #ifdef ERR_INJ_DEBUG printk(KERN_INFO "Exit error injection driver.\n"); #endif for_each_online_cpu(i) { - sys_dev = get_cpu_sysdev(i); + sys_dev = get_cpu_device(i); sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); } unregister_hotcpu_notifier(&err_inject_cpu_notifier); diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index 9be1f11a01d..9deb21dbf62 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c @@ -350,7 +350,7 @@ static int __cpuinit 
cpu_cache_sysfs_init(unsigned int cpu) } /* Add cache interface for CPU device */ -static int __cpuinit cache_add_dev(struct sys_device * sys_dev) +static int __cpuinit cache_add_dev(struct device * sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i, j; @@ -400,7 +400,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) } /* Remove cache interface for CPU device */ -static int __cpuinit cache_remove_dev(struct sys_device * sys_dev) +static int __cpuinit cache_remove_dev(struct device * sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i; @@ -428,9 +428,9 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *sys_dev; - sys_dev = get_cpu_sysdev(cpu); + sys_dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: @@ -454,7 +454,7 @@ static int __init cache_sysfs_init(void) int i; for_each_online_cpu(i) { - struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i); + struct device *sys_dev = get_cpu_device((unsigned int)i); cache_add_dev(sys_dev); } diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 4e360bd4a35..fff921345dd 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h @@ -25,7 +25,7 @@ #ifdef __KERNEL__ #include -#include +#include #include #define LS_SIZE (256 * 1024) @@ -166,7 +166,7 @@ struct spu { /* beat only */ u64 shadow_int_mask_RW[3]; - struct sys_device sysdev; + struct device dev; int has_mem_affinity; struct list_head aff_list; @@ -270,11 +270,11 @@ struct spufs_calls { int register_spu_syscalls(struct spufs_calls *calls); void unregister_spu_syscalls(struct spufs_calls *calls); -int spu_add_sysdev_attr(struct sysdev_attribute *attr); -void spu_remove_sysdev_attr(struct sysdev_attribute *attr); +int spu_add_dev_attr(struct device_attribute *attr); +void spu_remove_dev_attr(struct device_attribute *attr); -int spu_add_sysdev_attr_group(struct attribute_group *attrs); -void spu_remove_sysdev_attr_group(struct attribute_group *attrs); +int spu_add_dev_attr_group(struct attribute_group *attrs); +void spu_remove_dev_attr_group(struct attribute_group *attrs); int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, unsigned long dsisr, unsigned *flt); diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 1e104af0848..c97185885c6 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -3,7 +3,7 @@ #ifdef __KERNEL__ -struct sys_device; +struct device; struct device_node; #ifdef CONFIG_NUMA @@ -86,19 +86,19 @@ extern int __node_distance(int, int); extern void __init dump_numa_cpu_topology(void); -extern int sysfs_add_device_to_node(struct sys_device *dev, int nid); -extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid); +extern int sysfs_add_device_to_node(struct device *dev, int nid); +extern void sysfs_remove_device_from_node(struct device *dev, int nid); #else static inline void dump_numa_cpu_topology(void) {} -static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid) +static inline int sysfs_add_device_to_node(struct device *dev, int nid) { return 0; } -static inline void sysfs_remove_device_from_node(struct sys_device *dev, +static inline void sysfs_remove_device_from_node(struct device *dev, int nid) { } diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c index 
a3c684b4c86..92c6b008dd2 100644 --- a/arch/powerpc/kernel/cacheinfo.c +++ b/arch/powerpc/kernel/cacheinfo.c @@ -451,15 +451,15 @@ out: static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id) { struct cache_dir *cache_dir; - struct sys_device *sysdev; + struct device *dev; struct kobject *kobj = NULL; - sysdev = get_cpu_sysdev(cpu_id); - WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id); - if (!sysdev) + dev = get_cpu_device(cpu_id); + WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id); + if (!dev) goto err; - kobj = kobject_create_and_add("cache", &sysdev->kobj); + kobj = kobject_create_and_add("cache", &dev->kobj); if (!kobj) goto err; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 25ddbfc7dd3..da08240353f 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index ce035c1905f..f396ef27916 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -1,4 +1,4 @@ -#include +#include #include #include #include @@ -37,12 +37,12 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); /* Time in microseconds we delay before sleeping in the idle loop */ DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; -static ssize_t store_smt_snooze_delay(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t store_smt_snooze_delay(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t ret; long snooze; @@ -50,21 +50,21 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, if (ret != 1) return -EINVAL; - per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze; + per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; return count; } -static ssize_t show_smt_snooze_delay(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t show_smt_snooze_delay(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); + return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); } -static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, +static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, store_smt_snooze_delay); static int __init setup_smt_snooze_delay(char *str) @@ -117,25 +117,25 @@ static void write_##NAME(void *val) \ ppc_enable_pmcs(); \ mtspr(ADDRESS, *(unsigned long *)val); \ } \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ - struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ + struct cpu *cpu = container_of(dev, struct cpu, dev); \ unsigned long val; \ - smp_call_function_single(cpu->sysdev.id, read_##NAME, &val, 1); \ + smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \ return sprintf(buf, "%lx\n", val); \ } \ static ssize_t __used \ - store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \ + store_##NAME(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ - struct cpu *cpu = container_of(dev, struct cpu, sysdev); \ + struct cpu *cpu = 
container_of(dev, struct cpu, dev); \ unsigned long val; \ int ret = sscanf(buf, "%lx", &val); \ if (ret != 1) \ return -EINVAL; \ - smp_call_function_single(cpu->sysdev.id, write_##NAME, &val, 1); \ + smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \ return count; \ } @@ -178,22 +178,22 @@ SYSFS_PMCSETUP(purr, SPRN_PURR); SYSFS_PMCSETUP(spurr, SPRN_SPURR); SYSFS_PMCSETUP(dscr, SPRN_DSCR); -static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); -static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); -static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); -static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); +static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra); +static DEVICE_ATTR(spurr, 0600, show_spurr, NULL); +static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr); +static DEVICE_ATTR(purr, 0600, show_purr, store_purr); unsigned long dscr_default = 0; EXPORT_SYMBOL(dscr_default); -static ssize_t show_dscr_default(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t show_dscr_default(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", dscr_default); } -static ssize_t __used store_dscr_default(struct sysdev_class *class, - struct sysdev_class_attribute *attr, const char *buf, +static ssize_t __used store_dscr_default(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; @@ -207,15 +207,14 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class, return count; } -static SYSDEV_CLASS_ATTR(dscr_default, 0600, +static DEVICE_ATTR(dscr_default, 0600, show_dscr_default, store_dscr_default); static void sysfs_create_dscr_default(void) { int err = 0; if (cpu_has_feature(CPU_FTR_DSCR)) - err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, - &attr_dscr_default.attr); + err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default); } #endif /* CONFIG_PPC64 */ @@ -259,72 +258,72 @@ SYSFS_PMCSETUP(tsr3, SPRN_PA6T_TSR3); #endif /* HAS_PPC_PMC_PA6T */ #ifdef HAS_PPC_PMC_IBM -static struct sysdev_attribute ibm_common_attrs[] = { - _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), - _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), +static struct device_attribute ibm_common_attrs[] = { + __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), + __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), }; #endif /* HAS_PPC_PMC_G4 */ #ifdef HAS_PPC_PMC_G4 -static struct sysdev_attribute g4_common_attrs[] = { - _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), - _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), - _SYSDEV_ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2), +static struct device_attribute g4_common_attrs[] = { + __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), + __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), + __ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2), }; #endif /* HAS_PPC_PMC_G4 */ -static struct sysdev_attribute classic_pmc_attrs[] = { - _SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1), - _SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2), - _SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3), - _SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4), - _SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5), - _SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6), +static struct device_attribute classic_pmc_attrs[] = { + __ATTR(pmc1, 0600, show_pmc1, store_pmc1), + __ATTR(pmc2, 0600, show_pmc2, store_pmc2), + __ATTR(pmc3, 0600, show_pmc3, store_pmc3), + __ATTR(pmc4, 0600, show_pmc4, store_pmc4), + __ATTR(pmc5, 0600, show_pmc5, store_pmc5), + __ATTR(pmc6, 0600, 
show_pmc6, store_pmc6), #ifdef CONFIG_PPC64 - _SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7), - _SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8), + __ATTR(pmc7, 0600, show_pmc7, store_pmc7), + __ATTR(pmc8, 0600, show_pmc8, store_pmc8), #endif }; #ifdef HAS_PPC_PMC_PA6T -static struct sysdev_attribute pa6t_attrs[] = { - _SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), - _SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), - _SYSDEV_ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0), - _SYSDEV_ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1), - _SYSDEV_ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2), - _SYSDEV_ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), - _SYSDEV_ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), - _SYSDEV_ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), +static struct device_attribute pa6t_attrs[] = { + __ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0), + __ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1), + __ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0), + __ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1), + __ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2), + __ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3), + __ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4), + __ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5), #ifdef CONFIG_DEBUG_KERNEL - _SYSDEV_ATTR(hid0, 0600, show_hid0, store_hid0), - _SYSDEV_ATTR(hid1, 0600, show_hid1, store_hid1), - _SYSDEV_ATTR(hid4, 0600, show_hid4, store_hid4), - _SYSDEV_ATTR(hid5, 0600, show_hid5, store_hid5), - _SYSDEV_ATTR(ima0, 0600, show_ima0, store_ima0), - _SYSDEV_ATTR(ima1, 0600, show_ima1, store_ima1), - _SYSDEV_ATTR(ima2, 0600, show_ima2, store_ima2), - _SYSDEV_ATTR(ima3, 0600, show_ima3, store_ima3), - _SYSDEV_ATTR(ima4, 0600, show_ima4, store_ima4), - _SYSDEV_ATTR(ima5, 0600, show_ima5, store_ima5), - _SYSDEV_ATTR(ima6, 0600, show_ima6, store_ima6), - _SYSDEV_ATTR(ima7, 0600, show_ima7, store_ima7), - _SYSDEV_ATTR(ima8, 0600, show_ima8, store_ima8), - _SYSDEV_ATTR(ima9, 0600, show_ima9, store_ima9), - _SYSDEV_ATTR(imaat, 0600, show_imaat, store_imaat), - _SYSDEV_ATTR(btcr, 0600, show_btcr, store_btcr), - _SYSDEV_ATTR(pccr, 0600, show_pccr, store_pccr), - _SYSDEV_ATTR(rpccr, 0600, show_rpccr, store_rpccr), - _SYSDEV_ATTR(der, 0600, show_der, store_der), - _SYSDEV_ATTR(mer, 0600, show_mer, store_mer), - _SYSDEV_ATTR(ber, 0600, show_ber, store_ber), - _SYSDEV_ATTR(ier, 0600, show_ier, store_ier), - _SYSDEV_ATTR(sier, 0600, show_sier, store_sier), - _SYSDEV_ATTR(siar, 0600, show_siar, store_siar), - _SYSDEV_ATTR(tsr0, 0600, show_tsr0, store_tsr0), - _SYSDEV_ATTR(tsr1, 0600, show_tsr1, store_tsr1), - _SYSDEV_ATTR(tsr2, 0600, show_tsr2, store_tsr2), - _SYSDEV_ATTR(tsr3, 0600, show_tsr3, store_tsr3), + __ATTR(hid0, 0600, show_hid0, store_hid0), + __ATTR(hid1, 0600, show_hid1, store_hid1), + __ATTR(hid4, 0600, show_hid4, store_hid4), + __ATTR(hid5, 0600, show_hid5, store_hid5), + __ATTR(ima0, 0600, show_ima0, store_ima0), + __ATTR(ima1, 0600, show_ima1, store_ima1), + __ATTR(ima2, 0600, show_ima2, store_ima2), + __ATTR(ima3, 0600, show_ima3, store_ima3), + __ATTR(ima4, 0600, show_ima4, store_ima4), + __ATTR(ima5, 0600, show_ima5, store_ima5), + __ATTR(ima6, 0600, show_ima6, store_ima6), + __ATTR(ima7, 0600, show_ima7, store_ima7), + __ATTR(ima8, 0600, show_ima8, store_ima8), + __ATTR(ima9, 0600, show_ima9, store_ima9), + __ATTR(imaat, 0600, show_imaat, store_imaat), + __ATTR(btcr, 0600, show_btcr, store_btcr), + __ATTR(pccr, 0600, show_pccr, store_pccr), + __ATTR(rpccr, 0600, show_rpccr, store_rpccr), + __ATTR(der, 0600, 
show_der, store_der), + __ATTR(mer, 0600, show_mer, store_mer), + __ATTR(ber, 0600, show_ber, store_ber), + __ATTR(ier, 0600, show_ier, store_ier), + __ATTR(sier, 0600, show_sier, store_sier), + __ATTR(siar, 0600, show_siar, store_siar), + __ATTR(tsr0, 0600, show_tsr0, store_tsr0), + __ATTR(tsr1, 0600, show_tsr1, store_tsr1), + __ATTR(tsr2, 0600, show_tsr2, store_tsr2), + __ATTR(tsr3, 0600, show_tsr3, store_tsr3), #endif /* CONFIG_DEBUG_KERNEL */ }; #endif /* HAS_PPC_PMC_PA6T */ @@ -333,14 +332,14 @@ static struct sysdev_attribute pa6t_attrs[] = { static void __cpuinit register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; - struct sysdev_attribute *attrs, *pmc_attrs; + struct device *s = &c->dev; + struct device_attribute *attrs, *pmc_attrs; int i, nattrs; #ifdef CONFIG_PPC64 if (!firmware_has_feature(FW_FEATURE_ISERIES) && cpu_has_feature(CPU_FTR_SMT)) - sysdev_create_file(s, &attr_smt_snooze_delay); + device_create_file(s, &dev_attr_smt_snooze_delay); #endif /* PMC stuff */ @@ -348,14 +347,14 @@ static void __cpuinit register_cpu_online(unsigned int cpu) #ifdef HAS_PPC_PMC_IBM case PPC_PMC_IBM: attrs = ibm_common_attrs; - nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_IBM */ #ifdef HAS_PPC_PMC_G4 case PPC_PMC_G4: attrs = g4_common_attrs; - nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_G4 */ @@ -363,7 +362,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu) case PPC_PMC_PA6T: /* PA Semi starts counting at PMC0 */ attrs = pa6t_attrs; - nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute); pmc_attrs = NULL; break; #endif /* HAS_PPC_PMC_PA6T */ @@ -374,24 +373,24 @@ static void __cpuinit register_cpu_online(unsigned int cpu) } for (i = 0; i < nattrs; i++) - sysdev_create_file(s, &attrs[i]); + device_create_file(s, &attrs[i]); if (pmc_attrs) for (i = 0; i < cur_cpu_spec->num_pmcs; i++) - sysdev_create_file(s, &pmc_attrs[i]); + device_create_file(s, &pmc_attrs[i]); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_MMCRA)) - sysdev_create_file(s, &attr_mmcra); + device_create_file(s, &dev_attr_mmcra); if (cpu_has_feature(CPU_FTR_PURR)) - sysdev_create_file(s, &attr_purr); + device_create_file(s, &dev_attr_purr); if (cpu_has_feature(CPU_FTR_SPURR)) - sysdev_create_file(s, &attr_spurr); + device_create_file(s, &dev_attr_spurr); if (cpu_has_feature(CPU_FTR_DSCR)) - sysdev_create_file(s, &attr_dscr); + device_create_file(s, &dev_attr_dscr); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_online(cpu); @@ -401,8 +400,8 @@ static void __cpuinit register_cpu_online(unsigned int cpu) static void unregister_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; - struct sysdev_attribute *attrs, *pmc_attrs; + struct device *s = &c->dev; + struct device_attribute *attrs, *pmc_attrs; int i, nattrs; BUG_ON(!c->hotpluggable); @@ -410,7 +409,7 @@ static void unregister_cpu_online(unsigned int cpu) #ifdef CONFIG_PPC64 if (!firmware_has_feature(FW_FEATURE_ISERIES) && cpu_has_feature(CPU_FTR_SMT)) - sysdev_remove_file(s, &attr_smt_snooze_delay); + device_remove_file(s, &dev_attr_smt_snooze_delay); #endif /* PMC stuff 
*/ @@ -418,14 +417,14 @@ static void unregister_cpu_online(unsigned int cpu) #ifdef HAS_PPC_PMC_IBM case PPC_PMC_IBM: attrs = ibm_common_attrs; - nattrs = sizeof(ibm_common_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_IBM */ #ifdef HAS_PPC_PMC_G4 case PPC_PMC_G4: attrs = g4_common_attrs; - nattrs = sizeof(g4_common_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute); pmc_attrs = classic_pmc_attrs; break; #endif /* HAS_PPC_PMC_G4 */ @@ -433,7 +432,7 @@ static void unregister_cpu_online(unsigned int cpu) case PPC_PMC_PA6T: /* PA Semi starts counting at PMC0 */ attrs = pa6t_attrs; - nattrs = sizeof(pa6t_attrs) / sizeof(struct sysdev_attribute); + nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute); pmc_attrs = NULL; break; #endif /* HAS_PPC_PMC_PA6T */ @@ -444,24 +443,24 @@ static void unregister_cpu_online(unsigned int cpu) } for (i = 0; i < nattrs; i++) - sysdev_remove_file(s, &attrs[i]); + device_remove_file(s, &attrs[i]); if (pmc_attrs) for (i = 0; i < cur_cpu_spec->num_pmcs; i++) - sysdev_remove_file(s, &pmc_attrs[i]); + device_remove_file(s, &pmc_attrs[i]); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_MMCRA)) - sysdev_remove_file(s, &attr_mmcra); + device_remove_file(s, &dev_attr_mmcra); if (cpu_has_feature(CPU_FTR_PURR)) - sysdev_remove_file(s, &attr_purr); + device_remove_file(s, &dev_attr_purr); if (cpu_has_feature(CPU_FTR_SPURR)) - sysdev_remove_file(s, &attr_spurr); + device_remove_file(s, &dev_attr_spurr); if (cpu_has_feature(CPU_FTR_DSCR)) - sysdev_remove_file(s, &attr_dscr); + device_remove_file(s, &dev_attr_dscr); #endif /* CONFIG_PPC64 */ cacheinfo_cpu_offline(cpu); @@ -513,70 +512,70 @@ static struct notifier_block __cpuinitdata sysfs_cpu_nb = { static DEFINE_MUTEX(cpu_mutex); -int cpu_add_sysdev_attr(struct sysdev_attribute *attr) +int cpu_add_dev_attr(struct device_attribute *attr) { int cpu; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { - sysdev_create_file(get_cpu_sysdev(cpu), attr); + device_create_file(get_cpu_device(cpu), attr); } mutex_unlock(&cpu_mutex); return 0; } -EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr); +EXPORT_SYMBOL_GPL(cpu_add_dev_attr); -int cpu_add_sysdev_attr_group(struct attribute_group *attrs) +int cpu_add_dev_attr_group(struct attribute_group *attrs) { int cpu; - struct sys_device *sysdev; + struct device *dev; int ret; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { - sysdev = get_cpu_sysdev(cpu); - ret = sysfs_create_group(&sysdev->kobj, attrs); + dev = get_cpu_device(cpu); + ret = sysfs_create_group(&dev->kobj, attrs); WARN_ON(ret != 0); } mutex_unlock(&cpu_mutex); return 0; } -EXPORT_SYMBOL_GPL(cpu_add_sysdev_attr_group); +EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group); -void cpu_remove_sysdev_attr(struct sysdev_attribute *attr) +void cpu_remove_dev_attr(struct device_attribute *attr) { int cpu; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { - sysdev_remove_file(get_cpu_sysdev(cpu), attr); + device_remove_file(get_cpu_device(cpu), attr); } mutex_unlock(&cpu_mutex); } -EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr); +EXPORT_SYMBOL_GPL(cpu_remove_dev_attr); -void cpu_remove_sysdev_attr_group(struct attribute_group *attrs) +void cpu_remove_dev_attr_group(struct attribute_group *attrs) { int cpu; - struct sys_device *sysdev; + struct device *dev; mutex_lock(&cpu_mutex); for_each_possible_cpu(cpu) { - sysdev = get_cpu_sysdev(cpu); - 
sysfs_remove_group(&sysdev->kobj, attrs); + dev = get_cpu_device(cpu); + sysfs_remove_group(&dev->kobj, attrs); } mutex_unlock(&cpu_mutex); } -EXPORT_SYMBOL_GPL(cpu_remove_sysdev_attr_group); +EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group); /* NUMA stuff */ @@ -590,7 +589,7 @@ static void register_nodes(void) register_one_node(i); } -int sysfs_add_device_to_node(struct sys_device *dev, int nid) +int sysfs_add_device_to_node(struct device *dev, int nid) { struct node *node = &node_devices[nid]; return sysfs_create_link(&node->sysdev.kobj, &dev->kobj, @@ -598,7 +597,7 @@ int sysfs_add_device_to_node(struct sys_device *dev, int nid) } EXPORT_SYMBOL_GPL(sysfs_add_device_to_node); -void sysfs_remove_device_from_node(struct sys_device *dev, int nid) +void sysfs_remove_device_from_node(struct device *dev, int nid) { struct node *node = &node_devices[nid]; sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj)); @@ -614,14 +613,14 @@ static void register_nodes(void) #endif /* Only valid if CPU is present. */ -static ssize_t show_physical_id(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_physical_id(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id)); + return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id)); } -static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL); +static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL); static int __init topology_init(void) { @@ -646,7 +645,7 @@ static int __init topology_init(void) if (cpu_online(cpu) || c->hotpluggable) { register_cpu(c, cpu); - sysdev_create_file(&c->sysdev, &attr_physical_id); + device_create_file(&c->dev, &dev_attr_physical_id); } if (cpu_online(cpu)) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index c7dd4dec4df..f2b03a86343 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1452,7 +1452,7 @@ int arch_update_cpu_topology(void) { int cpu, nid, old_nid; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; - struct sys_device *sysdev; + struct device *dev; for_each_cpu(cpu,&cpu_associativity_changes_mask) { vphn_get_associativity(cpu, associativity); @@ -1473,9 +1473,9 @@ int arch_update_cpu_topology(void) register_cpu_under_node(cpu, nid); put_online_cpus(); - sysdev = get_cpu_sysdev(cpu); - if (sysdev) - kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + dev = get_cpu_device(cpu); + if (dev) + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } return 1; diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c index 4d4c8c16912..94560db788b 100644 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ b/arch/powerpc/platforms/cell/cbe_thermal.c @@ -46,7 +46,7 @@ */ #include -#include +#include #include #include #include @@ -59,8 +59,8 @@ #define TEMP_MIN 65 #define TEMP_MAX 125 -#define SYSDEV_PREFIX_ATTR(_prefix,_name,_mode) \ -struct sysdev_attribute attr_ ## _prefix ## _ ## _name = { \ +#define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ +struct device_attribute attr_ ## _prefix ## _ ## _name = { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .show = _prefix ## _show_ ## _name, \ .store = _prefix ## _store_ ## _name, \ @@ -76,36 +76,36 @@ static inline u8 temp_to_reg(u8 temp) return ((temp - TEMP_MIN) >> 1) & 0x3f; } -static struct cbe_pmd_regs __iomem *get_pmd_regs(struct sys_device *sysdev) 
+static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) { struct spu *spu; - spu = container_of(sysdev, struct spu, sysdev); + spu = container_of(dev, struct spu, dev); return cbe_get_pmd_regs(spu_devnode(spu)); } /* returns the value for a given spu in a given register */ -static u8 spu_read_register_value(struct sys_device *sysdev, union spe_reg __iomem *reg) +static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) { union spe_reg value; struct spu *spu; - spu = container_of(sysdev, struct spu, sysdev); + spu = container_of(dev, struct spu, dev); value.val = in_be64(®->val); return value.spe[spu->spe_id]; } -static ssize_t spu_show_temp(struct sys_device *sysdev, struct sysdev_attribute *attr, +static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { u8 value; struct cbe_pmd_regs __iomem *pmd_regs; - pmd_regs = get_pmd_regs(sysdev); + pmd_regs = get_pmd_regs(dev); - value = spu_read_register_value(sysdev, &pmd_regs->ts_ctsr1); + value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); return sprintf(buf, "%d\n", reg_to_temp(value)); } @@ -147,48 +147,48 @@ static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char return size; } -static ssize_t spu_show_throttle_end(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t spu_show_throttle_end(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(get_pmd_regs(sysdev), buf, 0); + return show_throttle(get_pmd_regs(dev), buf, 0); } -static ssize_t spu_show_throttle_begin(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t spu_show_throttle_begin(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(get_pmd_regs(sysdev), buf, 8); + return show_throttle(get_pmd_regs(dev), buf, 8); } -static ssize_t spu_show_throttle_full_stop(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t spu_show_throttle_full_stop(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(get_pmd_regs(sysdev), buf, 16); + return show_throttle(get_pmd_regs(dev), buf, 16); } -static ssize_t spu_store_throttle_end(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t spu_store_throttle_end(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(get_pmd_regs(sysdev), buf, size, 0); + return store_throttle(get_pmd_regs(dev), buf, size, 0); } -static ssize_t spu_store_throttle_begin(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t spu_store_throttle_begin(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(get_pmd_regs(sysdev), buf, size, 8); + return store_throttle(get_pmd_regs(dev), buf, size, 8); } -static ssize_t spu_store_throttle_full_stop(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t spu_store_throttle_full_stop(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(get_pmd_regs(sysdev), buf, size, 16); + return store_throttle(get_pmd_regs(dev), buf, size, 16); } -static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos) +static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) { struct cbe_pmd_regs __iomem *pmd_regs; 
u64 value; - pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id); + pmd_regs = cbe_get_cpu_pmd_regs(dev->id); value = in_be64(&pmd_regs->ts_ctsr2); value = (value >> pos) & 0x3f; @@ -199,64 +199,64 @@ static ssize_t ppe_show_temp(struct sys_device *sysdev, char *buf, int pos) /* shows the temperature of the DTS on the PPE, * located near the linear thermal sensor */ -static ssize_t ppe_show_temp0(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t ppe_show_temp0(struct device *dev, + struct device_attribute *attr, char *buf) { - return ppe_show_temp(sysdev, buf, 32); + return ppe_show_temp(dev, buf, 32); } /* shows the temperature of the second DTS on the PPE */ -static ssize_t ppe_show_temp1(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t ppe_show_temp1(struct device *dev, + struct device_attribute *attr, char *buf) { - return ppe_show_temp(sysdev, buf, 0); + return ppe_show_temp(dev, buf, 0); } -static ssize_t ppe_show_throttle_end(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t ppe_show_throttle_end(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 32); + return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); } -static ssize_t ppe_show_throttle_begin(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t ppe_show_throttle_begin(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 40); + return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); } -static ssize_t ppe_show_throttle_full_stop(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t ppe_show_throttle_full_stop(struct device *dev, + struct device_attribute *attr, char *buf) { - return show_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, 48); + return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); } -static ssize_t ppe_store_throttle_end(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t ppe_store_throttle_end(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 32); + return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); } -static ssize_t ppe_store_throttle_begin(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t ppe_store_throttle_begin(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 40); + return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); } -static ssize_t ppe_store_throttle_full_stop(struct sys_device *sysdev, - struct sysdev_attribute *attr, const char *buf, size_t size) +static ssize_t ppe_store_throttle_full_stop(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { - return store_throttle(cbe_get_cpu_pmd_regs(sysdev->id), buf, size, 48); + return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); } -static struct sysdev_attribute attr_spu_temperature = { +static struct device_attribute attr_spu_temperature = { .attr = {.name = "temperature", .mode = 0400 }, .show = spu_show_temp, }; -static SYSDEV_PREFIX_ATTR(spu, throttle_end, 0600); -static SYSDEV_PREFIX_ATTR(spu, throttle_begin, 0600); -static 
SYSDEV_PREFIX_ATTR(spu, throttle_full_stop, 0600); +static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); +static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); +static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); static struct attribute *spu_attributes[] = { @@ -272,19 +272,19 @@ static struct attribute_group spu_attribute_group = { .attrs = spu_attributes, }; -static struct sysdev_attribute attr_ppe_temperature0 = { +static struct device_attribute attr_ppe_temperature0 = { .attr = {.name = "temperature0", .mode = 0400 }, .show = ppe_show_temp0, }; -static struct sysdev_attribute attr_ppe_temperature1 = { +static struct device_attribute attr_ppe_temperature1 = { .attr = {.name = "temperature1", .mode = 0400 }, .show = ppe_show_temp1, }; -static SYSDEV_PREFIX_ATTR(ppe, throttle_end, 0600); -static SYSDEV_PREFIX_ATTR(ppe, throttle_begin, 0600); -static SYSDEV_PREFIX_ATTR(ppe, throttle_full_stop, 0600); +static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); +static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); +static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600); static struct attribute *ppe_attributes[] = { &attr_ppe_temperature0.attr, @@ -307,7 +307,7 @@ static int __init init_default_values(void) { int cpu; struct cbe_pmd_regs __iomem *pmd_regs; - struct sys_device *sysdev; + struct device *dev; union ppe_spe_reg tpr; union spe_reg str1; u64 str2; @@ -349,14 +349,14 @@ static int __init init_default_values(void) for_each_possible_cpu (cpu) { pr_debug("processing cpu %d\n", cpu); - sysdev = get_cpu_sysdev(cpu); + dev = get_cpu_device(cpu); - if (!sysdev) { - pr_info("invalid sysdev pointer for cbe_thermal\n"); + if (!dev) { + pr_info("invalid dev pointer for cbe_thermal\n"); return -EINVAL; } - pmd_regs = cbe_get_cpu_pmd_regs(sysdev->id); + pmd_regs = cbe_get_cpu_pmd_regs(dev->id); if (!pmd_regs) { pr_info("invalid CBE regs pointer for cbe_thermal\n"); @@ -379,8 +379,8 @@ static int __init thermal_init(void) int rc = init_default_values(); if (rc == 0) { - spu_add_sysdev_attr_group(&spu_attribute_group); - cpu_add_sysdev_attr_group(&ppe_attribute_group); + spu_add_dev_attr_group(&spu_attribute_group); + cpu_add_dev_attr_group(&ppe_attribute_group); } return rc; @@ -389,8 +389,8 @@ module_init(thermal_init); static void __exit thermal_exit(void) { - spu_remove_sysdev_attr_group(&spu_attribute_group); - cpu_remove_sysdev_attr_group(&ppe_attribute_group); + spu_remove_dev_attr_group(&spu_attribute_group); + cpu_remove_dev_attr_group(&ppe_attribute_group); } module_exit(thermal_exit); diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 3675da73623..1708fb7aba3 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -522,31 +522,32 @@ void spu_init_channels(struct spu *spu) } EXPORT_SYMBOL_GPL(spu_init_channels); -static struct sysdev_class spu_sysdev_class = { +static struct bus_type spu_subsys = { .name = "spu", + .dev_name = "spu", }; -int spu_add_sysdev_attr(struct sysdev_attribute *attr) +int spu_add_dev_attr(struct device_attribute *attr) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) - sysdev_create_file(&spu->sysdev, attr); + device_create_file(&spu->dev, attr); mutex_unlock(&spu_full_list_mutex); return 0; } -EXPORT_SYMBOL_GPL(spu_add_sysdev_attr); +EXPORT_SYMBOL_GPL(spu_add_dev_attr); -int spu_add_sysdev_attr_group(struct attribute_group *attrs) +int spu_add_dev_attr_group(struct attribute_group *attrs) { struct spu *spu; int rc = 
0; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) { - rc = sysfs_create_group(&spu->sysdev.kobj, attrs); + rc = sysfs_create_group(&spu->dev.kobj, attrs); /* we're in trouble here, but try unwinding anyway */ if (rc) { @@ -555,7 +556,7 @@ int spu_add_sysdev_attr_group(struct attribute_group *attrs) list_for_each_entry_continue_reverse(spu, &spu_full_list, full_list) - sysfs_remove_group(&spu->sysdev.kobj, attrs); + sysfs_remove_group(&spu->dev.kobj, attrs); break; } } @@ -564,45 +565,45 @@ int spu_add_sysdev_attr_group(struct attribute_group *attrs) return rc; } -EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group); +EXPORT_SYMBOL_GPL(spu_add_dev_attr_group); -void spu_remove_sysdev_attr(struct sysdev_attribute *attr) +void spu_remove_dev_attr(struct device_attribute *attr) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) - sysdev_remove_file(&spu->sysdev, attr); + device_remove_file(&spu->dev, attr); mutex_unlock(&spu_full_list_mutex); } -EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr); +EXPORT_SYMBOL_GPL(spu_remove_dev_attr); -void spu_remove_sysdev_attr_group(struct attribute_group *attrs) +void spu_remove_dev_attr_group(struct attribute_group *attrs) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) - sysfs_remove_group(&spu->sysdev.kobj, attrs); + sysfs_remove_group(&spu->dev.kobj, attrs); mutex_unlock(&spu_full_list_mutex); } -EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group); +EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group); -static int spu_create_sysdev(struct spu *spu) +static int spu_create_dev(struct spu *spu) { int ret; - spu->sysdev.id = spu->number; - spu->sysdev.cls = &spu_sysdev_class; - ret = sysdev_register(&spu->sysdev); + spu->dev.id = spu->number; + spu->dev.bus = &spu_subsys; + ret = device_register(&spu->dev); if (ret) { printk(KERN_ERR "Can't register SPU %d with sysfs\n", spu->number); return ret; } - sysfs_add_device_to_node(&spu->sysdev, spu->node); + sysfs_add_device_to_node(&spu->dev, spu->node); return 0; } @@ -638,7 +639,7 @@ static int __init create_spu(void *data) if (ret) goto out_destroy; - ret = spu_create_sysdev(spu); + ret = spu_create_dev(spu); if (ret) goto out_free_irqs; @@ -695,10 +696,10 @@ static unsigned long long spu_acct_time(struct spu *spu, } -static ssize_t spu_stat_show(struct sys_device *sysdev, - struct sysdev_attribute *attr, char *buf) +static ssize_t spu_stat_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct spu *spu = container_of(sysdev, struct spu, sysdev); + struct spu *spu = container_of(dev, struct spu, dev); return sprintf(buf, "%s %llu %llu %llu %llu " "%llu %llu %llu %llu %llu %llu %llu %llu\n", @@ -717,7 +718,7 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, spu->stats.libassist); } -static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); +static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL); #ifdef CONFIG_KEXEC @@ -816,8 +817,8 @@ static int __init init_spu_base(void) if (!spu_management_ops) goto out; - /* create sysdev class for spus */ - ret = sysdev_class_register(&spu_sysdev_class); + /* create system subsystem for spus */ + ret = subsys_system_register(&spu_subsys, NULL); if (ret) goto out; @@ -826,7 +827,7 @@ static int __init init_spu_base(void) if (ret < 0) { printk(KERN_WARNING "%s: Error initializing spus\n", __func__); - goto out_unregister_sysdev_class; + goto out_unregister_subsys; } if (ret > 0) @@ -836,15 +837,15 @@ static int __init 
init_spu_base(void) xmon_register_spus(&spu_full_list); crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); - spu_add_sysdev_attr(&attr_stat); + spu_add_dev_attr(&dev_attr_stat); register_syscore_ops(&spu_syscore_ops); spu_init_affinity(); return 0; - out_unregister_sysdev_class: - sysdev_class_unregister(&spu_sysdev_class); + out_unregister_subsys: + bus_unregister(&spu_subsys); out: return ret; } diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c index c8b3c69fe89..af281dce510 100644 --- a/arch/powerpc/platforms/pseries/pseries_energy.c +++ b/arch/powerpc/platforms/pseries/pseries_energy.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -184,7 +184,7 @@ static ssize_t get_best_energy_list(char *page, int activate) return s-page; } -static ssize_t get_best_energy_data(struct sys_device *dev, +static ssize_t get_best_energy_data(struct device *dev, char *page, int activate) { int rc; @@ -207,26 +207,26 @@ static ssize_t get_best_energy_data(struct sys_device *dev, /* Wrapper functions */ -static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *page) +static ssize_t cpu_activate_hint_list_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_best_energy_list(page, 1); } -static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *page) +static ssize_t cpu_deactivate_hint_list_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_best_energy_list(page, 0); } -static ssize_t percpu_activate_hint_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *page) +static ssize_t percpu_activate_hint_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_best_energy_data(dev, page, 1); } -static ssize_t percpu_deactivate_hint_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *page) +static ssize_t percpu_deactivate_hint_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_best_energy_data(dev, page, 0); } @@ -241,48 +241,48 @@ static ssize_t percpu_deactivate_hint_show(struct sys_device *dev, * Per-cpu value of the hint */ -struct sysdev_class_attribute attr_cpu_activate_hint_list = - _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444, +struct device_attribute attr_cpu_activate_hint_list = + __ATTR(pseries_activate_hint_list, 0444, cpu_activate_hint_list_show, NULL); -struct sysdev_class_attribute attr_cpu_deactivate_hint_list = - _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444, +struct device_attribute attr_cpu_deactivate_hint_list = + __ATTR(pseries_deactivate_hint_list, 0444, cpu_deactivate_hint_list_show, NULL); -struct sysdev_attribute attr_percpu_activate_hint = - _SYSDEV_ATTR(pseries_activate_hint, 0444, +struct device_attribute attr_percpu_activate_hint = + __ATTR(pseries_activate_hint, 0444, percpu_activate_hint_show, NULL); -struct sysdev_attribute attr_percpu_deactivate_hint = - _SYSDEV_ATTR(pseries_deactivate_hint, 0444, +struct device_attribute attr_percpu_deactivate_hint = + __ATTR(pseries_deactivate_hint, 0444, percpu_deactivate_hint_show, NULL); static int __init pseries_energy_init(void) { int cpu, err; - struct sys_device *cpu_sys_dev; + struct device *cpu_dev; if (!check_for_h_best_energy()) { printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); return 0; } /* Create the sysfs files 
*/ - err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, - &attr_cpu_activate_hint_list.attr); + err = device_create_file(cpu_subsys.dev_root, + &attr_cpu_activate_hint_list); if (!err) - err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, - &attr_cpu_deactivate_hint_list.attr); + err = device_create_file(cpu_subsys.dev_root, + &attr_cpu_deactivate_hint_list); if (err) return err; for_each_possible_cpu(cpu) { - cpu_sys_dev = get_cpu_sysdev(cpu); - err = sysfs_create_file(&cpu_sys_dev->kobj, - &attr_percpu_activate_hint.attr); + cpu_dev = get_cpu_device(cpu); + err = device_create_file(cpu_dev, + &attr_percpu_activate_hint); if (err) break; - err = sysfs_create_file(&cpu_sys_dev->kobj, - &attr_percpu_deactivate_hint.attr); + err = device_create_file(cpu_dev, + &attr_percpu_deactivate_hint); if (err) break; } @@ -298,23 +298,20 @@ static int __init pseries_energy_init(void) static void __exit pseries_energy_cleanup(void) { int cpu; - struct sys_device *cpu_sys_dev; + struct device *cpu_dev; if (!sysfs_entries) return; /* Remove the sysfs files */ - sysfs_remove_file(&cpu_sysdev_class.kset.kobj, - &attr_cpu_activate_hint_list.attr); - - sysfs_remove_file(&cpu_sysdev_class.kset.kobj, - &attr_cpu_deactivate_hint_list.attr); + device_remove_file(cpu_subsys.dev_root, &attr_cpu_activate_hint_list); + device_remove_file(cpu_subsys.dev_root, &attr_cpu_deactivate_hint_list); for_each_possible_cpu(cpu) { - cpu_sys_dev = get_cpu_sysdev(cpu); - sysfs_remove_file(&cpu_sys_dev->kobj, + cpu_dev = get_cpu_device(cpu); + sysfs_remove_file(&cpu_dev->kobj, &attr_percpu_activate_hint.attr); - sysfs_remove_file(&cpu_sys_dev->kobj, + sysfs_remove_file(&cpu_dev->kobj, &attr_percpu_deactivate_hint.attr); } } diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c index 73b86cc5ea7..82e2cfe35c6 100644 --- a/arch/powerpc/sysdev/ppc4xx_cpm.c +++ b/arch/powerpc/sysdev/ppc4xx_cpm.c @@ -179,12 +179,12 @@ static struct kobj_attribute cpm_idle_attr = static void cpm_idle_config_sysfs(void) { - struct sys_device *sys_dev; + struct device *dev; unsigned long ret; - sys_dev = get_cpu_sysdev(0); + dev = get_cpu_device(0); - ret = sysfs_create_file(&sys_dev->kobj, + ret = sysfs_create_file(&dev->kobj, &cpm_idle_attr.attr); if (ret) printk(KERN_WARNING diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 3ea872890da..66cca03c028 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -831,8 +831,8 @@ int setup_profiling_timer(unsigned int multiplier) } #ifdef CONFIG_HOTPLUG_CPU -static ssize_t cpu_configure_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t cpu_configure_show(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t count; @@ -842,8 +842,8 @@ static ssize_t cpu_configure_show(struct sys_device *dev, return count; } -static ssize_t cpu_configure_store(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t cpu_configure_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { int cpu = dev->id; @@ -889,11 +889,11 @@ out: put_online_cpus(); return rc ? 
rc : count; } -static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); +static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t cpu_polarization_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t cpu_polarization_show(struct device *dev, + struct device_attribute *attr, char *buf) { int cpu = dev->id; ssize_t count; @@ -919,22 +919,22 @@ static ssize_t cpu_polarization_show(struct sys_device *dev, mutex_unlock(&smp_cpu_state_mutex); return count; } -static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); +static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL); -static ssize_t show_cpu_address(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_cpu_address(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); } -static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL); +static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); static struct attribute *cpu_common_attrs[] = { #ifdef CONFIG_HOTPLUG_CPU - &attr_configure.attr, + &dev_attr_configure.attr, #endif - &attr_address.attr, - &attr_polarization.attr, + &dev_attr_address.attr, + &dev_attr_polarization.attr, NULL, }; @@ -942,8 +942,8 @@ static struct attribute_group cpu_common_attr_group = { .attrs = cpu_common_attrs, }; -static ssize_t show_capability(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_capability(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned int capability; int rc; @@ -953,10 +953,10 @@ static ssize_t show_capability(struct sys_device *dev, return rc; return sprintf(buf, "%u\n", capability); } -static SYSDEV_ATTR(capability, 0444, show_capability, NULL); +static DEVICE_ATTR(capability, 0444, show_capability, NULL); -static ssize_t show_idle_count(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_idle_count(struct device *dev, + struct device_attribute *attr, char *buf) { struct s390_idle_data *idle; unsigned long long idle_count; @@ -976,10 +976,10 @@ repeat: goto repeat; return sprintf(buf, "%llu\n", idle_count); } -static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); +static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); -static ssize_t show_idle_time(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_idle_time(struct device *dev, + struct device_attribute *attr, char *buf) { struct s390_idle_data *idle; unsigned long long now, idle_time, idle_enter; @@ -1001,12 +1001,12 @@ repeat: goto repeat; return sprintf(buf, "%llu\n", idle_time >> 12); } -static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); +static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); static struct attribute *cpu_online_attrs[] = { - &attr_capability.attr, - &attr_idle_count.attr, - &attr_idle_time_us.attr, + &dev_attr_capability.attr, + &dev_attr_idle_count.attr, + &dev_attr_idle_time_us.attr, NULL, }; @@ -1019,7 +1019,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, { unsigned int cpu = (unsigned int)(long)hcpu; struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; struct s390_idle_data *idle; int err = 0; @@ -1045,7 +1045,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = { static int __devinit smp_add_present_cpu(int cpu) { struct 
cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; int rc; c->hotpluggable = 1; @@ -1098,8 +1098,8 @@ out: return rc; } -static ssize_t __ref rescan_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t __ref rescan_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { @@ -1108,11 +1108,11 @@ static ssize_t __ref rescan_store(struct sysdev_class *class, rc = smp_rescan_cpus(); return rc ? rc : count; } -static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store); +static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t dispatching_show(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t dispatching_show(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t count; @@ -1123,8 +1123,8 @@ static ssize_t dispatching_show(struct sysdev_class *class, return count; } -static ssize_t dispatching_store(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t dispatching_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { @@ -1148,7 +1148,7 @@ out: put_online_cpus(); return rc ? rc : count; } -static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, +static DEVICE_ATTR(dispatching, 0644, dispatching_show, dispatching_store); static int __init topology_init(void) @@ -1159,11 +1159,11 @@ static int __init topology_init(void) register_cpu_notifier(&smp_cpu_nb); #ifdef CONFIG_HOTPLUG_CPU - rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan); + rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); if (rc) return rc; #endif - rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching); + rc = device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); if (rc) return rc; for_each_present_cpu(cpu) { diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 77b8942b9a1..6dfc524c31a 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -230,7 +230,7 @@ void store_topology(struct sysinfo_15_1_x *info) int arch_update_cpu_topology(void) { struct sysinfo_15_1_x *info = tl_info; - struct sys_device *sysdev; + struct device *dev; int cpu; if (!MACHINE_HAS_TOPOLOGY) { @@ -242,8 +242,8 @@ int arch_update_cpu_topology(void) tl_to_cores(info); update_cpu_core_map(); for_each_online_cpu(cpu) { - sysdev = get_cpu_sysdev(cpu); - kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + dev = get_cpu_device(cpu); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } return 1; } diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index f0907995b4c..a8140f0bbf6 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -337,9 +337,9 @@ static struct kobj_type ktype_percpu_entry = { .default_attrs = sq_sysfs_attrs, }; -static int __devinit sq_sysdev_add(struct sys_device *sysdev) +static int __devinit sq_dev_add(struct device *dev) { - unsigned int cpu = sysdev->id; + unsigned int cpu = dev->id; struct kobject *kobj; int error; @@ -348,25 +348,27 @@ static int __devinit sq_sysdev_add(struct sys_device *sysdev) return -ENOMEM; kobj = sq_kobject[cpu]; - error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj, + error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj, "%s", "sq"); if (!error) kobject_uevent(kobj, KOBJ_ADD); return error; 
} -static int __devexit sq_sysdev_remove(struct sys_device *sysdev) +static int __devexit sq_dev_remove(struct device *dev) { - unsigned int cpu = sysdev->id; + unsigned int cpu = dev->id; struct kobject *kobj = sq_kobject[cpu]; kobject_put(kobj); return 0; } -static struct sysdev_driver sq_sysdev_driver = { - .add = sq_sysdev_add, - .remove = __devexit_p(sq_sysdev_remove), +static struct subsys_interface sq_interface = { + .name = "sq", + .subsys = &cpu_subsys, + .add_dev = sq_dev_add, + .remove_dev = __devexit_p(sq_dev_remove), }; static int __init sq_api_init(void) @@ -386,7 +388,7 @@ static int __init sq_api_init(void) if (unlikely(!sq_bitmap)) goto out; - ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver); + ret = subsys_interface_register(&sq_interface); if (unlikely(ret != 0)) goto out; @@ -401,7 +403,7 @@ out: static void __exit sq_api_exit(void) { - sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver); + subsys_interface_unregister(&sq_interface); kfree(sq_bitmap); kmem_cache_destroy(sq_cache); } diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 7408201d7ef..654e8aad3bb 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -3,7 +3,7 @@ * Copyright (C) 2007 David S. Miller */ #include -#include +#include #include #include #include @@ -16,13 +16,13 @@ static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); #define SHOW_MMUSTAT_ULONG(NAME) \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ return sprintf(buf, "%lu\n", p->NAME); \ } \ -static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL) +static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL) SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte); @@ -58,38 +58,38 @@ SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte); static struct attribute *mmu_stat_attrs[] = { - &attr_immu_tsb_hits_ctx0_8k_tte.attr, - &attr_immu_tsb_ticks_ctx0_8k_tte.attr, - &attr_immu_tsb_hits_ctx0_64k_tte.attr, - &attr_immu_tsb_ticks_ctx0_64k_tte.attr, - &attr_immu_tsb_hits_ctx0_4mb_tte.attr, - &attr_immu_tsb_ticks_ctx0_4mb_tte.attr, - &attr_immu_tsb_hits_ctx0_256mb_tte.attr, - &attr_immu_tsb_ticks_ctx0_256mb_tte.attr, - &attr_immu_tsb_hits_ctxnon0_8k_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, - &attr_immu_tsb_hits_ctxnon0_64k_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, - &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, - &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, - &attr_dmmu_tsb_hits_ctx0_8k_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, - &attr_dmmu_tsb_hits_ctx0_64k_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, - &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, - &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, +
&dev_attr_immu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, NULL, }; @@ -139,15 +139,15 @@ static unsigned long write_mmustat_enable(unsigned long val) return sun4v_mmustat_conf(ra, &orig_ra); } -static ssize_t show_mmustat_enable(struct sys_device *s, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mmustat_enable(struct device *s, + struct device_attribute *attr, char *buf) { unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0); return sprintf(buf, "%lx\n", val); } -static ssize_t store_mmustat_enable(struct sys_device *s, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_mmustat_enable(struct device *s, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val, err; @@ -163,39 +163,39 @@ static ssize_t store_mmustat_enable(struct sys_device *s, return count; } -static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); +static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); static int mmu_stats_supported; -static int register_mmu_stats(struct sys_device *s) +static int register_mmu_stats(struct device *s) { if (!mmu_stats_supported) return 0; - sysdev_create_file(s, &attr_mmustat_enable); + device_create_file(s, &dev_attr_mmustat_enable); return sysfs_create_group(&s->kobj, &mmu_stat_group); } #ifdef CONFIG_HOTPLUG_CPU -static void unregister_mmu_stats(struct sys_device *s) +static void unregister_mmu_stats(struct device *s) { if (!mmu_stats_supported) return; sysfs_remove_group(&s->kobj, &mmu_stat_group); - sysdev_remove_file(s, &attr_mmustat_enable); + device_remove_file(s, &dev_attr_mmustat_enable); } #endif #define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%lu\n", c->MEMBER); \ } #define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, \ 
- struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%u\n", c->MEMBER); \ @@ -209,14 +209,14 @@ SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size); SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size); SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); -static struct sysdev_attribute cpu_core_attrs[] = { - _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL), - _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), - _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), - _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), - _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), - _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), - _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), +static struct device_attribute cpu_core_attrs[] = { + __ATTR(clock_tick, 0444, show_clock_tick, NULL), + __ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), + __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), + __ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), + __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), + __ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), + __ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), }; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -224,11 +224,11 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; int i; for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) - sysdev_create_file(s, &cpu_core_attrs[i]); + device_create_file(s, &cpu_core_attrs[i]); register_mmu_stats(s); } @@ -237,12 +237,12 @@ static void register_cpu_online(unsigned int cpu) static void unregister_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; int i; unregister_mmu_stats(s); for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) - sysdev_remove_file(s, &cpu_core_attrs[i]); + device_remove_file(s, &cpu_core_attrs[i]); } #endif diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c index b671a86f451..e7ce2a5161b 100644 --- a/arch/tile/kernel/sysfs.c +++ b/arch/tile/kernel/sysfs.c @@ -14,7 +14,7 @@ * /sys entry support. 
*/ -#include +#include #include #include #include @@ -31,55 +31,55 @@ static ssize_t get_hv_confstr(char *page, int query) return n; } -static ssize_t chip_width_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t chip_width_show(struct device *dev, + struct device_attribute *attr, char *page) { return sprintf(page, "%u\n", smp_width); } -static SYSDEV_CLASS_ATTR(chip_width, 0444, chip_width_show, NULL); +static DEVICE_ATTR(chip_width, 0444, chip_width_show, NULL); -static ssize_t chip_height_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t chip_height_show(struct device *dev, + struct device_attribute *attr, char *page) { return sprintf(page, "%u\n", smp_height); } -static SYSDEV_CLASS_ATTR(chip_height, 0444, chip_height_show, NULL); +static DEVICE_ATTR(chip_height, 0444, chip_height_show, NULL); -static ssize_t chip_serial_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t chip_serial_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_hv_confstr(page, HV_CONFSTR_CHIP_SERIAL_NUM); } -static SYSDEV_CLASS_ATTR(chip_serial, 0444, chip_serial_show, NULL); +static DEVICE_ATTR(chip_serial, 0444, chip_serial_show, NULL); -static ssize_t chip_revision_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t chip_revision_show(struct device *dev, + struct device_attribute *attr, char *page) { return get_hv_confstr(page, HV_CONFSTR_CHIP_REV); } -static SYSDEV_CLASS_ATTR(chip_revision, 0444, chip_revision_show, NULL); +static DEVICE_ATTR(chip_revision, 0444, chip_revision_show, NULL); -static ssize_t type_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *page) { return sprintf(page, "tilera\n"); } -static SYSDEV_CLASS_ATTR(type, 0444, type_show, NULL); +static DEVICE_ATTR(type, 0444, type_show, NULL); #define HV_CONF_ATTR(name, conf) \ - static ssize_t name ## _show(struct sysdev_class *dev, \ - struct sysdev_class_attribute *attr, \ + static ssize_t name ## _show(struct device *dev, \ + struct device_attribute *attr, \ char *page) \ { \ return get_hv_confstr(page, conf); \ } \ - static SYSDEV_CLASS_ATTR(name, 0444, name ## _show, NULL); + static DEVICE_ATTR(name, 0444, name ## _show, NULL); HV_CONF_ATTR(version, HV_CONFSTR_HV_SW_VER) HV_CONF_ATTR(config_version, HV_CONFSTR_HV_CONFIG_VER) @@ -95,15 +95,15 @@ HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC) HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL) static struct attribute *board_attrs[] = { - &attr_board_part.attr, - &attr_board_serial.attr, - &attr_board_revision.attr, - &attr_board_description.attr, - &attr_mezz_part.attr, - &attr_mezz_serial.attr, - &attr_mezz_revision.attr, - &attr_mezz_description.attr, - &attr_switch_control.attr, + &dev_attr_board_part.attr, + &dev_attr_board_serial.attr, + &dev_attr_board_revision.attr, + &dev_attr_board_description.attr, + &dev_attr_mezz_part.attr, + &dev_attr_mezz_serial.attr, + &dev_attr_mezz_revision.attr, + &dev_attr_mezz_description.attr, + &dev_attr_switch_control.attr, NULL }; @@ -150,12 +150,11 @@ hvconfig_bin_read(struct file *filp, struct kobject *kobj, static int __init create_sysfs_entries(void) { - struct sysdev_class *cls = &cpu_sysdev_class; int err = 0; #define create_cpu_attr(name) \ if (!err) \ - err = sysfs_create_file(&cls->kset.kobj, &attr_##name.attr); + err = device_create_file(cpu_subsys.dev_root, 
&dev_attr_##name); create_cpu_attr(chip_width); create_cpu_attr(chip_height); create_cpu_attr(chip_serial); @@ -163,7 +162,7 @@ static int __init create_sysfs_entries(void) #define create_hv_attr(name) \ if (!err) \ - err = sysfs_create_file(hypervisor_kobj, &attr_##name.attr); + err = sysfs_create_file(hypervisor_kobj, &dev_attr_##name); create_hv_attr(type); create_hv_attr(version); create_hv_attr(config_version); diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c9321f34e55..0b05fb49c56 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -149,7 +149,7 @@ static inline void enable_p5_mce(void) {} void mce_setup(struct mce *m); void mce_log(struct mce *m); -DECLARE_PER_CPU(struct sys_device, mce_sysdev); +DECLARE_PER_CPU(struct device, mce_device); /* * Maximum banks number. diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index a3b0811693c..6b45e5e7a90 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -844,8 +844,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) #include #include - -extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ +#include /* pointer to kobject for cpuX/cache */ static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); @@ -1073,9 +1072,9 @@ err_out: static DECLARE_BITMAP(cache_dev_map, NR_CPUS); /* Add/Remove cache interface for CPU device */ -static int __cpuinit cache_add_dev(struct sys_device * sys_dev) +static int __cpuinit cache_add_dev(struct device *dev) { - unsigned int cpu = sys_dev->id; + unsigned int cpu = dev->id; unsigned long i, j; struct _index_kobject *this_object; struct _cpuid4_info *this_leaf; @@ -1087,7 +1086,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), &ktype_percpu_entry, - &sys_dev->kobj, "%s", "cache"); + &dev->kobj, "%s", "cache"); if (retval < 0) { cpuid4_cache_sysfs_exit(cpu); return retval; @@ -1124,9 +1123,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) return 0; } -static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) +static void __cpuinit cache_remove_dev(struct device *dev) { - unsigned int cpu = sys_dev->id; + unsigned int cpu = dev->id; unsigned long i; if (per_cpu(ici_cpuid4_info, cpu) == NULL) @@ -1145,17 +1144,17 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *dev; - sys_dev = get_cpu_sysdev(cpu); + dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - cache_add_dev(sys_dev); + cache_add_dev(dev); break; case CPU_DEAD: case CPU_DEAD_FROZEN: - cache_remove_dev(sys_dev); + cache_remove_dev(dev); break; } return NOTIFY_OK; @@ -1174,9 +1173,9 @@ static int __cpuinit cache_sysfs_init(void) for_each_online_cpu(i) { int err; - struct sys_device *sys_dev = get_cpu_sysdev(i); + struct device *dev = get_cpu_device(i); - err = cache_add_dev(sys_dev); + err = cache_add_dev(dev); if (err) return err; } diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index fefcc69ee8b..ed44c8a6585 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -1,4 +1,4 @@ -#include +#include #include enum severity_level { @@ -17,7 +17,7 @@ enum severity_level { struct mce_bank { u64 ctl; 
/* subevents to enable */ unsigned char init; /* initialise bank? */ - struct sysdev_attribute attr; /* sysdev attribute */ + struct device_attribute attr; /* device attribute */ char attrname[ATTR_LEN]; /* attribute name */ }; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 362056aefeb..0156c6f85d7 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #include #include @@ -1751,7 +1751,7 @@ static struct syscore_ops mce_syscore_ops = { }; /* - * mce_sysdev: Sysfs support + * mce_device: Sysfs support */ static void mce_cpu_restart(void *data) @@ -1787,27 +1787,28 @@ static void mce_enable_ce(void *all) __mcheck_cpu_init_timer(); } -static struct sysdev_class mce_sysdev_class = { +static struct bus_type mce_subsys = { .name = "machinecheck", + .dev_name = "machinecheck", }; -DEFINE_PER_CPU(struct sys_device, mce_sysdev); +DEFINE_PER_CPU(struct device, mce_device); __cpuinitdata void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); -static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr) +static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) { return container_of(attr, struct mce_bank, attr); } -static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, +static ssize_t show_bank(struct device *s, struct device_attribute *attr, char *buf) { return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl); } -static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, +static ssize_t set_bank(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { u64 new; @@ -1822,14 +1823,14 @@ static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, } static ssize_t -show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf) +show_trigger(struct device *s, struct device_attribute *attr, char *buf) { strcpy(buf, mce_helper); strcat(buf, "\n"); return strlen(mce_helper) + 1; } -static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, +static ssize_t set_trigger(struct device *s, struct device_attribute *attr, const char *buf, size_t siz) { char *p; @@ -1844,8 +1845,8 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr, return strlen(mce_helper) + !!p; } -static ssize_t set_ignore_ce(struct sys_device *s, - struct sysdev_attribute *attr, +static ssize_t set_ignore_ce(struct device *s, + struct device_attribute *attr, const char *buf, size_t size) { u64 new; @@ -1868,8 +1869,8 @@ static ssize_t set_ignore_ce(struct sys_device *s, return size; } -static ssize_t set_cmci_disabled(struct sys_device *s, - struct sysdev_attribute *attr, +static ssize_t set_cmci_disabled(struct device *s, + struct device_attribute *attr, const char *buf, size_t size) { u64 new; @@ -1891,108 +1892,107 @@ static ssize_t set_cmci_disabled(struct sys_device *s, return size; } -static ssize_t store_int_with_restart(struct sys_device *s, - struct sysdev_attribute *attr, +static ssize_t store_int_with_restart(struct device *s, + struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = sysdev_store_int(s, attr, buf, size); + ssize_t ret = device_store_int(s, attr, buf, size); mce_restart(); return ret; } -static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); -static SYSDEV_INT_ATTR(tolerant, 0644, tolerant); -static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout); 
-static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce); +static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger); +static DEVICE_INT_ATTR(tolerant, 0644, tolerant); +static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout); +static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce); -static struct sysdev_ext_attribute attr_check_interval = { - _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int, - store_int_with_restart), +static struct dev_ext_attribute dev_attr_check_interval = { + __ATTR(check_interval, 0644, device_show_int, store_int_with_restart), &check_interval }; -static struct sysdev_ext_attribute attr_ignore_ce = { - _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce), +static struct dev_ext_attribute dev_attr_ignore_ce = { + __ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce), &mce_ignore_ce }; -static struct sysdev_ext_attribute attr_cmci_disabled = { - _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled), +static struct dev_ext_attribute dev_attr_cmci_disabled = { + __ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled), &mce_cmci_disabled }; -static struct sysdev_attribute *mce_sysdev_attrs[] = { - &attr_tolerant.attr, - &attr_check_interval.attr, - &attr_trigger, - &attr_monarch_timeout.attr, - &attr_dont_log_ce.attr, - &attr_ignore_ce.attr, - &attr_cmci_disabled.attr, +static struct device_attribute *mce_device_attrs[] = { + &dev_attr_tolerant.attr, + &dev_attr_check_interval.attr, + &dev_attr_trigger, + &dev_attr_monarch_timeout.attr, + &dev_attr_dont_log_ce.attr, + &dev_attr_ignore_ce.attr, + &dev_attr_cmci_disabled.attr, NULL }; -static cpumask_var_t mce_sysdev_initialized; +static cpumask_var_t mce_device_initialized; -/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */ -static __cpuinit int mce_sysdev_create(unsigned int cpu) +/* Per cpu device init. 
All of the cpus still share the same ctrl bank: */ +static __cpuinit int mce_device_create(unsigned int cpu) { - struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu); + struct device *dev = &per_cpu(mce_device, cpu); int err; int i, j; if (!mce_available(&boot_cpu_data)) return -EIO; - memset(&sysdev->kobj, 0, sizeof(struct kobject)); - sysdev->id = cpu; - sysdev->cls = &mce_sysdev_class; + memset(&dev->kobj, 0, sizeof(struct kobject)); + dev->id = cpu; + dev->bus = &mce_subsys; - err = sysdev_register(sysdev); + err = device_register(dev); if (err) return err; - for (i = 0; mce_sysdev_attrs[i]; i++) { - err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]); + for (i = 0; mce_device_attrs[i]; i++) { + err = device_create_file(dev, mce_device_attrs[i]); if (err) goto error; } for (j = 0; j < banks; j++) { - err = sysdev_create_file(sysdev, &mce_banks[j].attr); + err = device_create_file(dev, &mce_banks[j].attr); if (err) goto error2; } - cpumask_set_cpu(cpu, mce_sysdev_initialized); + cpumask_set_cpu(cpu, mce_device_initialized); return 0; error2: while (--j >= 0) - sysdev_remove_file(sysdev, &mce_banks[j].attr); + device_remove_file(dev, &mce_banks[j].attr); error: while (--i >= 0) - sysdev_remove_file(sysdev, mce_sysdev_attrs[i]); + device_remove_file(dev, mce_device_attrs[i]); - sysdev_unregister(sysdev); + device_unregister(dev); return err; } -static __cpuinit void mce_sysdev_remove(unsigned int cpu) +static __cpuinit void mce_device_remove(unsigned int cpu) { - struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu); + struct device *dev = &per_cpu(mce_device, cpu); int i; - if (!cpumask_test_cpu(cpu, mce_sysdev_initialized)) + if (!cpumask_test_cpu(cpu, mce_device_initialized)) return; - for (i = 0; mce_sysdev_attrs[i]; i++) - sysdev_remove_file(sysdev, mce_sysdev_attrs[i]); + for (i = 0; mce_device_attrs[i]; i++) + device_remove_file(dev, mce_device_attrs[i]); for (i = 0; i < banks; i++) - sysdev_remove_file(sysdev, &mce_banks[i].attr); + device_remove_file(dev, &mce_banks[i].attr); - sysdev_unregister(sysdev); - cpumask_clear_cpu(cpu, mce_sysdev_initialized); + device_unregister(dev); + cpumask_clear_cpu(cpu, mce_device_initialized); } /* Make sure there are no machine checks on offlined CPUs. 
*/ @@ -2042,7 +2042,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - mce_sysdev_create(cpu); + mce_device_create(cpu); if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); break; @@ -2050,7 +2050,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD_FROZEN: if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); - mce_sysdev_remove(cpu); + mce_device_remove(cpu); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: @@ -2084,7 +2084,7 @@ static __init void mce_init_banks(void) for (i = 0; i < banks; i++) { struct mce_bank *b = &mce_banks[i]; - struct sysdev_attribute *a = &b->attr; + struct device_attribute *a = &b->attr; sysfs_attr_init(&a->attr); a->attr.name = b->attrname; @@ -2104,16 +2104,16 @@ static __init int mcheck_init_device(void) if (!mce_available(&boot_cpu_data)) return -EIO; - zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL); + zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL); mce_init_banks(); - err = sysdev_class_register(&mce_sysdev_class); + err = subsys_system_register(&mce_subsys, NULL); if (err) return err; for_each_online_cpu(i) { - err = mce_sysdev_create(i); + err = mce_device_create(i); if (err) return err; } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index f5474218cff..56d2aa1acd5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -548,7 +547,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (!b) goto out; - err = sysfs_create_link(&per_cpu(mce_sysdev, cpu).kobj, + err = sysfs_create_link(&per_cpu(mce_device, cpu).kobj, b->kobj, name); if (err) goto out; @@ -571,7 +570,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) goto out; } - b->kobj = kobject_create_and_add(name, &per_cpu(mce_sysdev, cpu).kobj); + b->kobj = kobject_create_and_add(name, &per_cpu(mce_device, cpu).kobj); if (!b->kobj) goto out_free; @@ -591,7 +590,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) if (i == cpu) continue; - err = sysfs_create_link(&per_cpu(mce_sysdev, i).kobj, + err = sysfs_create_link(&per_cpu(mce_device, i).kobj, b->kobj, name); if (err) goto out; @@ -669,7 +668,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) #ifdef CONFIG_SMP /* sibling symlink */ if (shared_bank[bank] && b->blocks->cpu != cpu) { - sysfs_remove_link(&per_cpu(mce_sysdev, cpu).kobj, name); + sysfs_remove_link(&per_cpu(mce_device, cpu).kobj, name); per_cpu(threshold_banks, cpu)[bank] = NULL; return; @@ -681,7 +680,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) if (i == cpu) continue; - sysfs_remove_link(&per_cpu(mce_sysdev, i).kobj, name); + sysfs_remove_link(&per_cpu(mce_device, i).kobj, name); per_cpu(threshold_banks, i)[bank] = NULL; } diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 787e06c84ea..59e3f6ed265 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -69,16 +68,16 @@ static atomic_t therm_throt_en = ATOMIC_INIT(0); static u32 lvtthmr_init __read_mostly; #ifdef CONFIG_SYSFS -#define define_therm_throt_sysdev_one_ro(_name) \ 
- static SYSDEV_ATTR(_name, 0444, \ - therm_throt_sysdev_show_##_name, \ +#define define_therm_throt_device_one_ro(_name) \ + static DEVICE_ATTR(_name, 0444, \ + therm_throt_device_show_##_name, \ NULL) \ -#define define_therm_throt_sysdev_show_func(event, name) \ +#define define_therm_throt_device_show_func(event, name) \ \ -static ssize_t therm_throt_sysdev_show_##event##_##name( \ - struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t therm_throt_device_show_##event##_##name( \ + struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ unsigned int cpu = dev->id; \ @@ -95,20 +94,20 @@ static ssize_t therm_throt_sysdev_show_##event##_##name( \ return ret; \ } -define_therm_throt_sysdev_show_func(core_throttle, count); -define_therm_throt_sysdev_one_ro(core_throttle_count); +define_therm_throt_device_show_func(core_throttle, count); +define_therm_throt_device_one_ro(core_throttle_count); -define_therm_throt_sysdev_show_func(core_power_limit, count); -define_therm_throt_sysdev_one_ro(core_power_limit_count); +define_therm_throt_device_show_func(core_power_limit, count); +define_therm_throt_device_one_ro(core_power_limit_count); -define_therm_throt_sysdev_show_func(package_throttle, count); -define_therm_throt_sysdev_one_ro(package_throttle_count); +define_therm_throt_device_show_func(package_throttle, count); +define_therm_throt_device_one_ro(package_throttle_count); -define_therm_throt_sysdev_show_func(package_power_limit, count); -define_therm_throt_sysdev_one_ro(package_power_limit_count); +define_therm_throt_device_show_func(package_power_limit, count); +define_therm_throt_device_one_ro(package_power_limit_count); static struct attribute *thermal_throttle_attrs[] = { - &attr_core_throttle_count.attr, + &dev_attr_core_throttle_count.attr, NULL }; @@ -223,36 +222,36 @@ static int thresh_event_valid(int event) #ifdef CONFIG_SYSFS /* Add/Remove thermal_throttle interface for CPU device: */ -static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev, +static __cpuinit int thermal_throttle_add_dev(struct device *dev, unsigned int cpu) { int err; struct cpuinfo_x86 *c = &cpu_data(cpu); - err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group); + err = sysfs_create_group(&dev->kobj, &thermal_attr_group); if (err) return err; if (cpu_has(c, X86_FEATURE_PLN)) - err = sysfs_add_file_to_group(&sys_dev->kobj, - &attr_core_power_limit_count.attr, + err = sysfs_add_file_to_group(&dev->kobj, + &dev_attr_core_power_limit_count.attr, thermal_attr_group.name); if (cpu_has(c, X86_FEATURE_PTS)) { - err = sysfs_add_file_to_group(&sys_dev->kobj, - &attr_package_throttle_count.attr, + err = sysfs_add_file_to_group(&dev->kobj, + &dev_attr_package_throttle_count.attr, thermal_attr_group.name); if (cpu_has(c, X86_FEATURE_PLN)) - err = sysfs_add_file_to_group(&sys_dev->kobj, - &attr_package_power_limit_count.attr, + err = sysfs_add_file_to_group(&dev->kobj, + &dev_attr_package_power_limit_count.attr, thermal_attr_group.name); } return err; } -static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) +static __cpuinit void thermal_throttle_remove_dev(struct device *dev) { - sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group); + sysfs_remove_group(&dev->kobj, &thermal_attr_group); } /* Mutex protecting device creation against CPU hotplug: */ @@ -265,16 +264,16 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *dev; int err 
= 0; - sys_dev = get_cpu_sysdev(cpu); + dev = get_cpu_device(cpu); switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: mutex_lock(&therm_cpu_lock); - err = thermal_throttle_add_dev(sys_dev, cpu); + err = thermal_throttle_add_dev(dev, cpu); mutex_unlock(&therm_cpu_lock); WARN_ON(err); break; @@ -283,7 +282,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, case CPU_DEAD: case CPU_DEAD_FROZEN: mutex_lock(&therm_cpu_lock); - thermal_throttle_remove_dev(sys_dev); + thermal_throttle_remove_dev(dev); mutex_unlock(&therm_cpu_lock); break; } @@ -310,7 +309,7 @@ static __init int thermal_throttle_init_device(void) #endif /* connect live CPUs to sysfs */ for_each_online_cpu(cpu) { - err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu); + err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu); WARN_ON(err); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index f2d2a664e79..cf88f2a1647 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -292,8 +292,8 @@ static int reload_for_cpu(int cpu) return err; } -static ssize_t reload_store(struct sys_device *dev, - struct sysdev_attribute *attr, +static ssize_t reload_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) { unsigned long val; @@ -318,30 +318,30 @@ static ssize_t reload_store(struct sys_device *dev, return ret; } -static ssize_t version_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t version_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; return sprintf(buf, "0x%x\n", uci->cpu_sig.rev); } -static ssize_t pf_show(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t pf_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); } -static SYSDEV_ATTR(reload, 0200, NULL, reload_store); -static SYSDEV_ATTR(version, 0400, version_show, NULL); -static SYSDEV_ATTR(processor_flags, 0400, pf_show, NULL); +static DEVICE_ATTR(reload, 0200, NULL, reload_store); +static DEVICE_ATTR(version, 0400, version_show, NULL); +static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL); static struct attribute *mc_default_attrs[] = { - &attr_reload.attr, - &attr_version.attr, - &attr_processor_flags.attr, + &dev_attr_reload.attr, + &dev_attr_version.attr, + &dev_attr_processor_flags.attr, NULL }; @@ -405,43 +405,45 @@ static enum ucode_state microcode_update_cpu(int cpu) return ustate; } -static int mc_sysdev_add(struct sys_device *sys_dev) +static int mc_device_add(struct device *dev, struct subsys_interface *sif) { - int err, cpu = sys_dev->id; + int err, cpu = dev->id; if (!cpu_online(cpu)) return 0; pr_debug("CPU%d added\n", cpu); - err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group); + err = sysfs_create_group(&dev->kobj, &mc_attr_group); if (err) return err; if (microcode_init_cpu(cpu) == UCODE_ERROR) { - sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); + sysfs_remove_group(&dev->kobj, &mc_attr_group); return -EINVAL; } return err; } -static int mc_sysdev_remove(struct sys_device *sys_dev) +static int mc_device_remove(struct device *dev, struct subsys_interface *sif) { - int cpu = sys_dev->id; + int cpu = dev->id; if (!cpu_online(cpu)) return 0; pr_debug("CPU%d removed\n", cpu); microcode_fini_cpu(cpu); - 
sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); + sysfs_remove_group(&dev->kobj, &mc_attr_group); return 0; } -static struct sysdev_driver mc_sysdev_driver = { - .add = mc_sysdev_add, - .remove = mc_sysdev_remove, +static struct subsys_interface mc_cpu_interface = { + .name = "microcode", + .subsys = &cpu_subsys, + .add_dev = mc_device_add, + .remove_dev = mc_device_remove, }; /** @@ -464,9 +466,9 @@ static __cpuinit int mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *dev; - sys_dev = get_cpu_sysdev(cpu); + dev = get_cpu_device(cpu); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: @@ -474,13 +476,13 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: pr_debug("CPU%d added\n", cpu); - if (sysfs_create_group(&sys_dev->kobj, &mc_attr_group)) + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: /* Suspend is in progress, only remove the interface */ - sysfs_remove_group(&sys_dev->kobj, &mc_attr_group); + sysfs_remove_group(&dev->kobj, &mc_attr_group); pr_debug("CPU%d removed\n", cpu); break; @@ -527,7 +529,7 @@ static int __init microcode_init(void) get_online_cpus(); mutex_lock(µcode_mutex); - error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver); + error = subsys_interface_register(&mc_cpu_interface); mutex_unlock(µcode_mutex); put_online_cpus(); @@ -561,7 +563,7 @@ static void __exit microcode_exit(void) get_online_cpus(); mutex_lock(µcode_mutex); - sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); + subsys_interface_unregister(&mc_cpu_interface); mutex_unlock(µcode_mutex); put_online_cpus(); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 9d7bc9f6b6c..20a68ca386d 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -446,7 +446,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) { struct acpi_processor *pr = NULL; int result = 0; - struct sys_device *sysdev; + struct device *dev; pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); if (!pr) @@ -491,8 +491,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) per_cpu(processors, pr->id) = pr; - sysdev = get_cpu_sysdev(pr->id); - if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { + dev = get_cpu_device(pr->id); + if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) { result = -EFAULT; goto err_free_cpumask; } diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 870550d6a4b..3b599abf2b4 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -30,7 +30,6 @@ #include #include #include -#include #include diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 251acea3d35..5bb0298fbcc 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -1,8 +1,7 @@ /* - * drivers/base/cpu.c - basic CPU class support + * CPU subsystem support */ -#include #include #include #include @@ -14,40 +13,40 @@ #include "base.h" -static struct sysdev_class_attribute *cpu_sysdev_class_attrs[]; - -struct sysdev_class cpu_sysdev_class = { +struct bus_type cpu_subsys = { .name = "cpu", - .attrs = cpu_sysdev_class_attrs, + .dev_name = "cpu", }; -EXPORT_SYMBOL(cpu_sysdev_class); +EXPORT_SYMBOL_GPL(cpu_subsys); -static 
DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices); +static DEFINE_PER_CPU(struct device *, cpu_sys_devices); #ifdef CONFIG_HOTPLUG_CPU -static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr, +static ssize_t show_online(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id)); + return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id)); } -static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr, - const char *buf, size_t count) +static ssize_t __ref store_online(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t ret; cpu_hotplug_driver_lock(); switch (buf[0]) { case '0': - ret = cpu_down(cpu->sysdev.id); + ret = cpu_down(cpu->dev.id); if (!ret) kobject_uevent(&dev->kobj, KOBJ_OFFLINE); break; case '1': - ret = cpu_up(cpu->sysdev.id); + ret = cpu_up(cpu->dev.id); if (!ret) kobject_uevent(&dev->kobj, KOBJ_ONLINE); break; @@ -60,44 +59,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut ret = count; return ret; } -static SYSDEV_ATTR(online, 0644, show_online, store_online); +static DEVICE_ATTR(online, 0644, show_online, store_online); static void __cpuinit register_cpu_control(struct cpu *cpu) { - sysdev_create_file(&cpu->sysdev, &attr_online); + device_create_file(&cpu->dev, &dev_attr_online); } void unregister_cpu(struct cpu *cpu) { - int logical_cpu = cpu->sysdev.id; + int logical_cpu = cpu->dev.id; unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); - sysdev_remove_file(&cpu->sysdev, &attr_online); + device_remove_file(&cpu->dev, &dev_attr_online); - sysdev_unregister(&cpu->sysdev); + device_unregister(&cpu->dev); per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; } #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE -static ssize_t cpu_probe_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t cpu_probe_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return arch_cpu_probe(buf, count); } -static ssize_t cpu_release_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t cpu_release_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return arch_cpu_release(buf, count); } -static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); -static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); +static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #else /* ... 
!CONFIG_HOTPLUG_CPU */ @@ -109,15 +108,15 @@ static inline void register_cpu_control(struct cpu *cpu) #ifdef CONFIG_KEXEC #include -static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr, +static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t rc; unsigned long long addr; int cpunum; - cpunum = cpu->sysdev.id; + cpunum = cpu->dev.id; /* * Might be reading other cpu's data based on which cpu read thread @@ -129,7 +128,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute rc = sprintf(buf, "%Lx\n", addr); return rc; } -static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); +static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL); #endif /* @@ -137,12 +136,12 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); */ struct cpu_attr { - struct sysdev_class_attribute attr; + struct device_attribute attr; const struct cpumask *const * const map; }; -static ssize_t show_cpus_attr(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t show_cpus_attr(struct device *dev, + struct device_attribute *attr, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); @@ -153,10 +152,10 @@ static ssize_t show_cpus_attr(struct sysdev_class *class, return n; } -#define _CPU_ATTR(name, map) \ - { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map } +#define _CPU_ATTR(name, map) \ + { __ATTR(name, 0444, show_cpus_attr, NULL), map } -/* Keep in sync with cpu_sysdev_class_attrs */ +/* Keep in sync with cpu_subsys_attrs */ static struct cpu_attr cpu_attrs[] = { _CPU_ATTR(online, &cpu_online_mask), _CPU_ATTR(possible, &cpu_possible_mask), @@ -166,19 +165,19 @@ static struct cpu_attr cpu_attrs[] = { /* * Print values for NR_CPUS and offlined cpus */ -static ssize_t print_cpus_kernel_max(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t print_cpus_kernel_max(struct device *dev, + struct device_attribute *attr, char *buf) { int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); return n; } -static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); +static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ unsigned int total_cpus; -static ssize_t print_cpus_offline(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t print_cpus_offline(struct device *dev, + struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; cpumask_var_t offline; @@ -205,7 +204,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, n += snprintf(&buf[n], len - n, "\n"); return n; } -static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); +static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); /* * register_cpu - Setup a sysfs device for a CPU. 
@@ -218,57 +217,66 @@ static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); int __cpuinit register_cpu(struct cpu *cpu, int num) { int error; - cpu->node_id = cpu_to_node(num); - cpu->sysdev.id = num; - cpu->sysdev.cls = &cpu_sysdev_class; - - error = sysdev_register(&cpu->sysdev); + cpu->node_id = cpu_to_node(num); + cpu->dev.id = num; + cpu->dev.bus = &cpu_subsys; + error = device_register(&cpu->dev); if (!error && cpu->hotpluggable) register_cpu_control(cpu); if (!error) - per_cpu(cpu_sys_devices, num) = &cpu->sysdev; + per_cpu(cpu_sys_devices, num) = &cpu->dev; if (!error) register_cpu_under_node(num, cpu_to_node(num)); #ifdef CONFIG_KEXEC if (!error) - error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes); + error = device_create_file(&cpu->dev, &dev_attr_crash_notes); #endif return error; } -struct sys_device *get_cpu_sysdev(unsigned cpu) +struct device *get_cpu_device(unsigned cpu) { if (cpu < nr_cpu_ids && cpu_possible(cpu)) return per_cpu(cpu_sys_devices, cpu); else return NULL; } -EXPORT_SYMBOL_GPL(get_cpu_sysdev); +EXPORT_SYMBOL_GPL(get_cpu_device); + +static struct attribute *cpu_root_attrs[] = { +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + &dev_attr_probe.attr, + &dev_attr_release.attr, +#endif + &cpu_attrs[0].attr.attr, + &cpu_attrs[1].attr.attr, + &cpu_attrs[2].attr.attr, + &dev_attr_kernel_max.attr, + &dev_attr_offline.attr, + NULL +}; + +static struct attribute_group cpu_root_attr_group = { + .attrs = cpu_root_attrs, +}; + +static const struct attribute_group *cpu_root_attr_groups[] = { + &cpu_root_attr_group, + NULL, +}; int __init cpu_dev_init(void) { int err; - err = sysdev_class_register(&cpu_sysdev_class); + err = subsys_system_register(&cpu_subsys, cpu_root_attr_groups); + if (err) + return err; + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) - if (!err) - err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); + err = sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root); #endif - return err; } - -static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = { -#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE - &attr_probe, - &attr_release, -#endif - &cpu_attrs[0].attr, - &cpu_attrs[1].attr, - &cpu_attrs[2].attr, - &attr_kernel_max, - &attr_offline, - NULL -}; diff --git a/drivers/base/node.c b/drivers/base/node.c index 793f796c4da..6ce1501c7de 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -315,12 +315,12 @@ struct node node_devices[MAX_NUMNODES]; int register_cpu_under_node(unsigned int cpu, unsigned int nid) { int ret; - struct sys_device *obj; + struct device *obj; if (!node_online(nid)) return 0; - obj = get_cpu_sysdev(cpu); + obj = get_cpu_device(cpu); if (!obj) return 0; @@ -337,12 +337,12 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid) int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { - struct sys_device *obj; + struct device *obj; if (!node_online(nid)) return 0; - obj = get_cpu_sysdev(cpu); + obj = get_cpu_device(cpu); if (!obj) return 0; diff --git a/drivers/base/topology.c b/drivers/base/topology.c index f6f37a05a0c..ae989c57cd5 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -23,7 +23,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ -#include #include #include #include @@ -32,14 +31,14 @@ #include #define define_one_ro_named(_name, _func) \ -static SYSDEV_ATTR(_name, 0444, _func, NULL) + static DEVICE_ATTR(_name, 0444, _func, NULL) #define define_one_ro(_name) \ -static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) + static DEVICE_ATTR(_name, 0444, show_##_name, NULL) #define define_id_show_func(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return sprintf(buf, "%d\n", topology_##name(cpu)); \ @@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) #ifdef arch_provides_topology_pointers #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return show_cpumap(0, topology_##name(cpu), buf); \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t show_##name##_list(struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ unsigned int cpu = dev->id; \ @@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct sys_device *dev, \ #else #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ return show_cpumap(0, topology_##name(dev->id), buf); \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t show_##name##_list(struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ return show_cpumap(1, topology_##name(dev->id), buf); \ @@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list, show_book_cpumask_list); #endif static struct attribute *default_attrs[] = { - &attr_physical_package_id.attr, - &attr_core_id.attr, - &attr_thread_siblings.attr, - &attr_thread_siblings_list.attr, - &attr_core_siblings.attr, - &attr_core_siblings_list.attr, + &dev_attr_physical_package_id.attr, + &dev_attr_core_id.attr, + &dev_attr_thread_siblings.attr, + &dev_attr_thread_siblings_list.attr, + &dev_attr_core_siblings.attr, + &dev_attr_core_siblings_list.attr, #ifdef CONFIG_SCHED_BOOK - &attr_book_id.attr, - &attr_book_siblings.attr, - &attr_book_siblings_list.attr, + &dev_attr_book_id.attr, + &dev_attr_book_siblings.attr, + &dev_attr_book_siblings_list.attr, #endif NULL }; @@ -146,16 +145,16 @@ static struct attribute_group topology_attr_group = { /* Add/Remove cpu_topology interface for CPU device */ static int __cpuinit topology_add_dev(unsigned int cpu) { - struct sys_device *sys_dev = get_cpu_sysdev(cpu); + struct device *dev = get_cpu_device(cpu); - return sysfs_create_group(&sys_dev->kobj, &topology_attr_group); + return sysfs_create_group(&dev->kobj, &topology_attr_group); } static void __cpuinit topology_remove_dev(unsigned int cpu) { - struct sys_device *sys_dev = get_cpu_sysdev(cpu); + struct device *dev = get_cpu_device(cpu); - sysfs_remove_group(&sys_dev->kobj, &topology_attr_group); + sysfs_remove_group(&dev->kobj, &topology_attr_group); } static int __cpuinit 
topology_cpu_callback(struct notifier_block *nfb, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 987a165ede2..8c2df3499da 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -679,7 +679,7 @@ static struct kobj_type ktype_cpufreq = { */ static int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy, - struct sys_device *sys_dev) + struct device *dev) { int ret = 0; #ifdef CONFIG_SMP @@ -728,7 +728,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, spin_unlock_irqrestore(&cpufreq_driver_lock, flags); pr_debug("CPU already managed, adding link\n"); - ret = sysfs_create_link(&sys_dev->kobj, + ret = sysfs_create_link(&dev->kobj, &managed_policy->kobj, "cpufreq"); if (ret) @@ -761,7 +761,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, for_each_cpu(j, policy->cpus) { struct cpufreq_policy *managed_policy; - struct sys_device *cpu_sys_dev; + struct device *cpu_dev; if (j == cpu) continue; @@ -770,8 +770,8 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, pr_debug("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); - cpu_sys_dev = get_cpu_sysdev(j); - ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, + cpu_dev = get_cpu_device(j); + ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq"); if (ret) { cpufreq_cpu_put(managed_policy); @@ -783,7 +783,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, static int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy, - struct sys_device *sys_dev) + struct device *dev) { struct cpufreq_policy new_policy; struct freq_attr **drv_attr; @@ -793,7 +793,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, /* prepare interface data */ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, - &sys_dev->kobj, "cpufreq"); + &dev->kobj, "cpufreq"); if (ret) return ret; @@ -866,9 +866,9 @@ err_out_kobj_put: * with with cpu hotplugging and all hell will break loose. Tried to clean this * mess up, but more thorough testing is needed. - Mathieu */ -static int cpufreq_add_dev(struct sys_device *sys_dev) +static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = sys_dev->id; + unsigned int cpu = dev->id; int ret = 0, found = 0; struct cpufreq_policy *policy; unsigned long flags; @@ -947,7 +947,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); - ret = cpufreq_add_dev_policy(cpu, policy, sys_dev); + ret = cpufreq_add_dev_policy(cpu, policy, dev); if (ret) { if (ret > 0) /* This is a managed cpu, symlink created, @@ -956,7 +956,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) goto err_unlock_policy; } - ret = cpufreq_add_dev_interface(cpu, policy, sys_dev); + ret = cpufreq_add_dev_interface(cpu, policy, dev); if (ret) goto err_out_unregister; @@ -999,15 +999,15 @@ module_out: * Caller should already have policy_rwsem in write mode for this CPU. * This routine frees the rwsem before returning. 
*/ -static int __cpufreq_remove_dev(struct sys_device *sys_dev) +static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = sys_dev->id; + unsigned int cpu = dev->id; unsigned long flags; struct cpufreq_policy *data; struct kobject *kobj; struct completion *cmp; #ifdef CONFIG_SMP - struct sys_device *cpu_sys_dev; + struct device *cpu_dev; unsigned int j; #endif @@ -1032,7 +1032,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) pr_debug("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - kobj = &sys_dev->kobj; + kobj = &dev->kobj; cpufreq_cpu_put(data); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); @@ -1071,8 +1071,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) strncpy(per_cpu(cpufreq_cpu_governor, j), data->governor->name, CPUFREQ_NAME_LEN); #endif - cpu_sys_dev = get_cpu_sysdev(j); - kobj = &cpu_sys_dev->kobj; + cpu_dev = get_cpu_device(j); + kobj = &cpu_dev->kobj; unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); lock_policy_rwsem_write(cpu); @@ -1112,11 +1112,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) if (unlikely(cpumask_weight(data->cpus) > 1)) { /* first sibling now owns the new sysfs dir */ cpumask_clear_cpu(cpu, data->cpus); - cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); + cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL); /* finally remove our own symlink */ lock_policy_rwsem_write(cpu); - __cpufreq_remove_dev(sys_dev); + __cpufreq_remove_dev(dev, sif); } #endif @@ -1128,9 +1128,9 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) } -static int cpufreq_remove_dev(struct sys_device *sys_dev) +static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { - unsigned int cpu = sys_dev->id; + unsigned int cpu = dev->id; int retval; if (cpu_is_offline(cpu)) @@ -1139,7 +1139,7 @@ static int cpufreq_remove_dev(struct sys_device *sys_dev) if (unlikely(lock_policy_rwsem_write(cpu))) BUG(); - retval = __cpufreq_remove_dev(sys_dev); + retval = __cpufreq_remove_dev(dev, sif); return retval; } @@ -1271,9 +1271,11 @@ out: } EXPORT_SYMBOL(cpufreq_get); -static struct sysdev_driver cpufreq_sysdev_driver = { - .add = cpufreq_add_dev, - .remove = cpufreq_remove_dev, +static struct subsys_interface cpufreq_interface = { + .name = "cpufreq", + .subsys = &cpu_subsys, + .add_dev = cpufreq_add_dev, + .remove_dev = cpufreq_remove_dev, }; @@ -1765,25 +1767,25 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct sys_device *sys_dev; + struct device *dev; - sys_dev = get_cpu_sysdev(cpu); - if (sys_dev) { + dev = get_cpu_device(cpu); + if (dev) { switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: - cpufreq_add_dev(sys_dev); + cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: if (unlikely(lock_policy_rwsem_write(cpu))) BUG(); - __cpufreq_remove_dev(sys_dev); + __cpufreq_remove_dev(dev, NULL); break; case CPU_DOWN_FAILED: case CPU_DOWN_FAILED_FROZEN: - cpufreq_add_dev(sys_dev); + cpufreq_add_dev(dev, NULL); break; } } @@ -1830,8 +1832,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) cpufreq_driver = driver_data; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - ret = sysdev_driver_register(&cpu_sysdev_class, - &cpufreq_sysdev_driver); + ret = 
subsys_interface_register(&cpufreq_interface); if (ret) goto err_null_driver; @@ -1850,7 +1851,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (ret) { pr_debug("no CPU initialized for driver %s\n", driver_data->name); - goto err_sysdev_unreg; + goto err_if_unreg; } } @@ -1858,9 +1859,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) pr_debug("driver %s up and running\n", driver_data->name); return 0; -err_sysdev_unreg: - sysdev_driver_unregister(&cpu_sysdev_class, - &cpufreq_sysdev_driver); +err_if_unreg: + subsys_interface_unregister(&cpufreq_interface); err_null_driver: spin_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; @@ -1887,7 +1887,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) pr_debug("unregistering driver %s\n", driver->name); - sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); + subsys_interface_unregister(&cpufreq_interface); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); spin_lock_irqsave(&cpufreq_driver_lock, flags); @@ -1907,8 +1907,7 @@ static int __init cpufreq_core_init(void) init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); } - cpufreq_global_kobject = kobject_create_and_add("cpufreq", - &cpu_sysdev_class.kset.kobj); + cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); BUG_ON(!cpufreq_global_kobject); register_syscore_ops(&cpufreq_syscore_ops); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index c5072a91e84..390380a8cfc 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 06ce2680d00..59f4261c753 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -291,10 +291,10 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device); static int __cpuidle_register_device(struct cpuidle_device *dev) { int ret; - struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); - if (!sys_dev) + if (!dev) return -EINVAL; if (!try_module_get(cpuidle_driver->owner)) return -EINVAL; @@ -303,7 +303,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); - if ((ret = cpuidle_add_sysfs(sys_dev))) { + if ((ret = cpuidle_add_sysfs(cpu_dev))) { module_put(cpuidle_driver->owner); return ret; } @@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device); */ void cpuidle_unregister_device(struct cpuidle_device *dev) { - struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu); + struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver(); if (dev->registered == 0) @@ -354,7 +354,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) cpuidle_disable_device(dev); - cpuidle_remove_sysfs(sys_dev); + cpuidle_remove_sysfs(cpu_dev); list_del(&dev->device_list); wait_for_completion(&dev->kobj_unregister); per_cpu(cpuidle_devices, dev->cpu) = NULL; @@ -411,7 +411,7 @@ static int __init cpuidle_init(void) if (cpuidle_disabled()) return -ENODEV; - ret = cpuidle_add_class_sysfs(&cpu_sysdev_class); + ret = cpuidle_add_interface(cpu_subsys.dev_root); if (ret) return ret; diff --git a/drivers/cpuidle/cpuidle.h 
b/drivers/cpuidle/cpuidle.h index 38c3fd8b9d7..7db186685c2 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -5,7 +5,7 @@ #ifndef __DRIVER_CPUIDLE_H #define __DRIVER_CPUIDLE_H -#include +#include /* For internal use only */ extern struct cpuidle_governor *cpuidle_curr_governor; @@ -23,11 +23,11 @@ extern void cpuidle_uninstall_idle_handler(void); extern int cpuidle_switch_governor(struct cpuidle_governor *gov); /* sysfs */ -extern int cpuidle_add_class_sysfs(struct sysdev_class *cls); -extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls); +extern int cpuidle_add_interface(struct device *dev); +extern void cpuidle_remove_interface(struct device *dev); extern int cpuidle_add_state_sysfs(struct cpuidle_device *device); extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device); -extern int cpuidle_add_sysfs(struct sys_device *sysdev); -extern void cpuidle_remove_sysfs(struct sys_device *sysdev); +extern int cpuidle_add_sysfs(struct device *dev); +extern void cpuidle_remove_sysfs(struct device *dev); #endif /* __DRIVER_CPUIDLE_H */ diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 1e756e160dc..3fe41fe4851 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -22,8 +22,8 @@ static int __init cpuidle_sysfs_setup(char *unused) } __setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); -static ssize_t show_available_governors(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t show_available_governors(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t i = 0; @@ -42,8 +42,8 @@ out: return i; } -static ssize_t show_current_driver(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t show_current_driver(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t ret; @@ -59,8 +59,8 @@ static ssize_t show_current_driver(struct sysdev_class *class, return ret; } -static ssize_t show_current_governor(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t show_current_governor(struct device *dev, + struct device_attribute *attr, char *buf) { ssize_t ret; @@ -75,8 +75,8 @@ static ssize_t show_current_governor(struct sysdev_class *class, return ret; } -static ssize_t store_current_governor(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t store_current_governor(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { char gov_name[CPUIDLE_NAME_LEN]; @@ -109,50 +109,48 @@ static ssize_t store_current_governor(struct sysdev_class *class, return count; } -static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL); -static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor, - NULL); +static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL); +static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL); -static struct attribute *cpuclass_default_attrs[] = { - &attr_current_driver.attr, - &attr_current_governor_ro.attr, +static struct attribute *cpuidle_default_attrs[] = { + &dev_attr_current_driver.attr, + &dev_attr_current_governor_ro.attr, NULL }; -static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors, - NULL); -static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor, - store_current_governor); +static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL); +static DEVICE_ATTR(current_governor, 0644, show_current_governor, + 
store_current_governor); -static struct attribute *cpuclass_switch_attrs[] = { - &attr_available_governors.attr, - &attr_current_driver.attr, - &attr_current_governor.attr, +static struct attribute *cpuidle_switch_attrs[] = { + &dev_attr_available_governors.attr, + &dev_attr_current_driver.attr, + &dev_attr_current_governor.attr, NULL }; -static struct attribute_group cpuclass_attr_group = { - .attrs = cpuclass_default_attrs, +static struct attribute_group cpuidle_attr_group = { + .attrs = cpuidle_default_attrs, .name = "cpuidle", }; /** - * cpuidle_add_class_sysfs - add CPU global sysfs attributes + * cpuidle_add_interface - add CPU global sysfs attributes */ -int cpuidle_add_class_sysfs(struct sysdev_class *cls) +int cpuidle_add_interface(struct device *dev) { if (sysfs_switch) - cpuclass_attr_group.attrs = cpuclass_switch_attrs; + cpuidle_attr_group.attrs = cpuidle_switch_attrs; - return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group); + return sysfs_create_group(&dev->kobj, &cpuidle_attr_group); } /** - * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes + * cpuidle_remove_interface - remove CPU global sysfs attributes */ -void cpuidle_remove_class_sysfs(struct sysdev_class *cls) +void cpuidle_remove_interface(struct device *dev) { - sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group); + sysfs_remove_group(&dev->kobj, &cpuidle_attr_group); } struct cpuidle_attr { @@ -365,16 +363,16 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device) /** * cpuidle_add_sysfs - creates a sysfs instance for the target device - * @sysdev: the target device + * @dev: the target device */ -int cpuidle_add_sysfs(struct sys_device *sysdev) +int cpuidle_add_sysfs(struct device *cpu_dev) { - int cpu = sysdev->id; + int cpu = cpu_dev->id; struct cpuidle_device *dev; int error; dev = per_cpu(cpuidle_devices, cpu); - error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj, + error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj, "cpuidle"); if (!error) kobject_uevent(&dev->kobj, KOBJ_ADD); @@ -383,11 +381,11 @@ int cpuidle_add_sysfs(struct sys_device *sysdev) /** * cpuidle_remove_sysfs - deletes a sysfs instance on the target device - * @sysdev: the target device + * @dev: the target device */ -void cpuidle_remove_sysfs(struct sys_device *sysdev) +void cpuidle_remove_sysfs(struct device *cpu_dev) { - int cpu = sysdev->id; + int cpu = cpu_dev->id; struct cpuidle_device *dev; dev = per_cpu(cpuidle_devices, cpu); diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 95b909ac2b7..3c03c1060be 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -31,14 +31,14 @@ static struct work_struct sclp_cpu_change_work; static void sclp_cpu_capability_notify(struct work_struct *work) { int cpu; - struct sys_device *sysdev; + struct device *dev; s390_adjust_jiffies(); pr_warning("cpu capability changed.\n"); get_online_cpus(); for_each_online_cpu(cpu) { - sysdev = get_cpu_sysdev(cpu); - kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); + dev = get_cpu_device(cpu); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); } put_online_cpus(); } diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6cb60fd2ea8..fc3da0d70d6 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -14,7 +14,7 @@ #ifndef _LINUX_CPU_H_ #define _LINUX_CPU_H_ -#include +#include #include #include #include @@ -22,19 +22,19 @@ struct cpu { int 
node_id; /* The node which contains the CPU */ int hotpluggable; /* creates sysfs control file if hotpluggable */ - struct sys_device sysdev; + struct device dev; }; extern int register_cpu(struct cpu *cpu, int num); -extern struct sys_device *get_cpu_sysdev(unsigned cpu); +extern struct device *get_cpu_device(unsigned cpu); -extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr); -extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr); +extern int cpu_add_dev_attr(struct device_attribute *attr); +extern void cpu_remove_dev_attr(struct device_attribute *attr); -extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs); -extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs); +extern int cpu_add_dev_attr_group(struct attribute_group *attrs); +extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); -extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls); +extern int sched_create_sysfs_power_savings_entries(struct device *dev); #ifdef CONFIG_HOTPLUG_CPU extern void unregister_cpu(struct cpu *cpu); @@ -160,7 +160,7 @@ static inline void cpu_maps_update_done(void) } #endif /* CONFIG_SMP */ -extern struct sysdev_class cpu_sysdev_class; +extern struct bus_type cpu_subsys; #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ diff --git a/kernel/sched.c b/kernel/sched.c index 0e9344a71be..53077264644 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7923,54 +7923,52 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) } #ifdef CONFIG_SCHED_MC -static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, - struct sysdev_class_attribute *attr, - char *page) +static ssize_t sched_mc_power_savings_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(page, "%u\n", sched_mc_power_savings); + return sprintf(buf, "%u\n", sched_mc_power_savings); } -static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t sched_mc_power_savings_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 0); } -static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, - sched_mc_power_savings_show, - sched_mc_power_savings_store); +static DEVICE_ATTR(sched_mc_power_savings, 0644, + sched_mc_power_savings_show, + sched_mc_power_savings_store); #endif #ifdef CONFIG_SCHED_SMT -static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, - char *page) +static ssize_t sched_smt_power_savings_show(struct device *dev, + struct device_attribute *attr, + char *buf) { - return sprintf(page, "%u\n", sched_smt_power_savings); + return sprintf(buf, "%u\n", sched_smt_power_savings); } -static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, - struct sysdev_class_attribute *attr, +static ssize_t sched_smt_power_savings_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return sched_power_savings_store(buf, count, 1); } -static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, +static DEVICE_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, sched_smt_power_savings_store); #endif -int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) +int __init sched_create_sysfs_power_savings_entries(struct device *dev) { int err = 0; #ifdef CONFIG_SCHED_SMT if (smt_capable()) - err = 
sysfs_create_file(&cls->kset.kobj, - &attr_sched_smt_power_savings.attr); + err = device_create_file(dev, &dev_attr_sched_smt_power_savings); #endif #ifdef CONFIG_SCHED_MC if (!err && mc_capable()) - err = sysfs_create_file(&cls->kset.kobj, - &attr_sched_mc_power_savings.attr); + err = device_create_file(dev, &dev_attr_sched_mc_power_savings); #endif return err; } -- cgit v1.2.3-70-g09d2

From 226dd0193f9b8524789a86505ba05b1a74d916c1 Mon Sep 17 00:00:00 2001 From: Afzal Mohammed Date: Wed, 4 Jan 2012 10:52:31 +0530 Subject: [CPUFREQ] cpufreq:userspace: fix cpu_cur_freq updation

CPU frequency is guaranteed to have changed only when the notifier is called with CPUFREQ_POSTCHANGE. A CPUFREQ_PRECHANGE callback does not guarantee a change in frequency: the cpufreq driver may still fail to switch the CPU to the new frequency after it. This results in wrong information being reported to the user (if setting the CPU frequency fails) when doing, for example, cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed Hence, in the userspace governor, update cpu_cur_freq only if the notifier has been called with POSTCHANGE.

Signed-off-by: Afzal Mohammed Signed-off-by: Dave Jones --- drivers/cpufreq/cpufreq_userspace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index f231015904c..bedac1aa9be 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", - freq->cpu, freq->new); - per_cpu(cpu_cur_freq, freq->cpu) = freq->new; + if (val == CPUFREQ_POSTCHANGE) { + pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", + freq->cpu, freq->new); + per_cpu(cpu_cur_freq, freq->cpu) = freq->new; + } return 0; } -- cgit v1.2.3-70-g09d2

From d08de0c19c3fc5b9cf557ce3b42795d036ad5da9 Mon Sep 17 00:00:00 2001 From: Afzal Mohammed Date: Wed, 4 Jan 2012 10:52:46 +0530 Subject: [CPUFREQ] update lpj only if frequency has changed

When scaling the CPU frequency up, loops_per_jiffy is updated upon invoking the PRECHANGE notifier. If setting the new frequency fails in the cpufreq driver, lpj is left at an incorrect value. Hence, update lpj only if the CPU frequency actually changed, i.e. upon invoking the POSTCHANGE notifier. The penalty is that, during the window between the frequency change and the invocation of the POSTCHANGE notifier, udelay(x) may not guarantee a minimal delay of 'x' us for a frequency scale-up operation.
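To make that concrete, here is a rough, stand-alone sketch of the reference-based scaling (illustrative only; in the tree the equivalent is done by cpufreq_scale() inside adjust_jiffies(), as the hunk below shows):

/*
 * Illustrative sketch, not the in-tree helper: scale a saved reference
 * loops_per_jiffy value to a new frequency.  lpj is proportional to the
 * CPU frequency, so deriving it from the reference pair (rather than
 * from the previous value) avoids accumulating rounding error across
 * many transitions.
 */
static unsigned long scale_lpj(unsigned long lpj_ref,
			       unsigned int ref_freq_khz,
			       unsigned int new_freq_khz)
{
	return (unsigned long)(((unsigned long long)lpj_ref * new_freq_khz)
			       / ref_freq_khz);
}

Performing this update only on POSTCHANGE (and on the suspend/resume notifications) means lpj can never be left describing a frequency the hardware never actually switched to; the short window in which lpj lags an already-raised frequency is exactly the penalty described above.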
Perhaps a better solution would be to define CPUFREQ_ABORTCHANGE & handle accordingly, but then it would be more intrusive (using ABORTCHANGE may help drivers also; if any has registered notifier and expect POST for a PRECHANGE, their needs can be taken care using ABORT) Signed-off-by: Afzal Mohammed Signed-off-by: Dave Jones --- drivers/cpufreq/cpufreq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 987a165ede2..2f5801adc73 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } - if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || - (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) || + if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); -- cgit v1.2.3-70-g09d2 From 201bf0f129e1715a33568d1563d9a75b840ab4d3 Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 6 Jan 2012 15:56:31 +0100 Subject: [CPUFREQ] powernow-k8: Avoid Pstate MSR accesses on systems supporting CPB Due to CPB we can't directly map SW Pstates to Pstate MSRs. Get rid of the paranoia check. (assuming that the ACPI Pstate information is correct.) Signed-off-by: Andreas Herrmann Signed-off-by: Dave Jones --- drivers/cpufreq/powernow-k8.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index bce576d7478..e0329f9fa40 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -926,23 +926,24 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, invalidate_entry(powernow_table, i); continue; } - rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); - if (!(hi & HW_PSTATE_VALID_MASK)) { - pr_debug("invalid pstate %d, ignoring\n", index); - invalidate_entry(powernow_table, i); - continue; - } - - powernow_table[i].index = index; - /* Frequency may be rounded for these */ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) || boot_cpu_data.x86 == 0x11) { + + rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); + if (!(hi & HW_PSTATE_VALID_MASK)) { + pr_debug("invalid pstate %d, ignoring\n", index); + invalidate_entry(powernow_table, i); + continue; + } + powernow_table[i].frequency = freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); } else powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; + + powernow_table[i].index = index; } return 0; } -- cgit v1.2.3-70-g09d2 From a8eb28480e9b637cc78b9aa5e08612ba97e1317a Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 6 Jan 2012 15:57:55 +0100 Subject: [CPUFREQ] powernow-k8: Fix indexing issue The driver uses the pstate number from the status register as index in its table of ACPI pstates (powernow_table). This is wrong as this is not a 1-to-1 mapping. For example we can have _PSS information to just utilize Pstate 0 and Pstate 4, ie. powernow-k8: Core Performance Boosting: on. powernow-k8: 0 : pstate 0 (2200 MHz) powernow-k8: 1 : pstate 4 (1400 MHz) In this example the driver's powernow_table has just 2 entries. Using the pstate number (4) as index into this table is just plain wrong. 
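For illustration, the fix boils down to the sketch below (simplified, with a made-up helper name; the real changes are in the hunks that follow): keep a small map from hardware P-state number to powernow_table index, fill it while the ACPI states are parsed, and use it for every frequency lookup.

#include <linux/cpufreq.h>	/* struct cpufreq_frequency_table */

/*
 * Illustrative sketch of the indirection added by this patch.  With _PSS
 * exposing only P-states 0 and 4, powernow_table has just two valid
 * entries (frequencies in kHz):
 *
 *	powernow_table[0] = { .index = 0, .frequency = 2200000 }
 *	powernow_table[1] = { .index = 4, .frequency = 1400000 }
 *
 * The status register reports "4" for the lower P-state, so indexing
 * powernow_table[4] directly reads past the valid entries, whereas
 * ps_to_as[4] == 1 selects the correct one.
 */
static unsigned int ps_to_as[8];	/* HW P-state number -> powernow_table index */

/* Filled while parsing ACPI state 'index' into table slot 'i': ps_to_as[index] = i */

static unsigned int khz_from_hw_pstate(struct cpufreq_frequency_table *table,
				       unsigned int pstate)
{
	return table[ps_to_as[pstate]].frequency;
}

This mirrors the find_khz_freq_from_pstate() and fill_powernow_table_pstate() changes below.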
Signed-off-by: Andreas Herrmann Signed-off-by: Dave Jones --- drivers/cpufreq/powernow-k8.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index e0329f9fa40..ad683ec2c57 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -54,6 +54,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); static int cpu_family = CPU_OPTERON; +/* array to map SW pstate number to acpi state */ +static u32 ps_to_as[8]; + /* core performance boost */ static bool cpb_capable, cpb_enabled; static struct msr __percpu *msrs; @@ -80,9 +83,9 @@ static u32 find_khz_freq_from_fid(u32 fid) } static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, - u32 pstate) + u32 pstate) { - return data[pstate].frequency; + return data[ps_to_as[pstate]].frequency; } /* Return the vco fid for an input fid @@ -926,6 +929,9 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, invalidate_entry(powernow_table, i); continue; } + + ps_to_as[index] = i; + /* Frequency may be rounded for these */ if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) || boot_cpu_data.x86 == 0x11) { @@ -1190,7 +1196,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, powernow_k8_acpi_pst_values(data, newstate); if (cpu_family == CPU_HW_PSTATE) - ret = transition_frequency_pstate(data, newstate); + ret = transition_frequency_pstate(data, + data->powernow_table[newstate].index); else ret = transition_frequency_fidvid(data, newstate); if (ret) { @@ -1203,7 +1210,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpu_family == CPU_HW_PSTATE) pol->cur = find_khz_freq_from_pstate(data->powernow_table, - newstate); + data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); ret = 0; -- cgit v1.2.3-70-g09d2 From b2bd68e1d5568a3911e991fc71e083f439886d8c Mon Sep 17 00:00:00 2001 From: Andreas Herrmann Date: Fri, 6 Jan 2012 15:59:33 +0100 Subject: [CPUFREQ] powernow-k8: Update copyright, maintainer and documentation information Signed-off-by: Andreas Herrmann Signed-off-by: Dave Jones --- drivers/cpufreq/powernow-k8.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index ad683ec2c57..8f9b2ceeec8 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1,10 +1,11 @@ /* - * (c) 2003-2010 Advanced Micro Devices, Inc. + * (c) 2003-2012 Advanced Micro Devices, Inc. * Your use of this code is subject to the terms and conditions of the * GNU general public license version 2. See "COPYING" or * http://www.gnu.org/licenses/gpl.html * - * Support : mark.langsdorf@amd.com + * Maintainer: + * Andreas Herrmann * * Based on the powernow-k7.c module written by Dave Jones. * (C) 2003 Dave Jones on behalf of SuSE Labs @@ -16,12 +17,14 @@ * Valuable input gratefully received from Dave Jones, Pavel Machek, * Dominik Brodowski, Jacob Shin, and others. * Originally developed by Paul Devriendt. 
- * Processor information obtained from Chapter 9 (Power and Thermal Management) - * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD - * Opteron Processors" available for download from www.amd.com * - * Tables for specific CPUs can be inferred from - * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf + * Processor information obtained from Chapter 9 (Power and Thermal + * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for + * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x + * Power Management" in BKDGs for newer AMD CPU families. + * + * Tables for specific CPUs can be inferred from AMD's processor + * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf) */ #include -- cgit v1.2.3-70-g09d2 From a125a17fa61afe2fa4e52b239dd20af8ce90c9f7 Mon Sep 17 00:00:00 2001 From: Jaecheol Lee Date: Sat, 7 Jan 2012 20:18:35 +0900 Subject: [CPUFREQ] EXYNOS: Make EXYNOS common cpufreq driver To support various EXYNOS series SoCs commonly, added exynos common structure. exynos-cpufreq.c => EXYNOS series common cpufreq driver exynos4210-cpufreq.c => EXYNOS4210 support cpufreq driver Signed-off-by: Jaecheol Lee Signed-off-by: Kukjin Kim Signed-off-by: Dave Jones --- arch/arm/mach-exynos/include/mach/cpufreq.h | 34 +++ drivers/cpufreq/Kconfig.arm | 15 +- drivers/cpufreq/Makefile | 1 + drivers/cpufreq/exynos-cpufreq.c | 296 +++++++++++++++++++++ drivers/cpufreq/exynos4210-cpufreq.c | 385 ++++++---------------------- 5 files changed, 414 insertions(+), 317 deletions(-) create mode 100644 arch/arm/mach-exynos/include/mach/cpufreq.h create mode 100644 drivers/cpufreq/exynos-cpufreq.c (limited to 'drivers/cpufreq') diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h new file mode 100644 index 00000000000..3df27f2d503 --- /dev/null +++ b/arch/arm/mach-exynos/include/mach/cpufreq.h @@ -0,0 +1,34 @@ +/* linux/arch/arm/mach-exynos/include/mach/cpufreq.h + * + * Copyright (c) 2010 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * EXYNOS - CPUFreq support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +enum cpufreq_level_index { + L0, L1, L2, L3, L4, + L5, L6, L7, L8, L9, + L10, L11, L12, L13, L14, + L15, L16, L17, L18, L19, + L20, +}; + +struct exynos_dvfs_info { + unsigned long mpll_freq_khz; + unsigned int pll_safe_idx; + unsigned int pm_lock_idx; + unsigned int max_support_idx; + unsigned int min_support_idx; + struct clk *cpu_clk; + unsigned int *volt_table; + struct cpufreq_frequency_table *freq_table; + void (*set_freq)(unsigned int, unsigned int); + bool (*need_apll_change)(unsigned int, unsigned int); +}; + +extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 72a0044c1ba..e0664fed018 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ If in doubt, say N. +config ARM_EXYNOS_CPUFREQ + bool "SAMSUNG EXYNOS SoCs" + depends on ARCH_EXYNOS + select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210 + default y + help + This adds the CPUFreq driver common part for Samsung + EXYNOS SoCs. + + If in doubt, say N. 
+ config ARM_EXYNOS4210_CPUFREQ bool "Samsung EXYNOS4210" - depends on CPU_EXYNOS4210 - default y help This adds the CPUFreq driver for Samsung EXYNOS4210 SoC (S5PV310 or S5PC210). - - If in doubt, say N. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ce75fcbcca4..ac000fa76bb 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c new file mode 100644 index 00000000000..24e4dd453fa --- /dev/null +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * EXYNOS - CPU frequency scaling support for EXYNOS series + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +static struct exynos_dvfs_info *exynos_info; + +static struct regulator *arm_regulator; +static struct cpufreq_freqs freqs; + +static unsigned int locking_frequency; +static bool frequency_locked; +static DEFINE_MUTEX(cpufreq_lock); + +int exynos_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + exynos_info->freq_table); +} + +unsigned int exynos_getspeed(unsigned int cpu) +{ + return clk_get_rate(exynos_info->cpu_clk) / 1000; +} + +static int exynos_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int index, old_index; + unsigned int arm_volt, safe_arm_volt = 0; + int ret = 0; + struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; + unsigned int *volt_table = exynos_info->volt_table; + unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz; + + mutex_lock(&cpufreq_lock); + + freqs.old = policy->cur; + + if (frequency_locked && target_freq != locking_frequency) { + ret = -EAGAIN; + goto out; + } + + if (cpufreq_frequency_table_target(policy, freq_table, + freqs.old, relation, &old_index)) { + ret = -EINVAL; + goto out; + } + + if (cpufreq_frequency_table_target(policy, freq_table, + target_freq, relation, &index)) { + ret = -EINVAL; + goto out; + } + + freqs.new = freq_table[index].frequency; + freqs.cpu = policy->cpu; + + /* + * ARM clock source will be changed APLL to MPLL temporary + * To support this level, need to control regulator for + * required voltage level + */ + if (exynos_info->need_apll_change != NULL) { + if (exynos_info->need_apll_change(old_index, index) && + (freq_table[index].frequency < mpll_freq_khz) && + (freq_table[old_index].frequency < mpll_freq_khz)) + safe_arm_volt = volt_table[exynos_info->pll_safe_idx]; + } + arm_volt = volt_table[index]; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* When the new frequency is higher than current frequency */ + if ((freqs.new > freqs.old) && !safe_arm_volt) { + /* Firstly, voltage up to increase frequency */ + 
regulator_set_voltage(arm_regulator, arm_volt, + arm_volt); + } + + if (safe_arm_volt) + regulator_set_voltage(arm_regulator, safe_arm_volt, + safe_arm_volt); + if (freqs.new != freqs.old) + exynos_info->set_freq(old_index, index); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + /* When the new frequency is lower than current frequency */ + if ((freqs.new < freqs.old) || + ((freqs.new > freqs.old) && safe_arm_volt)) { + /* down the voltage after frequency change */ + regulator_set_voltage(arm_regulator, arm_volt, + arm_volt); + } + +out: + mutex_unlock(&cpufreq_lock); + + return ret; +} + +#ifdef CONFIG_PM +static int exynos_cpufreq_suspend(struct cpufreq_policy *policy) +{ + return 0; +} + +static int exynos_cpufreq_resume(struct cpufreq_policy *policy) +{ + return 0; +} +#endif + +/** + * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume + * context + * @notifier + * @pm_event + * @v + * + * While frequency_locked == true, target() ignores every frequency but + * locking_frequency. The locking_frequency value is the initial frequency, + * which is set by the bootloader. In order to eliminate possible + * inconsistency in clock values, we save and restore frequencies during + * suspend and resume and block CPUFREQ activities. Note that the standard + * suspend/resume cannot be used as they are too deep (syscore_ops) for + * regulator actions. + */ +static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, void *v) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ + static unsigned int saved_frequency; + unsigned int temp; + + mutex_lock(&cpufreq_lock); + switch (pm_event) { + case PM_SUSPEND_PREPARE: + if (frequency_locked) + goto out; + + frequency_locked = true; + + if (locking_frequency) { + saved_frequency = exynos_getspeed(0); + + mutex_unlock(&cpufreq_lock); + exynos_target(policy, locking_frequency, + CPUFREQ_RELATION_H); + mutex_lock(&cpufreq_lock); + } + break; + + case PM_POST_SUSPEND: + if (saved_frequency) { + /* + * While frequency_locked, only locking_frequency + * is valid for target(). In order to use + * saved_frequency while keeping frequency_locked, + * we temporarly overwrite locking_frequency. + */ + temp = locking_frequency; + locking_frequency = saved_frequency; + + mutex_unlock(&cpufreq_lock); + exynos_target(policy, locking_frequency, + CPUFREQ_RELATION_H); + mutex_lock(&cpufreq_lock); + + locking_frequency = temp; + } + frequency_locked = false; + break; + } +out: + mutex_unlock(&cpufreq_lock); + + return NOTIFY_OK; +} + +static struct notifier_block exynos_cpufreq_nb = { + .notifier_call = exynos_cpufreq_pm_notifier, +}; + +static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu); + + cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu); + + /* set the transition latency value */ + policy->cpuinfo.transition_latency = 100000; + + /* + * EXYNOS4 multi-core processors has 2 cores + * that the frequency cannot be set independently. + * Each cpu is bound to the same speed. + * So the affected cpu is all of the cpus. 
+ */ + if (num_online_cpus() == 1) { + cpumask_copy(policy->related_cpus, cpu_possible_mask); + cpumask_copy(policy->cpus, cpu_online_mask); + } else { + cpumask_setall(policy->cpus); + } + + return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); +} + +static struct cpufreq_driver exynos_driver = { + .flags = CPUFREQ_STICKY, + .verify = exynos_verify_speed, + .target = exynos_target, + .get = exynos_getspeed, + .init = exynos_cpufreq_cpu_init, + .name = "exynos_cpufreq", +#ifdef CONFIG_PM + .suspend = exynos_cpufreq_suspend, + .resume = exynos_cpufreq_resume, +#endif +}; + +static int __init exynos_cpufreq_init(void) +{ + int ret = -EINVAL; + + exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL); + if (!exynos_info) + return -ENOMEM; + + if (soc_is_exynos4210()) + ret = exynos4210_cpufreq_init(exynos_info); + else + pr_err("%s: CPU type not found\n", __func__); + + if (ret) + goto err_vdd_arm; + + if (exynos_info->set_freq == NULL) { + pr_err("%s: No set_freq function (ERR)\n", __func__); + goto err_vdd_arm; + } + + arm_regulator = regulator_get(NULL, "vdd_arm"); + if (IS_ERR(arm_regulator)) { + pr_err("%s: failed to get resource vdd_arm\n", __func__); + goto err_vdd_arm; + } + + register_pm_notifier(&exynos_cpufreq_nb); + + if (cpufreq_register_driver(&exynos_driver)) { + pr_err("%s: failed to register cpufreq driver\n", __func__); + goto err_cpufreq; + } + + return 0; +err_cpufreq: + unregister_pm_notifier(&exynos_cpufreq_nb); + + if (!IS_ERR(arm_regulator)) + regulator_put(arm_regulator); +err_vdd_arm: + kfree(exynos_info); + pr_debug("%s: failed initialization\n", __func__); + return -EINVAL; +} +late_initcall(exynos_cpufreq_init); diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index a0af2d4448a..6bc4ada56df 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * EXYNOS4 - CPU frequency scaling support + * EXYNOS4210 - CPU frequency scaling support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -23,10 +23,16 @@ #include #include #include +#include #include #include +#define CPUFREQ_LEVEL_END L5 + +static int max_support_idx = L0; +static int min_support_idx = (CPUFREQ_LEVEL_END - 1); + static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; @@ -37,20 +43,18 @@ static struct regulator *arm_regulator; static struct cpufreq_freqs freqs; struct cpufreq_clkdiv { + unsigned int index; unsigned int clkdiv; }; -static unsigned int locking_frequency; -static bool frequency_locked; -static DEFINE_MUTEX(cpufreq_lock); - -enum cpufreq_level_index { - L0, L1, L2, L3, L4, CPUFREQ_LEVEL_END, +static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = { + 1250000, 1150000, 1050000, 975000, 950000, }; -static struct cpufreq_clkdiv exynos4_clkdiv_table[CPUFREQ_LEVEL_END]; -static struct cpufreq_frequency_table exynos4_freq_table[] = { +static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END]; + +static struct cpufreq_frequency_table exynos4210_freq_table[] = { {L0, 1200*1000}, {L1, 1000*1000}, {L2, 800*1000}, @@ -104,31 +108,7 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { { 3, 0 }, }; -struct cpufreq_voltage_table { - unsigned int index; /* any */ - unsigned int arm_volt; /* uV */ -}; - -static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { - { - .index = L0, - .arm_volt = 1350000, - }, { - .index = L1, - .arm_volt = 1300000, - }, { - .index = L2, - .arm_volt = 1200000, - }, { - .index = L3, - .arm_volt = 1100000, - }, { - .index = L4, - .arm_volt = 1050000, - }, -}; - -static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { +static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = { /* APLL FOUT L0: 1200MHz */ ((150 << 16) | (3 << 8) | 1), @@ -145,23 +125,13 @@ static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { ((200 << 16) | (6 << 8) | 3), }; -static int exynos4_verify_speed(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, exynos4_freq_table); -} - -static unsigned int exynos4_getspeed(unsigned int cpu) -{ - return clk_get_rate(cpu_clk) / 1000; -} - -static void exynos4_set_clkdiv(unsigned int div_index) +static void exynos4210_set_clkdiv(unsigned int div_index) { unsigned int tmp; /* Change Divider - CPU0 */ - tmp = exynos4_clkdiv_table[div_index].clkdiv; + tmp = exynos4210_clkdiv_table[div_index].clkdiv; __raw_writel(tmp, S5P_CLKDIV_CPU); @@ -185,7 +155,7 @@ static void exynos4_set_clkdiv(unsigned int div_index) } while (tmp & 0x11); } -static void exynos4_set_apll(unsigned int index) +static void exynos4210_set_apll(unsigned int index) { unsigned int tmp; @@ -204,7 +174,7 @@ static void exynos4_set_apll(unsigned int index) /* 3. Change PLL PMS values */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); - tmp |= exynos4_apll_pms_table[index]; + tmp |= exynos4210_apll_pms_table[index]; __raw_writel(tmp, S5P_APLL_CON0); /* 4. 
wait_lock_time */ @@ -221,305 +191,90 @@ static void exynos4_set_apll(unsigned int index) } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)); } -static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index) +bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index) +{ + unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8); + unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8); + + return (old_pm == new_pm) ? 0 : 1; +} + +static void exynos4210_set_frequency(unsigned int old_index, + unsigned int new_index) { unsigned int tmp; if (old_index > new_index) { - /* - * L1/L3, L2/L4 Level change require - * to only change s divider value - */ - if (((old_index == L3) && (new_index == L1)) || - ((old_index == L4) && (new_index == L2))) { + if (!exynos4210_pms_change(old_index, new_index)) { /* 1. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); /* 2. Change just s value in apll m,p,s value */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~(0x7 << 0); - tmp |= (exynos4_apll_pms_table[new_index] & 0x7); + tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); } else { /* Clock Configuration Procedure */ /* 1. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); /* 2. Change the apll m,p,s value */ - exynos4_set_apll(new_index); + exynos4210_set_apll(new_index); } } else if (old_index < new_index) { - /* - * L1/L3, L2/L4 Level change require - * to only change s divider value - */ - if (((old_index == L1) && (new_index == L3)) || - ((old_index == L2) && (new_index == L4))) { + if (!exynos4210_pms_change(old_index, new_index)) { /* 1. Change just s value in apll m,p,s value */ tmp = __raw_readl(S5P_APLL_CON0); tmp &= ~(0x7 << 0); - tmp |= (exynos4_apll_pms_table[new_index] & 0x7); + tmp |= (exynos4210_apll_pms_table[new_index] & 0x7); __raw_writel(tmp, S5P_APLL_CON0); /* 2. Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); } else { /* Clock Configuration Procedure */ /* 1. Change the apll m,p,s value */ - exynos4_set_apll(new_index); + exynos4210_set_apll(new_index); /* 2. 
Change the system clock divider values */ - exynos4_set_clkdiv(new_index); + exynos4210_set_clkdiv(new_index); } } } -static int exynos4_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int index, old_index; - unsigned int arm_volt; - int err = -EINVAL; - - freqs.old = exynos4_getspeed(policy->cpu); - - mutex_lock(&cpufreq_lock); - - if (frequency_locked && target_freq != locking_frequency) { - err = -EAGAIN; - goto out; - } - - if (cpufreq_frequency_table_target(policy, exynos4_freq_table, - freqs.old, relation, &old_index)) - goto out; - - if (cpufreq_frequency_table_target(policy, exynos4_freq_table, - target_freq, relation, &index)) - goto out; - - err = 0; - - freqs.new = exynos4_freq_table[index].frequency; - freqs.cpu = policy->cpu; - - if (freqs.new == freqs.old) - goto out; - - /* get the voltage value */ - arm_volt = exynos4_volt_table[index].arm_volt; - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - /* control regulator */ - if (freqs.new > freqs.old) { - /* Voltage up */ - regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - } - - /* Clock Configuration Procedure */ - exynos4_set_frequency(old_index, index); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - /* control regulator */ - if (freqs.new < freqs.old) { - /* Voltage down */ - regulator_set_voltage(arm_regulator, arm_volt, arm_volt); - } - -out: - mutex_unlock(&cpufreq_lock); - return err; -} - -#ifdef CONFIG_PM -/* - * These suspend/resume are used as syscore_ops, it is already too - * late to set regulator voltages at this stage. - */ -static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy) -{ - return 0; -} - -static int exynos4_cpufreq_resume(struct cpufreq_policy *policy) -{ - return 0; -} -#endif - -/** - * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume - * context - * @notifier - * @pm_event - * @v - * - * While frequency_locked == true, target() ignores every frequency but - * locking_frequency. The locking_frequency value is the initial frequency, - * which is set by the bootloader. In order to eliminate possible - * inconsistency in clock values, we save and restore frequencies during - * suspend and resume and block CPUFREQ activities. Note that the standard - * suspend/resume cannot be used as they are too deep (syscore_ops) for - * regulator actions. - */ -static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier, - unsigned long pm_event, void *v) -{ - struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ - static unsigned int saved_frequency; - unsigned int temp; - - mutex_lock(&cpufreq_lock); - switch (pm_event) { - case PM_SUSPEND_PREPARE: - if (frequency_locked) - goto out; - frequency_locked = true; - - if (locking_frequency) { - saved_frequency = exynos4_getspeed(0); - - mutex_unlock(&cpufreq_lock); - exynos4_target(policy, locking_frequency, - CPUFREQ_RELATION_H); - mutex_lock(&cpufreq_lock); - } - - break; - case PM_POST_SUSPEND: - - if (saved_frequency) { - /* - * While frequency_locked, only locking_frequency - * is valid for target(). In order to use - * saved_frequency while keeping frequency_locked, - * we temporarly overwrite locking_frequency. 
- */ - temp = locking_frequency; - locking_frequency = saved_frequency; - - mutex_unlock(&cpufreq_lock); - exynos4_target(policy, locking_frequency, - CPUFREQ_RELATION_H); - mutex_lock(&cpufreq_lock); - - locking_frequency = temp; - } - - frequency_locked = false; - break; - } -out: - mutex_unlock(&cpufreq_lock); - - return NOTIFY_OK; -} - -static struct notifier_block exynos4_cpufreq_nb = { - .notifier_call = exynos4_cpufreq_pm_notifier, -}; - -static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - int ret; - - policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu); - - cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); - - /* set the transition latency value */ - policy->cpuinfo.transition_latency = 100000; - - /* - * EXYNOS4 multi-core processors has 2 cores - * that the frequency cannot be set independently. - * Each cpu is bound to the same speed. - * So the affected cpu is all of the cpus. - */ - if (!cpu_online(1)) { - cpumask_copy(policy->related_cpus, cpu_possible_mask); - cpumask_copy(policy->cpus, cpu_online_mask); - } else { - cpumask_setall(policy->cpus); - } - - ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table); - if (ret) - return ret; - - cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu); - - return 0; -} - -static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static struct freq_attr *exynos4_cpufreq_attr[] = { - &cpufreq_freq_attr_scaling_available_freqs, - NULL, -}; - -static struct cpufreq_driver exynos4_driver = { - .flags = CPUFREQ_STICKY, - .verify = exynos4_verify_speed, - .target = exynos4_target, - .get = exynos4_getspeed, - .init = exynos4_cpufreq_cpu_init, - .exit = exynos4_cpufreq_cpu_exit, - .name = "exynos4_cpufreq", - .attr = exynos4_cpufreq_attr, -#ifdef CONFIG_PM - .suspend = exynos4_cpufreq_suspend, - .resume = exynos4_cpufreq_resume, -#endif -}; - -static int __init exynos4_cpufreq_init(void) +int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) { int i; unsigned int tmp; + unsigned long rate; cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); - locking_frequency = exynos4_getspeed(0); - moutcore = clk_get(NULL, "moutcore"); if (IS_ERR(moutcore)) - goto out; + goto err_moutcore; mout_mpll = clk_get(NULL, "mout_mpll"); if (IS_ERR(mout_mpll)) - goto out; + goto err_mout_mpll; + + rate = clk_get_rate(mout_mpll) / 1000; mout_apll = clk_get(NULL, "mout_apll"); if (IS_ERR(mout_apll)) - goto out; - - arm_regulator = regulator_get(NULL, "vdd_arm"); - if (IS_ERR(arm_regulator)) { - printk(KERN_ERR "failed to get resource %s\n", "vdd_arm"); - goto out; - } - - register_pm_notifier(&exynos4_cpufreq_nb); + goto err_mout_apll; tmp = __raw_readl(S5P_CLKDIV_CPU); for (i = L0; i < CPUFREQ_LEVEL_END; i++) { tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | - S5P_CLKDIV_CPU0_COREM0_MASK | - S5P_CLKDIV_CPU0_COREM1_MASK | - S5P_CLKDIV_CPU0_PERIPH_MASK | - S5P_CLKDIV_CPU0_ATB_MASK | - S5P_CLKDIV_CPU0_PCLKDBG_MASK | - S5P_CLKDIV_CPU0_APLL_MASK); + S5P_CLKDIV_CPU0_COREM0_MASK | + S5P_CLKDIV_CPU0_COREM1_MASK | + S5P_CLKDIV_CPU0_PERIPH_MASK | + S5P_CLKDIV_CPU0_ATB_MASK | + S5P_CLKDIV_CPU0_PCLKDBG_MASK | + S5P_CLKDIV_CPU0_APLL_MASK); tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) | (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) | @@ -529,29 +284,33 @@ static int __init exynos4_cpufreq_init(void) (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) | (clkdiv_cpu0[i][6] << 
S5P_CLKDIV_CPU0_APLL_SHIFT)); - exynos4_clkdiv_table[i].clkdiv = tmp; + exynos4210_clkdiv_table[i].clkdiv = tmp; } - return cpufreq_register_driver(&exynos4_driver); - -out: - if (!IS_ERR(cpu_clk)) - clk_put(cpu_clk); + info->mpll_freq_khz = rate; + info->pm_lock_idx = L2; + info->pll_safe_idx = L2; + info->max_support_idx = max_support_idx; + info->min_support_idx = min_support_idx; + info->cpu_clk = cpu_clk; + info->volt_table = exynos4210_volt_table; + info->freq_table = exynos4210_freq_table; + info->set_freq = exynos4210_set_frequency; + info->need_apll_change = exynos4210_pms_change; - if (!IS_ERR(moutcore)) - clk_put(moutcore); + return 0; +err_mout_apll: if (!IS_ERR(mout_mpll)) clk_put(mout_mpll); +err_mout_mpll: + if (!IS_ERR(moutcore)) + clk_put(moutcore); +err_moutcore: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); - if (!IS_ERR(mout_apll)) - clk_put(mout_apll); - - if (!IS_ERR(arm_regulator)) - regulator_put(arm_regulator); - - printk(KERN_ERR "%s: failed initialization\n", __func__); - + pr_debug("%s: failed initialization\n", __func__); return -EINVAL; } -late_initcall(exynos4_cpufreq_init); +EXPORT_SYMBOL(exynos4210_cpufreq_init); -- cgit v1.2.3-70-g09d2 From 6c523c614c13b84a3dc64f7a56d6855b03e6b292 Mon Sep 17 00:00:00 2001 From: Jaecheol Lee Date: Sat, 7 Jan 2012 20:18:39 +0900 Subject: [CPUFREQ] EXYNOS: Removed useless headers and codes This patch removes no referencing header files and cleaned up useless code. Signed-off-by: Jaecheol Lee Signed-off-by: Kukjin Kim Signed-off-by: Dave Jones --- drivers/cpufreq/exynos-cpufreq.c | 8 +------- drivers/cpufreq/exynos4210-cpufreq.c | 14 +------------- 2 files changed, 2 insertions(+), 20 deletions(-) (limited to 'drivers/cpufreq') diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 24e4dd453fa..5467879ea07 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -9,7 +9,6 @@ * published by the Free Software Foundation. */ -#include #include #include #include @@ -18,15 +17,10 @@ #include #include #include -#include -#include -#include -#include #include -#include -#include +#include static struct exynos_dvfs_info *exynos_info; diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 6bc4ada56df..065da5b702f 100644 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -9,25 +9,17 @@ * published by the Free Software Foundation. */ -#include +#include #include #include #include #include #include -#include #include -#include -#include -#include #include -#include #include -#include -#include - #define CPUFREQ_LEVEL_END L5 static int max_support_idx = L0; @@ -38,10 +30,6 @@ static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; -static struct regulator *arm_regulator; - -static struct cpufreq_freqs freqs; - struct cpufreq_clkdiv { unsigned int index; unsigned int clkdiv; -- cgit v1.2.3-70-g09d2
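
To make the split introduced by the patches above easier to follow, the sketch below is a minimal stand-alone C model (not the kernel code itself) of the pattern they establish: the SoC-specific file fills an exynos_dvfs_info descriptor, and the common exynos-cpufreq.c driver only handles policy and the voltage ordering around the set_freq() callback. The descriptor field names, the visible frequency/voltage values, and the "raise voltage before scaling up, lower it after scaling down, use the MPLL-safe voltage across an APLL relock" ordering come from the patches; the printf scaffolding, the placeholder need_apll_change test, and main() are invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

struct freq_entry {
	unsigned int index;
	unsigned int frequency;	/* kHz */
};

struct exynos_dvfs_info {
	unsigned int pll_safe_idx;
	struct freq_entry *freq_table;
	unsigned int *volt_table;	/* uV */
	void (*set_freq)(unsigned int old_index, unsigned int new_index);
	bool (*need_apll_change)(unsigned int old_index, unsigned int new_index);
};

/* --- "SoC" side: what exynos4210_cpufreq_init() does, in miniature --- */

static struct freq_entry freq_table_4210[] = {
	{ 0, 1200000 }, { 1, 1000000 }, { 2, 800000 },	/* values from the patch */
};
static unsigned int volt_table_4210[] = { 1250000, 1150000, 1050000 };

static void set_freq_4210(unsigned int old_index, unsigned int new_index)
{
	printf("  [4210] reprogram dividers/APLL: L%u -> L%u\n",
	       old_index, new_index);
}

static bool pms_change_4210(unsigned int old_index, unsigned int new_index)
{
	return old_index != new_index;	/* placeholder for the real P/M test */
}

static int exynos4210_init(struct exynos_dvfs_info *info)
{
	info->pll_safe_idx = 2;
	info->freq_table = freq_table_4210;
	info->volt_table = volt_table_4210;
	info->set_freq = set_freq_4210;
	info->need_apll_change = pms_change_4210;
	return 0;
}

/* --- "common" side: the shape of exynos_target()'s voltage ordering --- */

static void common_target(struct exynos_dvfs_info *info,
			  unsigned int old_index, unsigned int new_index)
{
	unsigned int new_volt = info->volt_table[new_index];

	if (new_index < old_index)	/* lower index == higher frequency */
		printf("  raise vdd_arm to %u uV before scaling up\n", new_volt);

	if (info->need_apll_change(old_index, new_index))
		printf("  APLL will relock; apply safe voltage %u uV first\n",
		       info->volt_table[info->pll_safe_idx]);

	info->set_freq(old_index, new_index);

	if (new_index > old_index)
		printf("  lower vdd_arm to %u uV after scaling down\n", new_volt);
}

int main(void)
{
	struct exynos_dvfs_info info;

	exynos4210_init(&info);
	common_target(&info, 2, 0);	/* 800 MHz -> 1200 MHz */
	common_target(&info, 0, 2);	/* 1200 MHz -> 800 MHz */
	return 0;
}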
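
The exynos4210_pms_change() helper introduced above decides whether a transition can be done by rewriting only the S divider of the APLL or whether the whole PLL must be relocked: it drops the S field (the low 8 bits) of the packed M/P/S word and compares what remains. The sketch below reproduces that decision. The two packed APLL values are the ones that appear verbatim in the hunk (1200 MHz and 200 MHz); the third value, which shares P and M with the 200 MHz entry, is a hypothetical example added only to show the "S-only" case.

#include <stdbool.h>
#include <stdio.h>

static bool pms_change(unsigned int old_pms, unsigned int new_pms)
{
	/* M sits in bits 16+, P in bits 8..13, S in bits 0..2 */
	return (old_pms >> 8) != (new_pms >> 8);
}

int main(void)
{
	unsigned int apll_1200mhz = (150 << 16) | (3 << 8) | 1;	/* from the patch */
	unsigned int apll_200mhz  = (200 << 16) | (6 << 8) | 3;	/* from the patch */
	unsigned int apll_s_only  = (200 << 16) | (6 << 8) | 2;	/* hypothetical: same P/M, different S */

	/*
	 * true  -> set_frequency() must relock the APLL (and the common
	 *          driver raises to the MPLL-safe voltage first);
	 * false -> only the S bits of APLL_CON0 are rewritten.
	 */
	printf("1200 MHz -> 200 MHz needs relock: %d\n",
	       pms_change(apll_1200mhz, apll_200mhz));
	printf("200 MHz  -> S-only level needs relock: %d\n",
	       pms_change(apll_200mhz, apll_s_only));
	return 0;
}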