Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 90
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc | 37
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 3
-rw-r--r--  drivers/cpufreq/Makefile | 13
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 50
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 11
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 94
-rw-r--r--  drivers/cpufreq/blackfin-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 32
-rw-r--r--  drivers/cpufreq/cpufreq.c | 242
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 65
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 6
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 18
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 6
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 112
-rw-r--r--  drivers/cpufreq/davinci-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 11
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/freq_table.c | 26
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 17
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 123
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/longhaul.c | 16
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 4
-rw-r--r--  drivers/cpufreq/pasemi-cpufreq.c | 331
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 721
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c | 746
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 8
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 16
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 24
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c | 380
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/pxa3xx-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/s3c2410-cpufreq.c | 160
-rw-r--r--  drivers/cpufreq/s3c2412-cpufreq.c | 257
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c | 312
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq-debugfs.c | 198
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 711
-rw-r--r--  drivers/cpufreq/s3c64xx-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 2
-rw-r--r--  drivers/cpufreq/sparc-us2e-cpufreq.c | 12
-rw-r--r--  drivers/cpufreq/sparc-us3-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 8
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c | 23
56 files changed, 4466 insertions, 533 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6c..534fcb82515 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
choice
prompt "Default CPUFreq governor"
- default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
help
This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc..de4d5d93c3f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,20 +3,23 @@
#
config ARM_BIG_LITTLE_CPUFREQ
- tristate
- depends on ARM_CPU_TOPOLOGY
+ tristate "Generic ARM big LITTLE CPUfreq driver"
+ depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+ select CPU_FREQ_TABLE
+ help
+ This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
config ARM_DT_BL_CPUFREQ
- tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
- select ARM_BIG_LITTLE_CPUFREQ
- depends on OF && HAVE_CLK
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
help
- This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
- This gets frequency tables from DT.
+ This enables probing via DT for Generic CPUfreq driver for ARM
+ big.LITTLE platform. This gets frequency tables from DT.
config ARM_EXYNOS_CPUFREQ
bool "SAMSUNG EXYNOS SoCs"
depends on ARCH_EXYNOS
+ select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver common part for Samsung
@@ -45,6 +48,7 @@ config ARM_EXYNOS5250_CPUFREQ
config ARM_EXYNOS5440_CPUFREQ
def_bool SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
+ select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
SoC. The nature of exynos5440 clock controller is
@@ -54,7 +58,6 @@ config ARM_EXYNOS5440_CPUFREQ
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK
- select CPU_FREQ_TABLE
select GENERIC_CPUFREQ_CPU0
select PM_OPP
select REGULATOR
@@ -70,6 +73,7 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
+ select CPU_FREQ_TABLE
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
@@ -85,6 +89,7 @@ config ARM_INTEGRATOR
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
+ select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
@@ -95,9 +100,60 @@ config ARM_OMAP2PLUS_CPUFREQ
default ARCH_OMAP2PLUS
select CPU_FREQ_TABLE
+config ARM_S3C_CPUFREQ
+ bool
+ help
+ Internal configuration node for common cpufreq on Samsung SoC
+
+config ARM_S3C24XX_CPUFREQ
+ bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
+ depends on ARCH_S3C24XX
+ select ARM_S3C_CPUFREQ
+ help
+ This enables the CPUfreq driver for the Samsung S3C24XX family
+ of CPUs.
+
+ For details, take a look at <file:Documentation/cpu-freq>.
+
+ If in doubt, say N.
+
+config ARM_S3C24XX_CPUFREQ_DEBUG
+ bool "Debug CPUfreq Samsung driver core"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_IODEBUG
+ bool "Debug CPUfreq Samsung driver IO timing"
+ depends on ARM_S3C24XX_CPUFREQ
+ help
+ Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
+
+config ARM_S3C24XX_CPUFREQ_DEBUGFS
+ bool "Export debugfs for CPUFreq"
+ depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
+ help
+ Export status information via debugfs.
+
+config ARM_S3C2410_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
+ select S3C2410_CPUFREQ_UTILS
+ help
+ CPU Frequency scaling support for S3C2410
+
+config ARM_S3C2412_CPUFREQ
+ bool
+ depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
+ default y
+ select S3C2412_IOTIMING
+ help
+ CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
+
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416
+ select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for the Samsung S3C2416 and
S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -117,9 +173,18 @@ config ARM_S3C2416_CPUFREQ_VCORESCALE
If in doubt, say N.
+config ARM_S3C2440_CPUFREQ
+ bool "S3C2440/S3C2442 CPU Frequency scaling support"
+ depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
+ select S3C2410_CPUFREQ_UTILS
+ default y
+ help
+ CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
+
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
+ select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -146,6 +211,15 @@ config ARM_SA1110_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
+ select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_TEGRA_CPUFREQ
+ bool "TEGRA CPUFreq support"
+ depends on ARCH_TEGRA
+ select CPU_FREQ_TABLE
+ default y
+ help
+ This adds the CPUFreq driver support for TEGRA SOCs.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 9c926ca0d71..25ca9db62e0 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,6 +1,7 @@
config CPU_FREQ_CBE
tristate "CBE frequency scaling"
depends on CBE_RAS && PPC_CELL
+ select CPU_FREQ_TABLE
default m
help
This adds the cpufreq driver for Cell BE processors.
@@ -23,3 +24,39 @@ config CPU_FREQ_MAPLE
help
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
+
+config PPC_CORENET_CPUFREQ
+ tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
+ depends on PPC_E500MC && OF && COMMON_CLK
+ select CPU_FREQ_TABLE
+ select CLK_PPC_CORENET
+ help
+ This adds the CPUFreq driver support for Freescale e500mc,
+ e5500 and e6500 series SoCs which are capable of changing
+ the CPU's frequency dynamically.
+
+config CPU_FREQ_PMAC
+ bool "Support for Apple PowerBooks"
+ depends on ADB_PMU && PPC32
+ select CPU_FREQ_TABLE
+ help
+ This adds support for frequency switching on Apple PowerBooks,
+ this currently includes some models of iBook & Titanium
+ PowerBook.
+
+config CPU_FREQ_PMAC64
+ bool "Support for some Apple G5s"
+ depends on PPC_PMAC && PPC64
+ select CPU_FREQ_TABLE
+ help
+ This adds support for frequency switching on Apple iMac G5,
+ and some of the more recent desktop G5 machines as well.
+
+config PPC_PASEMI_CPUFREQ
+ bool "Support for PA Semi PWRficient"
+ depends on PPC_PASEMI
+ select CPU_FREQ_TABLE
+ default y
+ help
+ This adds the support for frequency switching on PA Semi
+ PWRficient processors.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 2b8a8c37454..e2b6eabef22 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -132,6 +132,7 @@ config X86_POWERNOW_K8
config X86_AMD_FREQ_SENSITIVITY
tristate "AMD frequency sensitivity feedback powersave bias"
depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
+ select CPU_FREQ_TABLE
help
This adds AMD-specific powersave bias function to the ondemand
governor, which allows it to make more power-conscious frequency
@@ -272,7 +273,7 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
- depends on X86_32
+ depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
does not have any safeguards to prevent operating the CPU out of spec
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 315b9231feb..d345b5a7aa7 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -65,13 +65,18 @@ obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_PXA25x) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA27x) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
+obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
+obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
-obj-$(CONFIG_ARCH_TEGRA) += tegra-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
##################################################################################
# PowerPC platform drivers
@@ -79,11 +84,15 @@ obj-$(CONFIG_CPU_FREQ_CBE) += ppc-cbe-cpufreq.o
ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o
obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o
+obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC) += pmac32-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC64) += pmac64-cpufreq.o
+obj-$(CONFIG_PPC_PASEMI_CPUFREQ) += pasemi-cpufreq.o
##################################################################################
# Other platform drivers
obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
-obj-$(CONFIG_BLACKFIN) += blackfin-cpufreq.o
+obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o
obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b8b4b54ce..39264020b88 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,7 @@ struct acpi_cpufreq_data {
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
unsigned int cpu_feature;
+ cpumask_var_t freqdomain_cpus;
};
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
@@ -176,6 +177,15 @@ static struct global_attr global_boost = __ATTR(boost, 0644,
show_global_boost,
store_global_boost);
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+
+ return cpufreq_show_cpus(data->freqdomain_cpus, buf);
+}
+
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
size_t count)
@@ -232,7 +242,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
perf = data->acpi_data;
for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- if (msr == perf->states[data->freq_table[i].index].status)
+ if (msr == perf->states[data->freq_table[i].driver_data].status)
return data->freq_table[i].frequency;
}
return data->freq_table[0].frequency;
@@ -347,11 +357,11 @@ static u32 get_cur_val(const struct cpumask *mask)
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
@@ -442,7 +452,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
goto out;
}
- next_perf_state = data->freq_table[next_state].index;
+ next_perf_state = data->freq_table[next_state].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -494,12 +504,14 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
- goto out;
+ freqs.new = freqs.old;
}
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- perf->state = next_perf_state;
+
+ if (!result)
+ perf->state = next_perf_state;
out:
return result;
@@ -702,6 +714,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!data)
return -ENOMEM;
+ if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
+ result = -ENOMEM;
+ goto err_free;
+ }
+
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
per_cpu(acfreq_data, cpu) = data;
@@ -710,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
- goto err_free;
+ goto err_free_mask;
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
@@ -723,6 +740,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
cpumask_copy(policy->cpus, perf->shared_cpu_map);
}
+ cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
@@ -734,6 +752,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
+ cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}
@@ -811,7 +830,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->freq_table[valid_states-1].frequency / 1000)
continue;
- data->freq_table[valid_states].index = i;
+ data->freq_table[valid_states].driver_data = i;
data->freq_table[valid_states].frequency =
perf->states[i].core_frequency * 1000;
valid_states++;
@@ -868,6 +887,8 @@ err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(perf, cpu);
+err_free_mask:
+ free_cpumask_var(data->freqdomain_cpus);
err_free:
kfree(data);
per_cpu(acfreq_data, cpu) = NULL;
@@ -886,6 +907,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
per_cpu(acfreq_data, policy->cpu) = NULL;
acpi_processor_unregister_performance(data->acpi_data,
policy->cpu);
+ free_cpumask_var(data->freqdomain_cpus);
kfree(data->freq_table);
kfree(data);
}
@@ -906,6 +928,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
static struct freq_attr *acpi_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
+ &freqdomain_cpus,
NULL, /* this is a placeholder for cpb, do not remove */
NULL,
};
@@ -947,7 +970,7 @@ static void __init acpi_cpufreq_boost_init(void)
/* We create the boost file in any case, though for systems without
* hardware support it will be read-only and hardwired to return 0.
*/
- if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+ if (cpufreq_sysfs_create_file(&(global_boost.attr)))
pr_warn(PFX "could not register global boost sysfs file\n");
else
pr_debug("registered global boost sysfs file\n");
@@ -955,7 +978,7 @@ static void __init acpi_cpufreq_boost_init(void)
static void __exit acpi_cpufreq_boost_exit(void)
{
- sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+ cpufreq_sysfs_remove_file(&(global_boost.attr));
if (msrs) {
unregister_cpu_notifier(&boost_nb);
@@ -1034,4 +1057,11 @@ static const struct x86_cpu_id acpi_cpufreq_ids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+static const struct acpi_device_id processor_device_ids[] = {
+ {ACPI_PROCESSOR_OBJECT_HID, },
+ {ACPI_PROCESSOR_DEVICE_HID, },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f3..3549f0784af 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
-static int cpu_to_cluster(int cpu)
-{
- return topology_physical_package_id(cpu);
-}
-
static unsigned int bL_cpufreq_get(unsigned int cpu)
{
u32 cur_cluster = cpu_to_cluster(cpu);
@@ -89,11 +84,9 @@ static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
if (ret) {
pr_err("clk_set_rate failed: %d\n", ret);
- return ret;
+ freqs.new = freqs.old;
}
- policy->cur = freqs.new;
-
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
@@ -192,7 +185,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+ dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4..79b2ce17884 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
int (*init_opp_table)(struct device *cpu_dev);
};
+static inline int cpu_to_cluster(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375..fd9e3ea6a48 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -19,69 +19,75 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "arm_big_little.h"
-static int dt_init_opp_table(struct device *cpu_dev)
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
{
- struct device_node *np, *parent;
- int count = 0, ret;
+ struct device_node *np = NULL, *parent;
+ int count = 0;
parent = of_find_node_by_path("/cpus");
if (!parent) {
pr_err("failed to find OF /cpus\n");
- return -ENOENT;
+ return NULL;
}
for_each_child_of_node(parent, np) {
- if (count++ != cpu_dev->id)
+ if (count++ != cpu)
continue;
if (!of_get_property(np, "operating-points", NULL)) {
- ret = -ENODATA;
- } else {
- cpu_dev->of_node = np;
- ret = of_init_opp_table(cpu_dev);
+ of_node_put(np);
+ np = NULL;
}
- of_node_put(np);
- of_node_put(parent);
- return ret;
+ break;
}
- return -ENODEV;
+ of_node_put(parent);
+ return np;
}
-static int dt_get_transition_latency(struct device *cpu_dev)
+static int dt_init_opp_table(struct device *cpu_dev)
{
- struct device_node *np, *parent;
- u32 transition_latency = CPUFREQ_ETERNAL;
- int count = 0;
+ struct device_node *np;
+ int ret;
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_err("failed to find OF /cpus\n");
- return -ENOENT;
- }
+ np = get_cpu_node_with_valid_op(cpu_dev->id);
+ if (!np)
+ return -ENODATA;
- for_each_child_of_node(parent, np) {
- if (count++ != cpu_dev->id)
- continue;
+ cpu_dev->of_node = np;
+ ret = of_init_opp_table(cpu_dev);
+ of_node_put(np);
- of_property_read_u32(np, "clock-latency", &transition_latency);
- of_node_put(np);
- of_node_put(parent);
+ return ret;
+}
- return 0;
- }
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+ struct device_node *np;
+ u32 transition_latency = CPUFREQ_ETERNAL;
+
+ np = get_cpu_node_with_valid_op(cpu_dev->id);
+ if (!np)
+ return CPUFREQ_ETERNAL;
- return -ENODEV;
+ of_property_read_u32(np, "clock-latency", &transition_latency);
+ of_node_put(np);
+
+ pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+ return transition_latency;
}
static struct cpufreq_arm_bL_ops dt_bL_ops = {
@@ -90,17 +96,33 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
.init_opp_table = dt_init_opp_table,
};
-static int generic_bL_init(void)
+static int generic_bL_probe(struct platform_device *pdev)
{
+ struct device_node *np;
+
+ np = get_cpu_node_with_valid_op(0);
+ if (!np)
+ return -ENODEV;
+
+ of_node_put(np);
return bL_cpufreq_register(&dt_bL_ops);
}
-module_init(generic_bL_init);
-static void generic_bL_exit(void)
+static int generic_bL_remove(struct platform_device *pdev)
{
- return bL_cpufreq_unregister(&dt_bL_ops);
+ bL_cpufreq_unregister(&dt_bL_ops);
+ return 0;
}
-module_exit(generic_bL_exit);
+
+static struct platform_driver generic_bL_platdrv = {
+ .driver = {
+ .name = "arm-bL-cpufreq-dt",
+ .owner = THIS_MODULE,
+ },
+ .probe = generic_bL_probe,
+ .remove = generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index 995511e80be..9cdbbd278a8 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -20,23 +20,23 @@
/* this is the table of CCLK frequencies, in Hz */
-/* .index is the entry in the auxiliary dpm_state_table[] */
+/* .driver_data is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
{
.frequency = CPUFREQ_TABLE_END,
- .index = 0,
+ .driver_data = 0,
},
{
.frequency = CPUFREQ_TABLE_END,
- .index = 1,
+ .driver_data = 1,
},
{
.frequency = CPUFREQ_TABLE_END,
- .index = 2,
+ .driver_data = 2,
},
{
.frequency = CPUFREQ_TABLE_END,
- .index = 0,
+ .driver_data = 0,
},
};
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab0..ad1fde27766 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
- long freq_Hz;
+ long freq_Hz, freq_exact;
unsigned int index;
int ret;
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
+ freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
}
- ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
@@ -189,12 +190,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
if (!np) {
pr_err("failed to find cpu0 node\n");
- return -ENOENT;
+ ret = -ENOENT;
+ goto out_put_parent;
}
cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+ if (IS_ERR(cpu_reg)) {
+ /*
+ * If cpu0 regulator supply node is present, but regulator is
+ * not yet registered, we should try defering probe.
+ */
+ if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+ dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+ ret = -EPROBE_DEFER;
+ goto out_put_node;
+ }
+ pr_warn("failed to get cpu0 regulator: %ld\n",
+ PTR_ERR(cpu_reg));
+ cpu_reg = NULL;
+ }
+
cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
@@ -202,12 +220,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
goto out_put_node;
}
- cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
- if (IS_ERR(cpu_reg)) {
- pr_warn("failed to get cpu0 regulator\n");
- cpu_reg = NULL;
- }
-
ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +276,8 @@ out_free_table:
opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
+out_put_parent:
+ of_node_put(parent);
return ret;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90..6a015ada528 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2001 Russell King
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
*
* Oct 2005 - Ashok Raj <ashok.raj@intel.com>
* Added handling for CPU hotplug
@@ -12,12 +13,13 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <asm/cputime.h>
#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
@@ -25,6 +27,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
@@ -41,11 +44,13 @@
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
+static DEFINE_MUTEX(cpufreq_governor_lock);
+
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
-static DEFINE_RWLOCK(cpufreq_driver_lock);
/*
* cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -132,6 +137,51 @@ bool have_governor_per_policy(void)
{
return cpufreq_driver->have_governor_per_policy;
}
+EXPORT_SYMBOL_GPL(have_governor_per_policy);
+
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+ if (have_governor_per_policy())
+ return &policy->kobj;
+ else
+ return cpufreq_global_kobject;
+}
+EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
+
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else if (!io_busy)
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+EXPORT_SYMBOL_GPL(get_cpu_idle_time);
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
@@ -150,7 +200,6 @@ static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
if (!try_module_get(cpufreq_driver->owner))
goto err_out_unlock;
-
/* get the CPU */
data = per_cpu(cpufreq_cpu_data, cpu);
@@ -220,7 +269,7 @@ static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
*/
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
-static unsigned int l_p_j_ref_freq;
+static unsigned int l_p_j_ref_freq;
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
@@ -233,7 +282,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
pr_debug("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
+ if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
@@ -248,8 +297,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
}
#endif
-
-void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
BUG_ON(irqs_disabled());
@@ -264,6 +312,12 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy,
switch (state) {
case CPUFREQ_PRECHANGE:
+ if (WARN(policy->transition_ongoing,
+ "In middle of another frequency transition\n"))
+ return;
+
+ policy->transition_ongoing = true;
+
/* detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
@@ -283,6 +337,12 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy,
break;
case CPUFREQ_POSTCHANGE:
+ if (WARN(!policy->transition_ongoing,
+ "No frequency transition in progress\n"))
+ return;
+
+ policy->transition_ongoing = false;
+
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
@@ -294,6 +354,7 @@ void __cpufreq_notify_transition(struct cpufreq_policy *policy,
break;
}
}
+
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies
* on frequency transition.
@@ -311,7 +372,6 @@ void cpufreq_notify_transition(struct cpufreq_policy *policy,
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
@@ -376,7 +436,6 @@ out:
return err;
}
-
/**
* cpufreq_per_cpu_attr_read() / show_##file_name() -
* print out cpufreq information
@@ -441,7 +500,6 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
return sprintf(buf, "%u\n", cur_freq);
}
-
/**
* show_scaling_governor - show the current policy for the specified CPU
*/
@@ -457,7 +515,6 @@ static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
return -EINVAL;
}
-
/**
* store_scaling_governor - store policy for the specified CPU
*/
@@ -480,8 +537,10 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
- /* Do not use cpufreq_set_policy here or the user_policy.max
- will be wrongly overridden */
+ /*
+ * Do not use cpufreq_set_policy here or the user_policy.max
+ * will be wrongly overridden
+ */
ret = __cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
@@ -526,7 +585,7 @@ out:
return i;
}
-static ssize_t show_cpus(const struct cpumask *mask, char *buf)
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
@@ -541,6 +600,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
i += sprintf(&buf[i], "\n");
return i;
}
+EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
* show_related_cpus - show the CPUs affected by each transition even if
@@ -548,7 +608,7 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- return show_cpus(policy->related_cpus, buf);
+ return cpufreq_show_cpus(policy->related_cpus, buf);
}
/**
@@ -556,7 +616,7 @@ static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
*/
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
- return show_cpus(policy->cpus, buf);
+ return cpufreq_show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
@@ -630,9 +690,6 @@ static struct attribute *default_attrs[] = {
NULL
};
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
@@ -703,6 +760,49 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
+static int cpufreq_global_kobject_usage;
+
+int cpufreq_get_global_kobject(void)
+{
+ if (!cpufreq_global_kobject_usage++)
+ return kobject_add(cpufreq_global_kobject,
+ &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
+
+ return 0;
+}
+EXPORT_SYMBOL(cpufreq_get_global_kobject);
+
+void cpufreq_put_global_kobject(void)
+{
+ if (!--cpufreq_global_kobject_usage)
+ kobject_del(cpufreq_global_kobject);
+}
+EXPORT_SYMBOL(cpufreq_put_global_kobject);
+
+int cpufreq_sysfs_create_file(const struct attribute *attr)
+{
+ int ret = cpufreq_get_global_kobject();
+
+ if (!ret) {
+ ret = sysfs_create_file(cpufreq_global_kobject, attr);
+ if (ret)
+ cpufreq_put_global_kobject();
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cpufreq_sysfs_create_file);
+
+void cpufreq_sysfs_remove_file(const struct attribute *attr)
+{
+ sysfs_remove_file(cpufreq_global_kobject, attr);
+ cpufreq_put_global_kobject();
+}
+EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
+
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -1005,7 +1105,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
-static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+static int __cpufreq_remove_dev(struct device *dev,
+ struct subsys_interface *sif)
{
unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
@@ -1075,14 +1176,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
__func__, cpu_dev->id, cpu);
}
+ if ((cpus == 1) && (cpufreq_driver->target))
+ __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
cpufreq_cpu_put(data);
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
lock_policy_rwsem_read(cpu);
kobj = &data->kobj;
cmp = &data->kobj_unregister;
@@ -1112,7 +1213,6 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
return 0;
}
-
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
@@ -1125,7 +1225,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return retval;
}
-
static void handle_update(struct work_struct *work)
{
struct cpufreq_policy *policy =
@@ -1136,7 +1235,8 @@ static void handle_update(struct work_struct *work)
}
/**
- * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
+ * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
+ * in deep trouble.
* @cpu: cpu number
* @old_freq: CPU frequency the kernel thinks the CPU runs at
* @new_freq: CPU frequency the CPU actually runs at
@@ -1151,7 +1251,6 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
struct cpufreq_freqs freqs;
unsigned long flags;
-
pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
@@ -1166,7 +1265,6 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
-
/**
* cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
* @cpu: CPU number
@@ -1212,7 +1310,6 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
-
static unsigned int __cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1271,7 +1368,6 @@ static struct subsys_interface cpufreq_interface = {
.remove_dev = cpufreq_remove_dev,
};
-
/**
* cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
*
@@ -1408,11 +1504,10 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
}
EXPORT_SYMBOL(cpufreq_register_notifier);
-
/**
* cpufreq_unregister_notifier - unregister a driver with cpufreq
* @nb: notifier block to be unregistered
- * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
+ * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
*
* Remove a driver from the CPU frequency notifier list.
*
@@ -1448,7 +1543,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
-
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -1458,6 +1552,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
+ if (policy->transition_ongoing)
+ return -EBUSY;
/* Make sure that target_freq is within supported range */
if (target_freq > policy->max)
@@ -1484,10 +1580,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- goto no_policy;
-
if (unlikely(lock_policy_rwsem_write(policy->cpu)))
goto fail;
@@ -1496,30 +1588,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
unlock_policy_rwsem_write(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
-no_policy:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
- int ret = 0;
-
if (cpufreq_disabled())
- return ret;
+ return 0;
if (!cpufreq_driver->getavg)
return 0;
- policy = cpufreq_cpu_get(policy->cpu);
- if (!policy)
- return -EINVAL;
-
- ret = cpufreq_driver->getavg(policy, cpu);
-
- cpufreq_cpu_put(policy);
- return ret;
+ return cpufreq_driver->getavg(policy, cpu);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
@@ -1562,6 +1643,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
policy->cpu, event);
+
+ mutex_lock(&cpufreq_governor_lock);
+ if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
+ (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
+ mutex_unlock(&cpufreq_governor_lock);
+ return -EBUSY;
+ }
+
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = false;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = true;
+
+ mutex_unlock(&cpufreq_governor_lock);
+
ret = policy->governor->governor(policy, event);
if (!ret) {
@@ -1569,6 +1665,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->governor->initialized++;
else if (event == CPUFREQ_GOV_POLICY_EXIT)
policy->governor->initialized--;
+ } else {
+ /* Restore original values */
+ mutex_lock(&cpufreq_governor_lock);
+ if (event == CPUFREQ_GOV_STOP)
+ policy->governor_enabled = true;
+ else if (event == CPUFREQ_GOV_START)
+ policy->governor_enabled = false;
+ mutex_unlock(&cpufreq_governor_lock);
}
/* we keep one module reference alive for
@@ -1581,7 +1685,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
return ret;
}
-
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
int err;
@@ -1606,7 +1709,6 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
-
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
@@ -1636,7 +1738,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
-
/*********************************************************************
* POLICY INTERFACE *
*********************************************************************/
@@ -1665,7 +1766,6 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get_policy);
-
/*
* data : current policy.
* policy : policy to be set.
@@ -1699,8 +1799,10 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_INCOMPATIBLE, policy);
- /* verify the cpu speed can be set within this limit,
- which might be different to the first one */
+ /*
+ * verify the cpu speed can be set within this limit, which might be
+ * different to the first one
+ */
ret = cpufreq_driver->verify(policy);
if (ret)
goto error_out;
@@ -1729,18 +1831,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
/* end old governor */
if (data->governor) {
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ unlock_policy_rwsem_write(policy->cpu);
__cpufreq_governor(data,
CPUFREQ_GOV_POLICY_EXIT);
+ lock_policy_rwsem_write(policy->cpu);
}
/* start new governor */
data->governor = policy->governor;
if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+ if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
failed = 0;
- else
+ } else {
+ unlock_policy_rwsem_write(policy->cpu);
__cpufreq_governor(data,
CPUFREQ_GOV_POLICY_EXIT);
+ lock_policy_rwsem_write(policy->cpu);
+ }
}
if (failed) {
@@ -1797,8 +1904,10 @@ int cpufreq_update_policy(unsigned int cpu)
policy.policy = data->user_policy.policy;
policy.governor = data->user_policy.governor;
- /* BIOS might change freq behind our back
- -> ask driver for current freq and notify governors about a change */
+ /*
+ * BIOS might change freq behind our back
+ * -> ask driver for current freq and notify governors about a change
+ */
if (cpufreq_driver->get) {
policy.cur = cpufreq_driver->get(cpu);
if (!data->cur) {
@@ -1832,15 +1941,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
if (dev) {
switch (action) {
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_UP_CANCELED_FROZEN:
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
cpufreq_add_dev(dev, NULL);
break;
}
@@ -1849,7 +1956,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -1861,7 +1968,7 @@ static struct notifier_block __refdata cpufreq_cpu_notifier = {
* @driver_data: A struct cpufreq_driver containing the values#
* submitted by the CPU Frequency driver.
*
- * Registers a CPU Frequency driver to this core code. This code
+ * Registers a CPU Frequency driver to this core code. This code
* returns zero on success, -EBUSY when another driver got here first
* (and isn't unregistered in the meantime).
*
@@ -1928,11 +2035,10 @@ err_null_driver:
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
-
/**
* cpufreq_unregister_driver - unregister the current CPUFreq driver
*
- * Unregister the current CPUFreq driver. Only call this if you have
+ * Unregister the current CPUFreq driver. Only call this if you have
* the right to do so, i.e. if you have succeeded in initialising before!
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
@@ -1969,7 +2075,7 @@ static int __init cpufreq_core_init(void)
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
- cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
+ cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113..46458769756 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -23,20 +23,12 @@
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/cpu.h>
#include "cpufreq_governor.h"
-static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
-{
- if (have_governor_per_policy())
- return &policy->kobj;
- else
- return cpufreq_global_kobject;
-}
-
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
if (have_governor_per_policy())
@@ -45,41 +37,6 @@ static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
return dbs_data->cdata->attr_group_gov_sys;
}
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
-
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
-
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = cputime_to_usecs(cur_wall_time);
-
- return cputime_to_usecs(idle_time);
-}
-
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
-{
- u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
-
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else if (!io_busy)
- idle_time += get_cpu_iowait_time_us(cpu, wall);
-
- return idle_time;
-}
-EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
@@ -180,8 +137,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
+ get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
+ put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -255,6 +214,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (have_governor_per_policy()) {
WARN_ON(dbs_data);
} else if (dbs_data) {
+ dbs_data->usage_count++;
policy->governor_data = dbs_data;
return 0;
}
@@ -266,6 +226,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
}
dbs_data->cdata = cdata;
+ dbs_data->usage_count = 1;
rc = cdata->init(dbs_data);
if (rc) {
pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -273,6 +234,9 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return rc;
}
+ if (!have_governor_per_policy())
+ WARN_ON(cpufreq_get_global_kobject());
+
rc = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
if (rc) {
@@ -294,7 +258,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
latency * LATENCY_MULTIPLIER));
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ if ((cdata->governor == GOV_CONSERVATIVE) &&
+ (!policy->governor->initialized)) {
struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +271,15 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
case CPUFREQ_GOV_POLICY_EXIT:
- if ((policy->governor->initialized == 1) ||
- have_governor_per_policy()) {
+ if (!--dbs_data->usage_count) {
sysfs_remove_group(get_governor_parent_kobj(policy),
get_sysfs_attr(dbs_data));
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+ if (!have_governor_per_policy())
+ cpufreq_put_global_kobject();
+
+ if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+ (policy->governor->initialized == 1)) {
struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
cpufreq_unregister_notifier(cs_ops->notifier_block,
@@ -398,6 +366,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
+ cpu_cdbs->cur_policy = NULL;
mutex_unlock(&dbs_data->mutex);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0b..6663ec3b305 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -81,7 +81,7 @@ static ssize_t show_##file_name##_gov_sys \
return sprintf(buf, "%u\n", tuners->file_name); \
} \
\
-static ssize_t show_##file_name##_gov_pol \
+static ssize_t show_##file_name##_gov_pol \
(struct cpufreq_policy *policy, char *buf) \
{ \
struct dbs_data *dbs_data = policy->governor_data; \
@@ -91,7 +91,7 @@ static ssize_t show_##file_name##_gov_pol \
#define store_one(_gov, file_name) \
static ssize_t store_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
+(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
{ \
struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
return store_##file_name(dbs_data, buf, count); \
@@ -211,6 +211,7 @@ struct common_dbs_data {
struct dbs_data {
struct common_dbs_data *cdata;
unsigned int min_sampling_rate;
+ int usage_count;
void *tuners;
/* dbs_mutex protects dbs_enable in governor start/stop */
@@ -255,7 +256,6 @@ static ssize_t show_sampling_rate_min_gov_pol \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
}
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf7..93eb5cbcc1f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,6 +47,8 @@ static struct od_ops od_ops;
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif
+static unsigned int default_powersave_bias;
+
static void ondemand_powersave_bias_init_cpu(int cpu)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,11 +545,10 @@ static int od_init(struct dbs_data *dbs_data)
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
tuners->ignore_nice = 0;
- tuners->powersave_bias = 0;
+ tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
- pr_info("%s: tuners %p\n", __func__, tuners);
mutex_init(&dbs_data->mutex);
return 0;
}
@@ -586,6 +587,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
unsigned int cpu;
cpumask_t done;
+ default_powersave_bias = powersave_bias;
cpumask_clear(&done);
get_online_cpus();
@@ -594,11 +596,17 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
continue;
policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
- dbs_data = policy->governor_data;
- od_tuners = dbs_data->tuners;
- od_tuners->powersave_bias = powersave_bias;
+ if (!policy)
+ continue;
cpumask_or(&done, &done, policy->cpus);
+
+ if (policy->governor != &cpufreq_gov_ondemand)
+ continue;
+
+ dbs_data = policy->governor_data;
+ od_tuners = dbs_data->tuners;
+ od_tuners->powersave_bias = default_powersave_bias;
}
put_online_cpus();
}
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index ceee06849b9..9fef7d6e4e6 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -17,7 +17,6 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
-
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -44,19 +43,16 @@ struct cpufreq_governor cpufreq_gov_performance = {
.owner = THIS_MODULE,
};
-
static int __init cpufreq_gov_performance_init(void)
{
return cpufreq_register_governor(&cpufreq_gov_performance);
}
-
static void __exit cpufreq_gov_performance_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_performance);
}
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 2d948a17115..32109a14f5d 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -1,7 +1,7 @@
/*
- * linux/drivers/cpufreq/cpufreq_powersave.c
+ * linux/drivers/cpufreq/cpufreq_powersave.c
*
- * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ * Copyright (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -48,13 +48,11 @@ static int __init cpufreq_gov_powersave_init(void)
return cpufreq_register_governor(&cpufreq_gov_powersave);
}
-
static void __exit cpufreq_gov_powersave_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_powersave);
}
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd87..cd9e81713a7 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ static spinlock_t cpufreq_stats_lock;
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
- unsigned long long last_time;
+ unsigned long long last_time;
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
@@ -116,7 +116,7 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stat->freq_table[i]);
- for (j = 0; j < stat->state_num; j++) {
+ for (j = 0; j < stat->state_num; j++) {
if (len >= PAGE_SIZE)
break;
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
@@ -353,11 +353,13 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
cpufreq_update_policy(cpu);
break;
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu);
break;
case CPU_DEAD:
- case CPU_DEAD_FROZEN:
+ cpufreq_stats_free_table(cpu);
+ break;
+ case CPU_UP_CANCELED_FROZEN:
+ cpufreq_stats_free_sysfs(cpu);
cpufreq_stats_free_table(cpu);
break;
}
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index bbeb9c0720a..03078090b5f 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -13,55 +13,13 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-/**
- * A few values needed by the userspace governor
- */
-static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
-static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
-static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
-static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
- userspace */
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
-
static DEFINE_MUTEX(userspace_mutex);
-static int cpus_using_userspace_governor;
-
-/* keep track of frequency transitions */
-static int
-userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
-
- if (!per_cpu(cpu_is_managed, freq->cpu))
- return 0;
-
- if (val == CPUFREQ_POSTCHANGE) {
- pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
- freq->cpu, freq->new);
- per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
- }
-
- return 0;
-}
-
-static struct notifier_block userspace_cpufreq_notifier_block = {
- .notifier_call = userspace_cpufreq_notifier
-};
-
/**
* cpufreq_set - set the CPU frequency
@@ -80,13 +38,6 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
- per_cpu(cpu_set_freq, policy->cpu) = freq;
-
- if (freq < per_cpu(cpu_min_freq, policy->cpu))
- freq = per_cpu(cpu_min_freq, policy->cpu);
- if (freq > per_cpu(cpu_max_freq, policy->cpu))
- freq = per_cpu(cpu_max_freq, policy->cpu);
-
/*
* We're safe from concurrent calls to ->target() here
* as we hold the userspace_mutex lock. If we were calling
@@ -104,10 +55,9 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
return ret;
}
-
static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
- return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
+ return sprintf(buf, "%u\n", policy->cur);
}
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -119,73 +69,37 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
- mutex_lock(&userspace_mutex);
-
- if (cpus_using_userspace_governor == 0) {
- cpufreq_register_notifier(
- &userspace_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
- cpus_using_userspace_governor++;
+ pr_debug("started managing cpu %u\n", cpu);
+ mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
- per_cpu(cpu_min_freq, cpu) = policy->min;
- per_cpu(cpu_max_freq, cpu) = policy->max;
- per_cpu(cpu_cur_freq, cpu) = policy->cur;
- per_cpu(cpu_set_freq, cpu) = policy->cur;
- pr_debug("managing cpu %u started "
- "(%u - %u kHz, currently %u kHz)\n",
- cpu,
- per_cpu(cpu_min_freq, cpu),
- per_cpu(cpu_max_freq, cpu),
- per_cpu(cpu_cur_freq, cpu));
-
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
- mutex_lock(&userspace_mutex);
- cpus_using_userspace_governor--;
- if (cpus_using_userspace_governor == 0) {
- cpufreq_unregister_notifier(
- &userspace_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
+ pr_debug("managing cpu %u stopped\n", cpu);
+ mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
- per_cpu(cpu_min_freq, cpu) = 0;
- per_cpu(cpu_max_freq, cpu) = 0;
- per_cpu(cpu_set_freq, cpu) = 0;
- pr_debug("managing cpu %u stopped\n", cpu);
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, "
- "currently %u kHz, last set to %u kHz\n",
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
cpu, policy->min, policy->max,
- per_cpu(cpu_cur_freq, cpu),
- per_cpu(cpu_set_freq, cpu));
- if (policy->max < per_cpu(cpu_set_freq, cpu)) {
+ policy->cur);
+
+ if (policy->max < policy->cur)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
+ else if (policy->min > policy->cur)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
- } else {
- __cpufreq_driver_target(policy,
- per_cpu(cpu_set_freq, cpu),
- CPUFREQ_RELATION_L);
- }
- per_cpu(cpu_min_freq, cpu) = policy->min;
- per_cpu(cpu_max_freq, cpu) = policy->max;
- per_cpu(cpu_cur_freq, cpu) = policy->cur;
mutex_unlock(&userspace_mutex);
break;
}
return rc;
}
-
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
static
#endif
@@ -202,13 +116,11 @@ static int __init cpufreq_gov_userspace_init(void)
return cpufreq_register_governor(&cpufreq_gov_userspace);
}
-
static void __exit cpufreq_gov_userspace_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_userspace);
}
-
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
"Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index c33c76c360f..551dd655c6f 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -114,6 +114,9 @@ static int davinci_target(struct cpufreq_policy *policy,
pdata->set_voltage(idx);
out:
+ if (ret)
+ freqs.new = freqs.old;
+
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return ret;
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 6ec6539ae04..1fdb02b9f1e 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -57,13 +57,13 @@ static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
if (ret) {
pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
freqs.new * 1000, ret);
- return ret;
+ freqs.new = freqs.old;
}
/* post change notification */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return 0;
+ return ret;
}
static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
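
As an illustrative aside (not part of the diff above): the davinci and dbx500 hunks, like several drivers further down, follow the same error-path convention: once the PRECHANGE notification has gone out, the POSTCHANGE notification must still be delivered, with freqs.new rewound to freqs.old so listeners never record a transition that did not actually happen. A minimal sketch of that shape, using stub helpers in place of cpufreq_notify_transition() and the real clock write:

#include <stdio.h>

struct freqs { unsigned int old, new; };

/* Stubs standing in for cpufreq_notify_transition() and the hardware write. */
static void notify(const char *phase, struct freqs *f)
{
	printf("%s: %u -> %u kHz\n", phase, f->old, f->new);
}
static int set_hw_rate(unsigned int khz) { return khz > 1600000 ? -1 : 0; }

static int change_freq(unsigned int old, unsigned int new)
{
	struct freqs f = { .old = old, .new = new };
	int ret;

	notify("PRECHANGE", &f);
	ret = set_hw_rate(f.new);
	if (ret)
		f.new = f.old;	/* rewind so listeners see no bogus change */
	notify("POSTCHANGE", &f);
	return ret;
}

int main(void)
{
	change_freq(800000, 2000000);	/* fails: POSTCHANGE reports 800000 */
	return 0;
}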
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 37380fb9262..a60efaeb4cf 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -161,6 +161,9 @@ postchange:
current_multiplier);
}
#endif
+ if (err)
+ freqs.new = freqs.old;
+
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return err;
}
@@ -188,7 +191,7 @@ static int eps_target(struct cpufreq_policy *policy,
}
/* Make frequency transition */
- dest_state = centaur->freq_table[newstate].index & 0xffff;
+ dest_state = centaur->freq_table[newstate].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
@@ -380,9 +383,9 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
f_table = &centaur->freq_table[0];
if (brand != EPS_BRAND_C7M) {
f_table[0].frequency = fsb * min_multiplier;
- f_table[0].index = (min_multiplier << 8) | min_voltage;
+ f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
f_table[1].frequency = fsb * max_multiplier;
- f_table[1].index = (max_multiplier << 8) | max_voltage;
+ f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
f_table[2].frequency = CPUFREQ_TABLE_END;
} else {
k = 0;
@@ -391,7 +394,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
for (i = min_multiplier; i <= max_multiplier; i++) {
voltage = (k * step) / 256 + min_voltage;
f_table[k].frequency = fsb * i;
- f_table[k].index = (i << 8) | voltage;
+ f_table[k].driver_data = (i << 8) | voltage;
k++;
}
f_table[k].frequency = CPUFREQ_TABLE_END;
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 475b4f607f0..0d32f02ef4d 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -113,7 +113,8 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, arm_volt);
- goto out;
+ freqs.new = freqs.old;
+ goto post_notify;
}
}
@@ -123,14 +124,19 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
__func__, safe_arm_volt);
- goto out;
+ freqs.new = freqs.old;
+ goto post_notify;
}
}
exynos_info->set_freq(old_index, index);
+post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ if (ret)
+ goto out;
+
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
((freqs.new > freqs.old) && safe_arm_volt)) {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index d7a79662e24..f0d87412cc9 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -34,8 +34,8 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
continue;
}
- pr_debug("table entry %u: %u kHz, %u index\n",
- i, freq, table[i].index);
+ pr_debug("table entry %u: %u kHz, %u driver_data\n",
+ i, freq, table[i].driver_data);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -97,11 +97,11 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int *index)
{
struct cpufreq_frequency_table optimal = {
- .index = ~0,
+ .driver_data = ~0,
.frequency = 0,
};
struct cpufreq_frequency_table suboptimal = {
- .index = ~0,
+ .driver_data = ~0,
.frequency = 0,
};
unsigned int i;
@@ -129,12 +129,12 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
if (freq <= target_freq) {
if (freq >= optimal.frequency) {
optimal.frequency = freq;
- optimal.index = i;
+ optimal.driver_data = i;
}
} else {
if (freq <= suboptimal.frequency) {
suboptimal.frequency = freq;
- suboptimal.index = i;
+ suboptimal.driver_data = i;
}
}
break;
@@ -142,26 +142,26 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
if (freq >= target_freq) {
if (freq <= optimal.frequency) {
optimal.frequency = freq;
- optimal.index = i;
+ optimal.driver_data = i;
}
} else {
if (freq >= suboptimal.frequency) {
suboptimal.frequency = freq;
- suboptimal.index = i;
+ suboptimal.driver_data = i;
}
}
break;
}
}
- if (optimal.index > i) {
- if (suboptimal.index > i)
+ if (optimal.driver_data > i) {
+ if (suboptimal.driver_data > i)
return -EINVAL;
- *index = suboptimal.index;
+ *index = suboptimal.driver_data;
} else
- *index = optimal.index;
+ *index = optimal.driver_data;
pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
- table[*index].index);
+ table[*index].driver_data);
return 0;
}
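
As an illustrative aside (not part of the diff above): the .index to .driver_data rename leaves the selection algorithm untouched; cpufreq_frequency_table_target() still walks the table keeping an optimal and a suboptimal candidate. A simplified standalone sketch of the CPUFREQ_RELATION_L case (highest frequency at or below the target, else the lowest one above it), ignoring invalid entries and policy limits:

#include <stdio.h>

#define TABLE_END 0u

/* RELATION_L: highest freq <= target if any, otherwise lowest freq > target. */
static int pick_relation_l(const unsigned int *freq, unsigned int target)
{
	unsigned int best_below = 0, best_above = ~0u;
	int i, idx_below = -1, idx_above = -1;

	for (i = 0; freq[i] != TABLE_END; i++) {
		if (freq[i] <= target && freq[i] >= best_below) {
			best_below = freq[i];
			idx_below = i;
		} else if (freq[i] > target && freq[i] <= best_above) {
			best_above = freq[i];
			idx_above = i;
		}
	}
	return idx_below >= 0 ? idx_below : idx_above;
}

int main(void)
{
	unsigned int table[] = { 300000, 600000, 800000, 1000000, TABLE_END };

	printf("%d\n", pick_relation_l(table, 700000));	/* prints 1 (600000 kHz) */
	return 0;
}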
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index c0075dbaa63..573c14ea802 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -326,7 +326,7 @@ acpi_cpufreq_cpu_init (
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
{
- data->freq_table[i].index = i;
+ data->freq_table[i].driver_data = i;
if (i < data->acpi_data.state_count) {
data->freq_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b78bc35973b..e37cdaedbb5 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -68,8 +68,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -86,13 +84,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
freqs.old / 1000, volt_old / 1000,
freqs.new / 1000, volt / 1000);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
/* scaling up? scale voltage before frequency */
if (freqs.new > freqs.old) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
- return ret;
+ freqs.new = freqs.old;
+ goto post_notify;
}
/*
@@ -145,15 +146,18 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
- return ret;
+ freqs.new = freqs.old;
+ goto post_notify;
}
/* scaling down? scale voltage after frequency */
if (freqs.new < freqs.old) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
- if (ret)
+ if (ret) {
dev_warn(cpu_dev,
"failed to scale vddarm down: %d\n", ret);
+ ret = 0;
+ }
if (freqs.old == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
@@ -163,9 +167,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
}
}
+post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return 0;
+ return ret;
}
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92b..07f2840ad80 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
}
struct sample {
- ktime_t start_time;
- ktime_t end_time;
int core_pct_busy;
- int pstate_pct_busy;
- u64 duration_us;
- u64 idletime_us;
u64 aperf;
u64 mperf;
int freq;
@@ -86,13 +81,9 @@ struct cpudata {
struct pstate_adjust_policy *pstate_policy;
struct pstate_data pstate;
struct _pid pid;
- struct _pid idle_pid;
int min_pstate_count;
- int idle_mode;
- ktime_t prev_sample;
- u64 prev_idle_time_us;
u64 prev_aperf;
u64 prev_mperf;
int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
int min_perf_pct;
int32_t max_perf;
int32_t min_perf;
+ int max_policy_pct;
+ int max_sysfs_pct;
};
static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
.max_perf = int_tofp(1),
.min_perf_pct = 0,
.min_perf = 0,
+ .max_policy_pct = 100,
+ .max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
0);
}
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
- pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
- pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
- pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
- pid_reset(&cpu->idle_pid,
- 75,
- 50,
- cpu->pstate_policy->deadband,
- 0);
-}
-
static inline void intel_pstate_reset_all_pid(void)
{
unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+ limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return count;
}
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
if (pstate == cpu->pstate.current_pstate)
return;
-#ifndef MODULE
trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
cpu->pstate.current_pstate = pstate;
wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
u64 core_pct;
- sample->pstate_pct_busy = 100 - div64_u64(
- sample->idletime_us * 100,
- sample->duration_us);
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
- sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
- 100);
+ sample->core_pct_busy = core_pct;
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
- ktime_t now;
- u64 idle_time_us;
u64 aperf, mperf;
- now = ktime_get();
- idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
- /* for the first sample, don't actually record a sample, just
- * set the baseline */
- if (cpu->prev_idle_time_us > 0) {
- cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
- cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
- cpu->samples[cpu->sample_ptr].end_time = now;
- cpu->samples[cpu->sample_ptr].duration_us =
- ktime_us_delta(now, cpu->prev_sample);
- cpu->samples[cpu->sample_ptr].idletime_us =
- idle_time_us - cpu->prev_idle_time_us;
-
- cpu->samples[cpu->sample_ptr].aperf = aperf;
- cpu->samples[cpu->sample_ptr].mperf = mperf;
- cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
- cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
- intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- }
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
- cpu->prev_sample = now;
- cpu->prev_idle_time_us = idle_time_us;
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
}
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
- cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
- cpu->idle_mode = 0;
-}
-
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
intel_pstate_pstate_decrease(cpu, steps);
}
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
- int busy_scaled;
- struct _pid *pid;
- int ctl = 0;
- int steps;
-
- pid = &cpu->idle_pid;
-
- busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
- ctl = pid_calc(pid, 100 - busy_scaled);
-
- steps = abs(ctl);
- if (ctl < 0)
- intel_pstate_pstate_decrease(cpu, steps);
- else
- intel_pstate_pstate_increase(cpu, steps);
-
- if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
- intel_pstate_normal_mode(cpu);
-}
-
static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
intel_pstate_sample(cpu);
+ intel_pstate_adjust_busy_pstate(cpu);
- if (!cpu->idle_mode)
- intel_pstate_adjust_busy_pstate(cpu);
- else
- intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
cpu->min_pstate_count++;
if (!(cpu->min_pstate_count % 5)) {
intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
- intel_pstate_idle_mode(cpu);
}
} else
cpu->min_pstate_count = 0;
-#endif
+
intel_pstate_set_sample_time(cpu);
}
@@ -600,6 +521,7 @@ static void intel_pstate_timer_func(unsigned long __data)
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(0x2a, default_policy),
ICPU(0x2d, default_policy),
+ ICPU(0x3a, default_policy),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -631,7 +553,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
(unsigned long)cpu;
cpu->timer.expires = jiffies + HZ/100;
intel_pstate_busy_pid_reset(cpu);
- intel_pstate_idle_pid_reset(cpu);
intel_pstate_sample(cpu);
intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
@@ -675,8 +596,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
- limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
- limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+ limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+ limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
return 0;
@@ -788,10 +710,9 @@ static int __init intel_pstate_init(void)
pr_info("Intel P-state driver initializing.\n");
- all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+ all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
rc = cpufreq_register_driver(&intel_pstate_driver);
if (rc)
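
As an illustrative aside (not part of the diff above): tracking max_policy_pct and max_sysfs_pct separately means the effective cap is simply the smaller of the two, re-derived whenever either side changes, so a sysfs write can no longer discard a tighter policy limit (or vice versa). A minimal sketch of that combination together with a driver-style fixed-point conversion; FRAC_BITS is assumed to be 8 here:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8			/* assumed to match the driver's fixed point */
#define int_tofp(X) ((int32_t)(X) << FRAC_BITS)

static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_policy_pct = 80;	/* derived from policy->max vs cpuinfo.max_freq */
	int max_sysfs_pct  = 90;	/* written via the max_perf_pct sysfs attribute */

	int max_perf_pct = min_int(max_policy_pct, max_sysfs_pct);
	int32_t max_perf = div_fp(int_tofp(max_perf_pct), int_tofp(100));

	printf("effective cap: %d%% (fixed point %d/256)\n",
	       max_perf_pct, (int)max_perf);
	return 0;
}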
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96e..c233ea61736 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -59,7 +59,7 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
unsigned int index)
{
struct cpufreq_freqs freqs;
- unsigned int state = kirkwood_freq_table[index].index;
+ unsigned int state = kirkwood_freq_table[index].driver_data;
unsigned long reg;
freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
priv.dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Cannot get memory resource\n");
- return -ENODEV;
- }
priv.base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index b448638e34d..b6a0a7a406b 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -254,7 +254,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
u32 bm_timeout = 1000;
unsigned int dir = 0;
- mults_index = longhaul_table[table_index].index;
+ mults_index = longhaul_table[table_index].driver_data;
/* Safety precautions */
mult = mults[mults_index & 0x1f];
if (mult == -1)
@@ -487,7 +487,7 @@ static int __cpuinit longhaul_get_ranges(void)
if (ratio > maxmult || ratio < minmult)
continue;
longhaul_table[k].frequency = calc_speed(ratio);
- longhaul_table[k].index = j;
+ longhaul_table[k].driver_data = j;
k++;
}
if (k <= 1) {
@@ -508,8 +508,8 @@ static int __cpuinit longhaul_get_ranges(void)
if (min_i != j) {
swap(longhaul_table[j].frequency,
longhaul_table[min_i].frequency);
- swap(longhaul_table[j].index,
- longhaul_table[min_i].index);
+ swap(longhaul_table[j].driver_data,
+ longhaul_table[min_i].driver_data);
}
}
@@ -517,7 +517,7 @@ static int __cpuinit longhaul_get_ranges(void)
/* Find index we are running on */
for (j = 0; j < k; j++) {
- if (mults[longhaul_table[j].index & 0x1f] == mult) {
+ if (mults[longhaul_table[j].driver_data & 0x1f] == mult) {
longhaul_index = j;
break;
}
@@ -613,7 +613,7 @@ static void __cpuinit longhaul_setup_voltagescaling(void)
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
else
pos = minvid.pos;
- longhaul_table[j].index |= mV_vrm_table[pos] << 8;
+ longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
speed, j, vid.mV);
@@ -656,12 +656,12 @@ static int longhaul_target(struct cpufreq_policy *policy,
* this in hardware, C3 is old and we need to do this
* in software. */
i = longhaul_index;
- current_vid = (longhaul_table[longhaul_index].index >> 8);
+ current_vid = (longhaul_table[longhaul_index].driver_data >> 8);
current_vid &= 0x1f;
if (table_index > longhaul_index)
dir = 1;
while (i != table_index) {
- vid = (longhaul_table[i].index >> 8) & 0x1f;
+ vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
if (vid != current_vid) {
longhaul_setstate(policy, i);
current_vid = vid;
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 84889573b56..bb838b98507 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <asm/clock.h>
+#include <asm/idle.h>
#include <asm/mach-loongson/loongson.h>
@@ -71,7 +72,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
freq =
((cpu_clock_freq / 1000) *
- loongson2_clockmod_table[newstate].index) / 8;
+ loongson2_clockmod_table[newstate].driver_data) / 8;
if (freq < policy->min || freq > policy->max)
return -EINVAL;
@@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void)
LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+ local_irq_enable();
}
static int __init cpufreq_init(void)
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 0279d18a57f..29468a522ee 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -93,9 +93,6 @@ static int omap_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new && policy->cur == freqs.new)
return ret;
- /* notifiers */
- cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-
freq = freqs.new * 1000;
ret = clk_round_rate(mpu_clk, freq);
if (IS_ERR_VALUE(ret)) {
@@ -125,6 +122,9 @@ static int omap_target(struct cpufreq_policy *policy,
freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
freqs.new / 1000, volt ? volt / 1000 : -1);
+ /* notifiers */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
/* scaling up? scale voltage before frequency */
if (mpu_reg && (freqs.new > freqs.old)) {
r = regulator_set_voltage(mpu_reg, volt - tol, volt + tol);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 421ef37d0bb..9ee78170ff8 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -118,7 +118,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
return -EINVAL;
freqs.old = cpufreq_p4_get(policy->cpu);
- freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;
+ freqs.new = stock_freq * p4clockmod_table[newstate].driver_data / 8;
if (freqs.new == freqs.old)
return 0;
@@ -131,7 +131,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
* Developer's Manual, Volume 3
*/
for_each_cpu(i, policy->cpus)
- cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
+ cpufreq_p4_setdc(i, p4clockmod_table[newstate].driver_data);
/* notifiers */
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
new file mode 100644
index 00000000000..b704da40406
--- /dev/null
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Authors: Egor Martovetsky <egor@pasemi.com>
+ * Olof Johansson <olof@lixom.net>
+ *
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Based on arch/powerpc/platforms/cell/cbe_cpufreq.c:
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+
+#include <asm/hw_irq.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+#include <asm/smp.h>
+
+#define SDCASR_REG 0x0100
+#define SDCASR_REG_STRIDE 0x1000
+#define SDCPWR_CFGA0_REG 0x0100
+#define SDCPWR_PWST0_REG 0x0000
+#define SDCPWR_GIZTIME_REG 0x0440
+
+/* SDCPWR_GIZTIME_REG fields */
+#define SDCPWR_GIZTIME_GR 0x80000000
+#define SDCPWR_GIZTIME_LONGLOCK 0x000000ff
+
+/* Offset of ASR registers from SDC base */
+#define SDCASR_OFFSET 0x120000
+
+static void __iomem *sdcpwr_mapbase;
+static void __iomem *sdcasr_mapbase;
+
+static DEFINE_MUTEX(pas_switch_mutex);
+
+/* Current astate; used when waking up from power savings on
+ * one core, in case the other core has switched states during
+ * the idle time.
+ */
+static int current_astate;
+
+/* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
+static struct cpufreq_frequency_table pas_freqs[] = {
+ {0, 0},
+ {1, 0},
+ {2, 0},
+ {3, 0},
+ {4, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr *pas_cpu_freqs_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+/*
+ * hardware specific functions
+ */
+
+static int get_astate_freq(int astate)
+{
+ u32 ret;
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_CFGA0_REG + (astate * 0x10));
+
+ return ret & 0x3f;
+}
+
+static int get_cur_astate(int cpu)
+{
+ u32 ret;
+
+ ret = in_le32(sdcpwr_mapbase + SDCPWR_PWST0_REG);
+ ret = (ret >> (cpu * 4)) & 0x7;
+
+ return ret;
+}
+
+static int get_gizmo_latency(void)
+{
+ u32 giztime, ret;
+
+ giztime = in_le32(sdcpwr_mapbase + SDCPWR_GIZTIME_REG);
+
+ /* just provide the upper bound */
+ if (giztime & SDCPWR_GIZTIME_GR)
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 128000;
+ else
+ ret = (giztime & SDCPWR_GIZTIME_LONGLOCK) * 1000;
+
+ return ret;
+}
+
+static void set_astate(int cpu, unsigned int astate)
+{
+ unsigned long flags;
+
+ /* Return if called before init has run */
+ if (unlikely(!sdcasr_mapbase))
+ return;
+
+ local_irq_save(flags);
+
+ out_le32(sdcasr_mapbase + SDCASR_REG + SDCASR_REG_STRIDE*cpu, astate);
+
+ local_irq_restore(flags);
+}
+
+int check_astate(void)
+{
+ return get_cur_astate(hard_smp_processor_id());
+}
+
+void restore_astate(int cpu)
+{
+ set_astate(cpu, current_astate);
+}
+
+/*
+ * cpufreq functions
+ */
+
+static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ const u32 *max_freqp;
+ u32 max_freq;
+ int i, cur_astate;
+ struct resource res;
+ struct device_node *cpu, *dn;
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
+
+ if (!cpu)
+ goto out;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-sdc");
+ if (!dn)
+ goto out;
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out;
+ sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
+ if (!sdcasr_mapbase) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
+ if (!dn)
+ dn = of_find_compatible_node(NULL, NULL,
+ "pasemi,pwrficient-gizmo");
+ if (!dn) {
+ err = -ENODEV;
+ goto out_unmap_sdcasr;
+ }
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+ if (err)
+ goto out_unmap_sdcasr;
+ sdcpwr_mapbase = ioremap(res.start, 0x1000);
+ if (!sdcpwr_mapbase) {
+ err = -EINVAL;
+ goto out_unmap_sdcasr;
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ if (!max_freqp) {
+ err = -EINVAL;
+ goto out_unmap_sdcpwr;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+ /* initialize frequency table */
+ for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
+ pas_freqs[i].frequency =
+ get_astate_freq(pas_freqs[i].driver_data) * 100000;
+ pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
+ }
+
+ policy->cpuinfo.transition_latency = get_gizmo_latency();
+
+ cur_astate = get_cur_astate(policy->cpu);
+ pr_debug("current astate is at %d\n",cur_astate);
+
+ policy->cur = pas_freqs[cur_astate].frequency;
+ cpumask_copy(policy->cpus, cpu_online_mask);
+
+ ppc_proc_freq = policy->cur * 1000ul;
+
+ cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
+
+ /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
+ * are set correctly
+ */
+ return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+
+out_unmap_sdcpwr:
+ iounmap(sdcpwr_mapbase);
+
+out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+out:
+ return err;
+}
+
+static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ /*
+ * We don't support CPU hotplug. Don't unmap after the system
+ * has already made it to a running state.
+ */
+ if (system_state != SYSTEM_BOOTING)
+ return 0;
+
+ if (sdcasr_mapbase)
+ iounmap(sdcasr_mapbase);
+ if (sdcpwr_mapbase)
+ iounmap(sdcpwr_mapbase);
+
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static int pas_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, pas_freqs);
+}
+
+static int pas_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ int pas_astate_new;
+ int i;
+
+ cpufreq_frequency_table_target(policy,
+ pas_freqs,
+ target_freq,
+ relation,
+ &pas_astate_new);
+
+ freqs.old = policy->cur;
+ freqs.new = pas_freqs[pas_astate_new].frequency;
+
+ mutex_lock(&pas_switch_mutex);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
+ policy->cpu,
+ pas_freqs[pas_astate_new].frequency,
+ pas_freqs[pas_astate_new].driver_data);
+
+ current_astate = pas_astate_new;
+
+ for_each_online_cpu(i)
+ set_astate(i, pas_astate_new);
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&pas_switch_mutex);
+
+ ppc_proc_freq = freqs.new * 1000ul;
+ return 0;
+}
+
+static struct cpufreq_driver pas_cpufreq_driver = {
+ .name = "pas-cpufreq",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = pas_cpufreq_cpu_init,
+ .exit = pas_cpufreq_cpu_exit,
+ .verify = pas_cpufreq_verify,
+ .target = pas_cpufreq_target,
+ .attr = pas_cpu_freqs_attr,
+};
+
+/*
+ * module init and destroy
+ */
+
+static int __init pas_cpufreq_init(void)
+{
+ if (!of_machine_is_compatible("PA6T-1682M") &&
+ !of_machine_is_compatible("pasemi,pwrficient"))
+ return -ENODEV;
+
+ return cpufreq_register_driver(&pas_cpufreq_driver);
+}
+
+static void __exit pas_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pas_cpufreq_driver);
+}
+
+module_init(pas_cpufreq_init);
+module_exit(pas_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>, Olof Johansson <olof@lixom.net>");
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 0de00081a81..1581fcc4cf4 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -243,6 +243,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
return 0;
cmd_incomplete:
+ freqs.new = freqs.old;
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
iowrite16(0, &pcch_hdr->status);
spin_unlock(&pcc_lock);
return -EINVAL;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
new file mode 100644
index 00000000000..3104fad8248
--- /dev/null
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: Need a big cleanup here. Basically, we need to have different
+ * cpufreq_driver structures for the different type of HW instead of the
+ * current mess. We also need to better deal with the detection of the
+ * type of machine.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/adb.h>
+#include <linux/pmu.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/hardirq.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/pmac_feature.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/mpic.h>
+#include <asm/keylargo.h>
+#include <asm/switch_to.h>
+
+/* WARNING !!! This will cause calibrate_delay() to be called,
+ * but this is an __init function ! So you MUST go edit
+ * init/main.c to make it non-init before enabling DEBUG_FREQ
+ */
+#undef DEBUG_FREQ
+
+extern void low_choose_7447a_dfs(int dfs);
+extern void low_choose_750fx_pll(int pll);
+extern void low_sleep_handler(void);
+
+/*
+ * Currently, PowerMac cpufreq supports only high & low frequencies
+ * that are set by the firmware
+ */
+static unsigned int low_freq;
+static unsigned int hi_freq;
+static unsigned int cur_freq;
+static unsigned int sleep_freq;
+static unsigned long transition_latency;
+
+/*
+ * Different models uses different mechanisms to switch the frequency
+ */
+static int (*set_speed_proc)(int low_speed);
+static unsigned int (*get_speed_proc)(void);
+
+/*
+ * Some definitions used by the various speedprocs
+ */
+static u32 voltage_gpio;
+static u32 frequency_gpio;
+static u32 slew_done_gpio;
+static int no_schedule;
+static int has_cpu_l2lve;
+static int is_pmu_based;
+
+/* There are only two frequency states for each processor. Values
+ * are in kHz for the time being.
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
+ {CPUFREQ_HIGH, 0},
+ {CPUFREQ_LOW, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr* pmac_cpu_freqs_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static inline void local_delay(unsigned long ms)
+{
+ if (no_schedule)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+#ifdef DEBUG_FREQ
+static inline void debug_calc_bogomips(void)
+{
+ /* This will cause a recalc of bogomips and display the
+ * result. We backup/restore the value to avoid affecting the
+ * core cpufreq framework's own calculation.
+ */
+ unsigned long save_lpj = loops_per_jiffy;
+ calibrate_delay();
+ loops_per_jiffy = save_lpj;
+}
+#endif /* DEBUG_FREQ */
+
+/* Switch CPU speed under 750FX CPU control
+ */
+static int cpu_750fx_cpu_speed(int low_speed)
+{
+ u32 hid2;
+
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(10);
+
+ /* tweak L2 for high voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 &= ~0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+ }
+#ifdef CONFIG_6xx
+ low_choose_750fx_pll(low_speed);
+#endif
+ if (low_speed == 1) {
+ /* tweak L2 for low voltage */
+ if (has_cpu_l2lve) {
+ hid2 = mfspr(SPRN_HID2);
+ hid2 |= 0x2000;
+ mtspr(SPRN_HID2, hid2);
+ }
+
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(10);
+ }
+
+ return 0;
+}
+
+static unsigned int cpu_750fx_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_PS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+/* Switch CPU speed using DFS */
+static int dfs_set_cpu_speed(int low_speed)
+{
+ if (low_speed == 0) {
+ /* ramping up, set voltage first */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Make sure we sleep for at least 1ms */
+ local_delay(1);
+ }
+
+ /* set frequency */
+#ifdef CONFIG_6xx
+ low_choose_7447a_dfs(low_speed);
+#endif
+ udelay(100);
+
+ if (low_speed == 1) {
+ /* ramping down, set voltage last */
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ local_delay(1);
+ }
+
+ return 0;
+}
+
+static unsigned int dfs_get_cpu_speed(void)
+{
+ if (mfspr(SPRN_HID1) & HID1_DFS)
+ return low_freq;
+ else
+ return hi_freq;
+}
+
+
+/* Switch CPU speed using slewing GPIOs
+ */
+static int gpios_set_cpu_speed(int low_speed)
+{
+ int gpio, timeout = 0;
+
+ /* If ramping up, set voltage first */
+ if (low_speed == 0) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
+ /* Delay is way too big but it's ok, we schedule */
+ local_delay(10);
+ }
+
+ /* Set frequency */
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ if (low_speed == ((gpio & 0x01) == 0))
+ goto skip;
+
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
+ low_speed ? 0x04 : 0x05);
+ udelay(200);
+ do {
+ if (++timeout > 100)
+ break;
+ local_delay(1);
+ gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
+ } while((gpio & 0x02) == 0);
+ skip:
+ /* If ramping down, set voltage last */
+ if (low_speed == 1) {
+ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
+ /* Delay is way too big but it's ok, we schedule */
+ local_delay(10);
+ }
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ return 0;
+}
+
+/* Switch CPU speed under PMU control
+ */
+static int pmu_set_cpu_speed(int low_speed)
+{
+ struct adb_request req;
+ unsigned long save_l2cr;
+ unsigned long save_l3cr;
+ unsigned int pic_prio;
+ unsigned long flags;
+
+ preempt_disable();
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+#endif
+ pmu_suspend();
+
+ /* Disable all interrupt sources on openpic */
+ pic_prio = mpic_cpu_get_priority();
+ mpic_cpu_set_priority(0xf);
+
+ /* Make sure the decrementer won't interrupt us */
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+ /* Make sure any pending DEC interrupt occurring while we did
+ * the above didn't re-enable the DEC */
+ mb();
+ asm volatile("mtdec %0" : : "r" (0x7fffffff));
+
+ /* We can now disable MSR_EE */
+ local_irq_save(flags);
+
+ /* Giveup the FPU & vec */
+ enable_kernel_fp();
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ enable_kernel_altivec();
+#endif /* CONFIG_ALTIVEC */
+
+ /* Save & disable L2 and L3 caches */
+ save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
+ save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
+
+ /* Send the new speed command. My assumption is that this command
+ * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
+ */
+ pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
+ while (!req.complete)
+ pmu_poll();
+
+ /* Prepare the northbridge for the speed transition */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
+
+ /* Call low level code to backup CPU state and recover from
+ * hardware reset
+ */
+ low_sleep_handler();
+
+ /* Restore the northbridge */
+ pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
+
+ /* Restore L2 cache */
+ if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
+ _set_L2CR(save_l2cr);
+ /* Restore L3 cache */
+ if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
+ _set_L3CR(save_l3cr);
+
+ /* Restore userland MMU context */
+ switch_mmu_context(NULL, current->active_mm);
+
+#ifdef DEBUG_FREQ
+ printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
+#endif
+
+ /* Restore low level PMU operations */
+ pmu_unlock();
+
+ /*
+ * Restore decrementer; we'll take a decrementer interrupt
+ * as soon as interrupts are re-enabled and the generic
+ * clockevents code will reprogram it with the right value.
+ */
+ set_dec(1);
+
+ /* Restore interrupts */
+ mpic_cpu_set_priority(pic_prio);
+
+ /* Let interrupts flow again ... */
+ local_irq_restore(flags);
+
+#ifdef DEBUG_FREQ
+ debug_calc_bogomips();
+#endif
+
+ pmu_resume();
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
+ int notify)
+{
+ struct cpufreq_freqs freqs;
+ unsigned long l3cr;
+ static unsigned long prev_l3cr;
+
+ freqs.old = cur_freq;
+ freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ if (notify)
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ if (speed_mode == CPUFREQ_LOW &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if (l3cr & L3CR_L3E) {
+ prev_l3cr = l3cr;
+ _set_L3CR(0);
+ }
+ }
+ set_speed_proc(speed_mode == CPUFREQ_LOW);
+ if (speed_mode == CPUFREQ_HIGH &&
+ cpu_has_feature(CPU_FTR_L3CR)) {
+ l3cr = _get_L3CR();
+ if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
+ _set_L3CR(prev_l3cr);
+ }
+ if (notify)
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
+
+ return 0;
+}
+
+static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
+{
+ return cur_freq;
+}
+
+static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
+}
+
+static int pmac_cpufreq_target( struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int newstate = 0;
+ int rc;
+
+ if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ rc = do_set_cpu_speed(policy, newstate, 1);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+ return rc;
+}
+
+static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->cur = cur_freq;
+
+ cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
+ return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+}
+
+static u32 read_gpio(struct device_node *np)
+{
+ const u32 *reg = of_get_property(np, "reg", NULL);
+ u32 offset;
+
+ if (reg == NULL)
+ return 0;
+ /* That works for all keylargos but shall be fixed properly
+ * some day... The problem is that it seems we can't rely
+ * on the "reg" property of the GPIO nodes, they are either
+ * relative to the base of KeyLargo or to the base of the
+ * GPIO space, and the device-tree doesn't help.
+ */
+ offset = *reg;
+ if (offset < KEYLARGO_GPIO_LEVELS0)
+ offset += KEYLARGO_GPIO_LEVELS0;
+ return offset;
+}
+
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ /* Ok, this could be made a bit smarter, but let's be robust for now. We
+ * always force a speed change to high speed before sleep, to make sure
+ * we have appropriate voltage and/or bus speed for the wakeup process,
+	 * and to make sure our loops_per_jiffies are "good enough", that is,
+	 * they will not cause too short delays if we sleep in low speed and
+	 * wake in high speed.
+ */
+ no_schedule = 1;
+ sleep_freq = cur_freq;
+ if (cur_freq == low_freq && !is_pmu_based)
+ do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
+ return 0;
+}
+
+static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ /* If we resume, first check if we have a get() function */
+ if (get_speed_proc)
+ cur_freq = get_speed_proc();
+ else
+ cur_freq = 0;
+
+ /* We don't, hrm... we don't really know our speed here, best
+ * is that we force a switch to whatever it was, which is
+ * probably high speed due to our suspend() routine
+ */
+ do_set_cpu_speed(policy, sleep_freq == low_freq ?
+ CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ no_schedule = 0;
+ return 0;
+}
+
+static struct cpufreq_driver pmac_cpufreq_driver = {
+ .verify = pmac_cpufreq_verify,
+ .target = pmac_cpufreq_target,
+ .get = pmac_cpufreq_get_speed,
+ .init = pmac_cpufreq_cpu_init,
+ .suspend = pmac_cpufreq_suspend,
+ .resume = pmac_cpufreq_resume,
+ .flags = CPUFREQ_PM_NO_WARN,
+ .attr = pmac_cpu_freqs_attr,
+ .name = "powermac",
+ .owner = THIS_MODULE,
+};
+
+
+static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
+ "voltage-gpio");
+ struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
+ "frequency-gpio");
+ struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
+ "slewing-done");
+ const u32 *value;
+
+ /*
+ * Check to see if it's GPIO driven or PMU only
+ *
+ * The way we extract the GPIO address is slightly hackish, but it
+ * works well enough for now. We need to abstract the whole GPIO
+ * stuff sooner or later anyway
+ */
+
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ if (freq_gpio_np)
+ frequency_gpio = read_gpio(freq_gpio_np);
+ if (slew_done_gpio_np)
+ slew_done_gpio = read_gpio(slew_done_gpio_np);
+
+ /* If we use the frequency GPIOs, calculate the min/max speeds based
+ * on the bus frequencies
+ */
+ if (frequency_gpio && slew_done_gpio) {
+ int lenp, rc;
+ const u32 *freqs, *ratio;
+
+ freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
+ lenp /= sizeof(u32);
+ if (freqs == NULL || lenp != 2) {
+ printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+ return 1;
+ }
+ ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
+ NULL);
+ if (ratio == NULL) {
+ printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+ return 1;
+ }
+
+ /* Get the min/max bus frequencies */
+ low_freq = min(freqs[0], freqs[1]);
+ hi_freq = max(freqs[0], freqs[1]);
+
+ /* Grrrr.. It _seems_ that the device-tree is lying on the low bus
+ * frequency, it claims it to be around 84Mhz on some models while
+ * it appears to be approx. 101Mhz on all. Let's hack around here...
+ * fortunately, we don't need to be too precise
+ */
+ if (low_freq < 98000000)
+ low_freq = 101000000;
+
+ /* Convert those to CPU core clocks */
+ low_freq = (low_freq * (*ratio)) / 2000;
+ hi_freq = (hi_freq * (*ratio)) / 2000;
+
+		/* Now that we have the frequencies, read the GPIO to see what
+		 * our current speed is
+		 */
+ rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
+ cur_freq = (rc & 0x01) ? hi_freq : low_freq;
+
+ set_speed_proc = gpios_set_cpu_speed;
+ return 1;
+ }
+
+ /* If we use the PMU, look for the min & max frequencies in the
+ * device-tree
+ */
+ value = of_get_property(cpunode, "min-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+ /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
+ * here */
+ if (low_freq < 100000)
+ low_freq *= 10;
+
+ value = of_get_property(cpunode, "max-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ hi_freq = (*value) / 1000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+ if (!voltage_gpio){
+ printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+ return 1;
+ }
+
+ /* OF only reports the high frequency */
+ hi_freq = cur_freq;
+ low_freq = cur_freq/2;
+
+ /* Read actual frequency from CPU */
+ cur_freq = dfs_get_cpu_speed();
+ set_speed_proc = dfs_set_cpu_speed;
+ get_speed_proc = dfs_get_cpu_speed;
+
+ return 0;
+}
+
+static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
+{
+ struct device_node *volt_gpio_np;
+ u32 pvr;
+ const u32 *value;
+
+ if (of_get_property(cpunode, "dynamic-power-step", NULL) == NULL)
+ return 1;
+
+ hi_freq = cur_freq;
+ value = of_get_property(cpunode, "reduced-clock-frequency", NULL);
+ if (!value)
+ return 1;
+ low_freq = (*value) / 1000;
+
+ volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
+ if (volt_gpio_np)
+ voltage_gpio = read_gpio(volt_gpio_np);
+
+ pvr = mfspr(SPRN_PVR);
+ has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
+
+ set_speed_proc = cpu_750fx_cpu_speed;
+ get_speed_proc = cpu_750fx_get_cpu_speed;
+ cur_freq = cpu_750fx_get_cpu_speed();
+
+ return 0;
+}
+
+/* Currently, we support the following machines:
+ *
+ * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
+ * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
+ * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
+ * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
+ * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
+ * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
+ * - Recent MacRISC3 laptops
+ * - All new machines with 7447A CPUs
+ */
+static int __init pmac_cpufreq_setup(void)
+{
+ struct device_node *cpunode;
+ const u32 *value;
+
+ if (strstr(cmd_line, "nocpufreq"))
+ return 0;
+
+ /* Assume only one CPU */
+ cpunode = of_find_node_by_type(NULL, "cpu");
+ if (!cpunode)
+ goto out;
+
+ /* Get current cpu clock freq */
+ value = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!value)
+ goto out;
+ cur_freq = (*value) / 1000;
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /* Check for 7447A based MacRISC3 */
+ if (of_machine_is_compatible("MacRISC3") &&
+ of_get_property(cpunode, "dynamic-power-step", NULL) &&
+ PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
+ pmac_cpufreq_init_7447A(cpunode);
+ transition_latency = 8000000;
+ /* Check for other MacRISC3 machines */
+ } else if (of_machine_is_compatible("PowerBook3,4") ||
+ of_machine_is_compatible("PowerBook3,5") ||
+ of_machine_is_compatible("MacRISC3")) {
+ pmac_cpufreq_init_MacRISC3(cpunode);
+ /* Else check for iBook2 500/600 */
+ } else if (of_machine_is_compatible("PowerBook4,1")) {
+ hi_freq = cur_freq;
+ low_freq = 400000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 550 */
+ else if (of_machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
+ hi_freq = cur_freq;
+ low_freq = 500000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for TiPb 400 & 500 */
+ else if (of_machine_is_compatible("PowerBook3,2")) {
+ /* We only know about the 400 MHz and the 500Mhz model
+ * they both have 300 MHz as low frequency
+ */
+ if (cur_freq < 350000 || cur_freq > 550000)
+ goto out;
+ hi_freq = cur_freq;
+ low_freq = 300000;
+ set_speed_proc = pmu_set_cpu_speed;
+ is_pmu_based = 1;
+ }
+ /* Else check for 750FX */
+ else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
+ pmac_cpufreq_init_750FX(cpunode);
+out:
+ of_node_put(cpunode);
+ if (set_speed_proc == NULL)
+ return -ENODEV;
+
+ pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
+ pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+ ppc_proc_freq = cur_freq * 1000ul;
+
+ printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+ low_freq/1000, hi_freq/1000, cur_freq/1000);
+
+ return cpufreq_register_driver(&pmac_cpufreq_driver);
+}
+
+module_init(pmac_cpufreq_setup);
+
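
As an illustrative aside (not part of the new file above): all of the speedprocs share one ordering rule, namely raise the voltage before the frequency when ramping up, and drop the frequency before the voltage when ramping down, so the core is never clocked faster than its current supply allows. A minimal sketch of that sequencing with stub helpers:

#include <stdio.h>

/* Stubs standing in for the GPIO/PLL pokes done by the real speedprocs. */
static void set_voltage(int high) { printf("voltage -> %s\n", high ? "high" : "low"); }
static void set_pll(int high)     { printf("pll     -> %s\n", high ? "high" : "low"); }

static void switch_speed(int go_high)
{
	if (go_high)
		set_voltage(1);		/* ramping up: voltage first */

	set_pll(go_high);

	if (!go_high)
		set_voltage(0);		/* ramping down: voltage last */
}

int main(void)
{
	switch_speed(1);	/* voltage -> high, then pll -> high */
	switch_speed(0);	/* pll -> low, then voltage -> low */
	return 0;
}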
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
new file mode 100644
index 00000000000..7ba423431cf
--- /dev/null
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -0,0 +1,746 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is iMac G5 and latest single CPU desktop.
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/smu.h>
+#include <asm/pmac_pfunc.h>
+
+#define DBG(fmt...) pr_debug(fmt)
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64 bits register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table g5_cpu_freqs[] = {
+ {CPUFREQ_HIGH, 0},
+ {CPUFREQ_LOW, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr* g5_cpu_freqs_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+/* Power mode data is an array of the 32 bits PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static int g5_pmode_cur;
+
+static void (*g5_switch_volt)(int speed_mode);
+static int (*g5_switch_freq)(int speed_mode);
+static int (*g5_query_freq)(void);
+
+static DEFINE_MUTEX(g5_switch_mutex);
+
+static unsigned long transition_latency;
+
+#ifdef CONFIG_PMAC_SMU
+
+static const u32 *g5_pmode_data;
+static int g5_pmode_max;
+
+static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
+static int g5_fvt_count; /* number of op. points */
+static int g5_fvt_cur; /* current op. point */
+
+/*
+ * SMU based voltage switching for Neo2 platforms
+ */
+
+static void g5_smu_switch_volt(int speed_mode)
+{
+ struct smu_simple_cmd cmd;
+
+ DECLARE_COMPLETION_ONSTACK(comp);
+ smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
+ &comp, 'V', 'S', 'L', 'E', 'W',
+ 0xff, g5_fvt_cur+1, speed_mode);
+ wait_for_completion(&comp);
+}
+
+/*
+ * Platform function based voltage/vdnap switching for Neo2
+ */
+
+static struct pmf_function *pfunc_set_vdnap0;
+static struct pmf_function *pfunc_vdnap0_complete;
+
+static void g5_vdnap_switch_volt(int speed_mode)
+{
+ struct pmf_args args;
+ u32 slew, done = 0;
+ unsigned long timeout;
+
+ slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0;
+ args.count = 1;
+ args.u[0].p = &slew;
+
+ pmf_call_one(pfunc_set_vdnap0, &args);
+
+ /* It's an irq GPIO so we should be able to just block here;
+ * I'll do that later, after I've properly tested the IRQ code for
+ * platform functions.
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_vdnap0_complete, &args);
+ if (done)
+ break;
+ msleep(1);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+}
+
+
+/*
+ * SCOM based frequency switching for 970FX rev3
+ */
+static int g5_scom_switch_freq(int speed_mode)
+{
+ unsigned long flags;
+ int to;
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ local_irq_save(flags);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ g5_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ local_irq_restore(flags);
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
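+
+/*
+ * Illustrative note on the PCR/PSR handshake above, assuming the
+ * device-tree power-mode-data entries use the PCR encoding defined at
+ * the top of this file: selecting CPUFREQ_LOW writes a PCR value whose
+ * speed field holds PCR_SPEED_HALF (0x1 once shifted by
+ * PCR_SPEED_SHIFT), and the polling loop exits once PSR_CMD_RECEIVED
+ * is clear and the two speed bits read back from PSR (shifted by
+ * PSR_CUR_SPEED_SHIFT) match that value, or PSR_CMD_COMPLETED is set.
+ */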
+
+static int g5_scom_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= g5_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/*
+ * Fake voltage switching for platforms with missing support
+ */
+
+static void g5_dummy_switch_volt(int speed_mode)
+{
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+/*
+ * Platform function based voltage switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu0_volt_high;
+static struct pmf_function *pfunc_cpu0_volt_low;
+static struct pmf_function *pfunc_cpu1_volt_high;
+static struct pmf_function *pfunc_cpu1_volt_low;
+
+static void g5_pfunc_switch_volt(int speed_mode)
+{
+ if (speed_mode == CPUFREQ_HIGH) {
+ if (pfunc_cpu0_volt_high)
+ pmf_call_one(pfunc_cpu0_volt_high, NULL);
+ if (pfunc_cpu1_volt_high)
+ pmf_call_one(pfunc_cpu1_volt_high, NULL);
+ } else {
+ if (pfunc_cpu0_volt_low)
+ pmf_call_one(pfunc_cpu0_volt_low, NULL);
+ if (pfunc_cpu1_volt_low)
+ pmf_call_one(pfunc_cpu1_volt_low, NULL);
+ }
+ msleep(10); /* should be faster, to fix */
+}
+
+/*
+ * Platform function based frequency switching for PowerMac7,2 & 7,3
+ */
+
+static struct pmf_function *pfunc_cpu_setfreq_high;
+static struct pmf_function *pfunc_cpu_setfreq_low;
+static struct pmf_function *pfunc_cpu_getfreq;
+static struct pmf_function *pfunc_slewing_done;
+
+static int g5_pfunc_switch_freq(int speed_mode)
+{
+ struct pmf_args args;
+ u32 done = 0;
+ unsigned long timeout;
+ int rc;
+
+ DBG("g5_pfunc_switch_freq(%d)\n", speed_mode);
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ /* Do it */
+ if (speed_mode == CPUFREQ_HIGH)
+ rc = pmf_call_one(pfunc_cpu_setfreq_high, NULL);
+ else
+ rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
+
+ if (rc)
+ printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+
+ /* It's an irq GPIO so we should be able to just block here;
+ * I'll do that later, after I've properly tested the IRQ code for
+ * platform functions.
+ */
+ timeout = jiffies + HZ/10;
+ while(!time_after(jiffies, timeout)) {
+ args.count = 1;
+ args.u[0].p = &done;
+ pmf_call_one(pfunc_slewing_done, &args);
+ if (done)
+ break;
+ msleep(1);
+ }
+ if (done == 0)
+ printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ return 0;
+}
+
+static int g5_pfunc_query_freq(void)
+{
+ struct pmf_args args;
+ u32 val = 0;
+
+ args.count = 1;
+ args.u[0].p = &val;
+ pmf_call_one(pfunc_cpu_getfreq, &args);
+ return val ? CPUFREQ_HIGH : CPUFREQ_LOW;
+}
+
+
+/*
+ * Common interface to the cpufreq core
+ */
+
+static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
+}
+
+static int g5_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int newstate = 0;
+ struct cpufreq_freqs freqs;
+ int rc;
+
+ if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ if (g5_pmode_cur == newstate)
+ return 0;
+
+ mutex_lock(&g5_switch_mutex);
+
+ freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
+ freqs.new = g5_cpu_freqs[newstate].frequency;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+ rc = g5_switch_freq(newstate);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ mutex_unlock(&g5_switch_mutex);
+
+ return rc;
+}
+
+static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
+{
+ return g5_cpu_freqs[g5_pmode_cur].frequency;
+}
+
+static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
+ /* secondary CPUs are tied to the primary one by the
+ * cpufreq core; by copying cpu_online_mask below we tell it
+ * that all CPUs actually share one policy. */
+ cpumask_copy(policy->cpus, cpu_online_mask);
+ cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
+
+ return cpufreq_frequency_table_cpuinfo(policy,
+ g5_cpu_freqs);
+}
+
+
+static struct cpufreq_driver g5_cpufreq_driver = {
+ .name = "powermac",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = g5_cpufreq_cpu_init,
+ .verify = g5_cpufreq_verify,
+ .target = g5_cpufreq_target,
+ .get = g5_cpufreq_get_speed,
+ .attr = g5_cpu_freqs_attr,
+};
+
+
+#ifdef CONFIG_PMAC_SMU
+
+static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
+{
+ struct device_node *cpunode;
+ unsigned int psize, ssize;
+ unsigned long max_freq;
+ char *freq_method, *volt_method;
+ const u32 *valp;
+ u32 pvr_hi;
+ int use_volts_vdnap = 0;
+ int use_volts_smu = 0;
+ int rc = -ENODEV;
+
+ /* Check supported platforms */
+ if (of_machine_is_compatible("PowerMac8,1") ||
+ of_machine_is_compatible("PowerMac8,2") ||
+ of_machine_is_compatible("PowerMac9,1"))
+ use_volts_smu = 1;
+ else if (of_machine_is_compatible("PowerMac11,2"))
+ use_volts_vdnap = 1;
+ else
+ return -ENODEV;
+
+ /* Get first CPU node */
+ for (cpunode = NULL;
+ (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
+ const u32 *reg = of_get_property(cpunode, "reg", NULL);
+ if (reg == NULL || (*reg) != 0)
+ continue;
+ if (!strcmp(cpunode->type, "cpu"))
+ break;
+ }
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+ return -ENODEV;
+ }
+
+ /* Check 970FX for now */
+ valp = of_get_property(cpunode, "cpu-version", NULL);
+ if (!valp) {
+ DBG("No cpu-version property !\n");
+ goto bail_noprops;
+ }
+ pvr_hi = (*valp) >> 16;
+ if (pvr_hi != 0x3c && pvr_hi != 0x44) {
+ printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ g5_pmode_data = of_get_property(cpunode, "power-mode-data",&psize);
+ if (!g5_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ g5_pmode_max = psize / sizeof(u32) - 1;
+
+ if (use_volts_smu) {
+ const struct smu_sdbp_header *shdr;
+
+ /* Look for the FVT table */
+ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ if (!shdr)
+ goto bail_noprops;
+ g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
+ ssize = (shdr->len * sizeof(u32)) -
+ sizeof(struct smu_sdbp_header);
+ g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
+ g5_fvt_cur = 0;
+
+ /* Sanity checking */
+ if (g5_fvt_count < 1 || g5_pmode_max < 1)
+ goto bail_noprops;
+
+ g5_switch_volt = g5_smu_switch_volt;
+ volt_method = "SMU";
+ } else if (use_volts_vdnap) {
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (root == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find root of "
+ "device tree\n");
+ goto bail_noprops;
+ }
+ pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
+ pfunc_vdnap0_complete =
+ pmf_find_function(root, "slewing-done");
+ if (pfunc_set_vdnap0 == NULL ||
+ pfunc_vdnap0_complete == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find required "
+ "platform function\n");
+ goto bail_noprops;
+ }
+
+ g5_switch_volt = g5_vdnap_switch_volt;
+ volt_method = "GPIO";
+ } else {
+ g5_switch_volt = g5_dummy_switch_volt;
+ volt_method = "none";
+ }
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver cannot slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Set callbacks */
+ transition_latency = 12000;
+ g5_switch_freq = g5_scom_switch_freq;
+ g5_query_freq = g5_scom_query_freq;
+ freq_method = "SCOM";
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+
+ /* We keep the CPU node on hold... hopefully, Apple G5s don't have
+ * hotplug CPUs with a dynamic device-tree ...
+ */
+ return rc;
+
+ bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+#endif /* CONFIG_PMAC_SMU */
+
+
+static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
+{
+ struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
+ const u8 *eeprom = NULL;
+ const u32 *valp;
+ u64 max_freq, min_freq, ih, il;
+ int has_volt = 1, rc = 0;
+
+ DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
+ " RackMac3,1...\n");
+
+ /* Get first CPU node */
+ for (cpunode = NULL;
+ (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
+ if (!strcmp(cpunode->type, "cpu"))
+ break;
+ }
+ if (cpunode == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
+ return -ENODEV;
+ }
+
+ /* Lookup the cpuid eeprom node */
+ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
+ if (cpuid != NULL)
+ eeprom = of_get_property(cpuid, "cpuid", NULL);
+ if (eeprom == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Lookup the i2c hwclock */
+ for (hwclock = NULL;
+ (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
+ const char *loc = of_get_property(hwclock,
+ "hwctrl-location", NULL);
+ if (loc == NULL)
+ continue;
+ if (strcmp(loc, "CPU CLOCK"))
+ continue;
+ if (!of_get_property(hwclock, "platform-get-frequency", NULL))
+ continue;
+ break;
+ }
+ if (hwclock == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name);
+
+ /* Now get all the platform functions */
+ pfunc_cpu_getfreq =
+ pmf_find_function(hwclock, "get-frequency");
+ pfunc_cpu_setfreq_high =
+ pmf_find_function(hwclock, "set-frequency-high");
+ pfunc_cpu_setfreq_low =
+ pmf_find_function(hwclock, "set-frequency-low");
+ pfunc_slewing_done =
+ pmf_find_function(hwclock, "slewing-done");
+ pfunc_cpu0_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-0");
+ pfunc_cpu0_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-0");
+ pfunc_cpu1_volt_high =
+ pmf_find_function(hwclock, "set-voltage-high-1");
+ pfunc_cpu1_volt_low =
+ pmf_find_function(hwclock, "set-voltage-low-1");
+
+ /* Check we have minimum requirements */
+ if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
+ pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
+ printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ /* Check that we have complete sets */
+ if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL;
+ has_volt = 0;
+ }
+ if (!has_volt ||
+ pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) {
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL;
+ }
+
+ /* Note: The device tree also contains a "platform-set-values"
+ * function for which I haven't quite figured out the usage. It
+ * might have to be called on init and/or wakeup; I'm not too sure,
+ * but things seem to work fine without it so far ...
+ */
+
+ /* Get max frequency from device-tree */
+ valp = of_get_property(cpunode, "clock-frequency", NULL);
+ if (!valp) {
+ printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ max_freq = (*valp)/1000;
+
+ /* Now calculate the reduced frequency by using the cpuid input
+ * frequency ratio. This requires 64-bit math unless we are willing
+ * to lose some precision.
+ */
+ ih = *((u32 *)(eeprom + 0x10));
+ il = *((u32 *)(eeprom + 0x20));
+
+ /* Check for machines with no useful settings */
+ if (il == ih) {
+ printk(KERN_WARNING "cpufreq: No low frequency mode available"
+ " on this model !\n");
+ rc = -ENODEV;
+ goto bail;
+ }
+
+ min_freq = 0;
+ if (ih != 0 && il != 0)
+ min_freq = (max_freq * il) / ih;
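+ /* For example, with hypothetical values max_freq = 2000000 kHz
+ * (a 2 GHz part), ih = 3000 and il = 2000, this yields
+ * min_freq = 2000000 * 2000 / 3000 = 1333333 kHz.
+ */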
+
+ /* Sanity check */
+ if (min_freq >= max_freq || min_freq < 1000) {
+ printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+ rc = -ENXIO;
+ goto bail;
+ }
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = min_freq;
+
+ /* Set callbacks */
+ transition_latency = CPUFREQ_ETERNAL;
+ g5_switch_volt = g5_pfunc_switch_volt;
+ g5_switch_freq = g5_pfunc_switch_freq;
+ g5_query_freq = g5_pfunc_query_freq;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_volt(CPUFREQ_HIGH);
+ msleep(10);
+ g5_pmode_cur = -1;
+ g5_switch_freq(g5_query_freq());
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Frequency method: i2c/pfunc, "
+ "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+ bail:
+ if (rc != 0) {
+ pmf_put_function(pfunc_cpu_getfreq);
+ pmf_put_function(pfunc_cpu_setfreq_high);
+ pmf_put_function(pfunc_cpu_setfreq_low);
+ pmf_put_function(pfunc_slewing_done);
+ pmf_put_function(pfunc_cpu0_volt_high);
+ pmf_put_function(pfunc_cpu0_volt_low);
+ pmf_put_function(pfunc_cpu1_volt_high);
+ pmf_put_function(pfunc_cpu1_volt_low);
+ }
+ of_node_put(hwclock);
+ of_node_put(cpuid);
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+static int __init g5_cpufreq_init(void)
+{
+ struct device_node *cpus;
+ int rc = 0;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (cpus == NULL) {
+ DBG("No /cpus node !\n");
+ return -ENODEV;
+ }
+
+ if (of_machine_is_compatible("PowerMac7,2") ||
+ of_machine_is_compatible("PowerMac7,3") ||
+ of_machine_is_compatible("RackMac3,1"))
+ rc = g5_pm72_cpufreq_init(cpus);
+#ifdef CONFIG_PMAC_SMU
+ else
+ rc = g5_neo2_cpufreq_init(cpus);
+#endif /* CONFIG_PMAC_SMU */
+
+ of_node_put(cpus);
+ return rc;
+}
+
+module_init(g5_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index ea0222a45b7..ea8e10382ec 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -58,7 +58,7 @@ static int powernow_k6_get_cpu_multiplier(void)
msrval = POWERNOW_IOPORT + 0x0;
wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
- return clock_ratio[(invalue >> 5)&7].index;
+ return clock_ratio[(invalue >> 5)&7].driver_data;
}
@@ -75,13 +75,13 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
unsigned long msrval;
struct cpufreq_freqs freqs;
- if (clock_ratio[best_i].index > max_multiplier) {
+ if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
return;
}
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
- freqs.new = busfreq * clock_ratio[best_i].index;
+ freqs.new = busfreq * clock_ratio[best_i].driver_data;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
@@ -156,7 +156,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* table init */
for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
- f = clock_ratio[i].index;
+ f = clock_ratio[i].driver_data;
if (f > max_multiplier)
clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
else
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 53888dacbe5..b9f80b713fd 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -186,7 +186,7 @@ static int get_ranges(unsigned char *pst)
fid = *pst++;
powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10;
- powernow_table[j].index = fid; /* lower 8 bits */
+ powernow_table[j].driver_data = fid; /* lower 8 bits */
speed = powernow_table[j].frequency;
@@ -203,7 +203,7 @@ static int get_ranges(unsigned char *pst)
maximum_speed = speed;
vid = *pst++;
- powernow_table[j].index |= (vid << 8); /* upper 8 bits */
+ powernow_table[j].driver_data |= (vid << 8); /* upper 8 bits */
pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) "
"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
@@ -212,7 +212,7 @@ static int get_ranges(unsigned char *pst)
mobile_vid_table[vid]%1000);
}
powernow_table[number_scales].frequency = CPUFREQ_TABLE_END;
- powernow_table[number_scales].index = 0;
+ powernow_table[number_scales].driver_data = 0;
return 0;
}
@@ -260,8 +260,8 @@ static void change_speed(struct cpufreq_policy *policy, unsigned int index)
* vid are the upper 8 bits.
*/
- fid = powernow_table[index].index & 0xFF;
- vid = (powernow_table[index].index & 0xFF00) >> 8;
+ fid = powernow_table[index].driver_data & 0xFF;
+ vid = (powernow_table[index].driver_data & 0xFF00) >> 8;
rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
cfid = fidvidstatus.bits.CFID;
@@ -373,8 +373,8 @@ static int powernow_acpi_init(void)
fid = pc.bits.fid;
powernow_table[i].frequency = fsb * fid_codes[fid] / 10;
- powernow_table[i].index = fid; /* lower 8 bits */
- powernow_table[i].index |= (vid << 8); /* upper 8 bits */
+ powernow_table[i].driver_data = fid; /* lower 8 bits */
+ powernow_table[i].driver_data |= (vid << 8); /* upper 8 bits */
speed = powernow_table[i].frequency;
speed_mhz = speed / 1000;
@@ -417,7 +417,7 @@ static int powernow_acpi_init(void)
}
powernow_table[i].frequency = CPUFREQ_TABLE_END;
- powernow_table[i].index = 0;
+ powernow_table[i].driver_data = 0;
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index b828efe4b2f..78f018f2a5d 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -584,9 +584,9 @@ static void print_basics(struct powernow_k8_data *data)
CPUFREQ_ENTRY_INVALID) {
printk(KERN_INFO PFX
"fid 0x%x (%d MHz), vid 0x%x\n",
- data->powernow_table[j].index & 0xff,
+ data->powernow_table[j].driver_data & 0xff,
data->powernow_table[j].frequency/1000,
- data->powernow_table[j].index >> 8);
+ data->powernow_table[j].driver_data >> 8);
}
}
if (data->batps)
@@ -632,13 +632,13 @@ static int fill_powernow_table(struct powernow_k8_data *data,
for (j = 0; j < data->numps; j++) {
int freq;
- powernow_table[j].index = pst[j].fid; /* lower 8 bits */
- powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
+ powernow_table[j].driver_data = pst[j].fid; /* lower 8 bits */
+ powernow_table[j].driver_data |= (pst[j].vid << 8); /* upper 8 bits */
freq = find_khz_freq_from_fid(pst[j].fid);
powernow_table[j].frequency = freq;
}
powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
- powernow_table[data->numps].index = 0;
+ powernow_table[data->numps].driver_data = 0;
if (query_current_values_with_pending_wait(data)) {
kfree(powernow_table);
@@ -810,7 +810,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
powernow_table[data->acpi_data.state_count].frequency =
CPUFREQ_TABLE_END;
- powernow_table[data->acpi_data.state_count].index = 0;
+ powernow_table[data->acpi_data.state_count].driver_data = 0;
data->powernow_table = powernow_table;
if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
@@ -865,7 +865,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);
index = fid | (vid<<8);
- powernow_table[i].index = index;
+ powernow_table[i].driver_data = index;
freq = find_khz_freq_from_fid(fid);
powernow_table[i].frequency = freq;
@@ -941,8 +941,8 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
* the cpufreq frequency table in find_psb_table, vid
* are the upper 8 bits.
*/
- fid = data->powernow_table[index].index & 0xFF;
- vid = (data->powernow_table[index].index & 0xFF00) >> 8;
+ fid = data->powernow_table[index].driver_data & 0xFF;
+ vid = (data->powernow_table[index].driver_data & 0xFF00) >> 8;
pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);
@@ -967,9 +967,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
res = transition_fid_vid(data, fid, vid);
if (res)
- return res;
-
- freqs.new = find_khz_freq_from_fid(data->currfid);
+ freqs.new = freqs.old;
+ else
+ freqs.new = find_khz_freq_from_fid(data->currfid);
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
return res;
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
new file mode 100644
index 00000000000..3cae4529f95
--- /dev/null
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/errno.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+/**
+ * struct cpu_data - per CPU data struct
+ * @clk: the clk of the CPU
+ * @parent: the parent node of the CPU clock
+ * @table: frequency table
+ */
+struct cpu_data {
+ struct clk *clk;
+ struct device_node *parent;
+ struct cpufreq_frequency_table *table;
+};
+
+/**
+ * struct soc_data - SoC specific data
+ * @freq_mask: mask of the disallowed frequencies
+ * @flag: unique flags
+ */
+struct soc_data {
+ u32 freq_mask[4];
+ u32 flag;
+};
+
+#define FREQ_MASK 1
+/* see hardware specification for the allowed frequencies */
+static const struct soc_data sdata[] = {
+ { /* used by p2041 and p3041 */
+ .freq_mask = {0x8, 0x8, 0x2, 0x2},
+ .flag = FREQ_MASK,
+ },
+ { /* used by p5020 */
+ .freq_mask = {0x8, 0x2},
+ .flag = FREQ_MASK,
+ },
+ { /* used by p4080, p5040 */
+ .freq_mask = {0},
+ .flag = 0,
+ },
+};
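+
+/*
+ * Example of how the masks above are consumed (see
+ * corenet_cpufreq_cpu_init() below): on a p5020, hard CPU id 0 gets
+ * mask 0x8, so the parent clock input with index 3 in "clock-names"
+ * is marked CPUFREQ_ENTRY_INVALID, while hard CPU id 1 gets mask 0x2,
+ * masking input 1.
+ */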
+
+/*
+ * the minimum allowed core frequency, in Hz
+ * for chassis v1.0, >= platform frequency
+ * for chassis v2.0, >= platform frequency / 2
+ */
+static u32 min_cpufreq;
+static const u32 *fmask;
+
+/* serialize frequency changes */
+static DEFINE_MUTEX(cpufreq_lock);
+static DEFINE_PER_CPU(struct cpu_data *, cpu_data);
+
+/* cpumask in a cluster */
+static DEFINE_PER_CPU(cpumask_var_t, cpu_mask);
+
+#ifndef CONFIG_SMP
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+ return cpumask_of(0);
+}
+#endif
+
+static unsigned int corenet_cpufreq_get_speed(unsigned int cpu)
+{
+ struct cpu_data *data = per_cpu(cpu_data, cpu);
+
+ return clk_get_rate(data->clk) / 1000;
+}
+
+/* remove duplicated frequencies from the frequency table */
+static void freq_table_redup(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j;
+
+ for (i = 1; i < count; i++) {
+ for (j = 0; j < i; j++) {
+ if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID ||
+ freq_table[j].frequency !=
+ freq_table[i].frequency)
+ continue;
+
+ freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ break;
+ }
+ }
+}
+
+/* sort the frequencies in the frequency table in descending order */
+static void freq_table_sort(struct cpufreq_frequency_table *freq_table,
+ int count)
+{
+ int i, j, ind;
+ unsigned int freq, max_freq;
+ struct cpufreq_frequency_table table;
+ for (i = 0; i < count - 1; i++) {
+ max_freq = freq_table[i].frequency;
+ ind = i;
+ for (j = i + 1; j < count; j++) {
+ freq = freq_table[j].frequency;
+ if (freq == CPUFREQ_ENTRY_INVALID ||
+ freq <= max_freq)
+ continue;
+ ind = j;
+ max_freq = freq;
+ }
+
+ if (ind != i) {
+ /* exchange the frequencies */
+ table.driver_data = freq_table[i].driver_data;
+ table.frequency = freq_table[i].frequency;
+ freq_table[i].driver_data = freq_table[ind].driver_data;
+ freq_table[i].frequency = freq_table[ind].frequency;
+ freq_table[ind].driver_data = table.driver_data;
+ freq_table[ind].frequency = table.frequency;
+ }
+ }
+}
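+
+/*
+ * For example, a raw table of {1200000, 1200000, 800000, 1000000} kHz
+ * becomes {1200000, CPUFREQ_ENTRY_INVALID, 800000, 1000000} after
+ * freq_table_redup() and {1200000, CPUFREQ_ENTRY_INVALID, 1000000,
+ * 800000} after freq_table_sort(); invalid entries keep their slot.
+ */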
+
+static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device_node *np;
+ int i, count, ret;
+ u32 freq, mask;
+ struct clk *clk;
+ struct cpufreq_frequency_table *table;
+ struct cpu_data *data;
+ unsigned int cpu = policy->cpu;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ pr_err("%s: no memory\n", __func__);
+ goto err_np;
+ }
+
+ data->clk = of_clk_get(np, 0);
+ if (IS_ERR(data->clk)) {
+ pr_err("%s: no clock information\n", __func__);
+ goto err_nomem2;
+ }
+
+ data->parent = of_parse_phandle(np, "clocks", 0);
+ if (!data->parent) {
+ pr_err("%s: could not get clock information\n", __func__);
+ goto err_nomem2;
+ }
+
+ count = of_property_count_strings(data->parent, "clock-names");
+ table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ pr_err("%s: no memory\n", __func__);
+ goto err_node;
+ }
+
+ if (fmask)
+ mask = fmask[get_hard_smp_processor_id(cpu)];
+ else
+ mask = 0x0;
+
+ for (i = 0; i < count; i++) {
+ clk = of_clk_get(data->parent, i);
+ freq = clk_get_rate(clk);
+ /*
+ * the clock is valid if its frequency is not masked
+ * and not lower than the minimum allowed frequency.
+ */
+ if (freq < min_cpufreq || (mask & (1 << i)))
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ else
+ table[i].frequency = freq / 1000;
+ table[i].driver_data = i;
+ }
+ freq_table_redup(table, count);
+ freq_table_sort(table, count);
+ table[i].frequency = CPUFREQ_TABLE_END;
+
+ /* set the min and max frequency properly */
+ ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ if (ret) {
+ pr_err("invalid frequency table: %d\n", ret);
+ goto err_nomem1;
+ }
+
+ data->table = table;
+ per_cpu(cpu_data, cpu) = data;
+
+ /* update ->cpus if we have a cluster, no harm if not */
+ cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
+ for_each_cpu(i, per_cpu(cpu_mask, cpu))
+ per_cpu(cpu_data, i) = data;
+
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cur = corenet_cpufreq_get_speed(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(table, cpu);
+ of_node_put(np);
+
+ return 0;
+
+err_nomem1:
+ kfree(table);
+err_node:
+ of_node_put(data->parent);
+err_nomem2:
+ per_cpu(cpu_data, cpu) = NULL;
+ kfree(data);
+err_np:
+ of_node_put(np);
+
+ return -ENODEV;
+}
+
+static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
+ unsigned int cpu;
+
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ of_node_put(data->parent);
+ kfree(data->table);
+ kfree(data);
+
+ for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu))
+ per_cpu(cpu_data, cpu) = NULL;
+
+ return 0;
+}
+
+static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ per_cpu(cpu_data, policy->cpu)->table;
+
+ return cpufreq_frequency_table_verify(policy, table);
+}
+
+static int corenet_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int new;
+ struct clk *parent;
+ int ret;
+ struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
+
+ cpufreq_frequency_table_target(policy, data->table,
+ target_freq, relation, &new);
+
+ if (policy->cur == data->table[new].frequency)
+ return 0;
+
+ freqs.old = policy->cur;
+ freqs.new = data->table[new].frequency;
+
+ mutex_lock(&cpufreq_lock);
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ parent = of_clk_get(data->parent, data->table[new].driver_data);
+ ret = clk_set_parent(data->clk, parent);
+ if (ret)
+ freqs.new = freqs.old;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
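+
+/*
+ * The switch above works by re-parenting the CPU clock mux: if the
+ * chosen table entry has, say, driver_data == 2, of_clk_get() returns
+ * the third input clock listed in the clockgen node and
+ * clk_set_parent() moves the CPU clock onto it.
+ */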
+
+static struct freq_attr *corenet_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
+ .name = "ppc_cpufreq",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = corenet_cpufreq_cpu_init,
+ .exit = __exit_p(corenet_cpufreq_cpu_exit),
+ .verify = corenet_cpufreq_verify,
+ .target = corenet_cpufreq_target,
+ .get = corenet_cpufreq_get_speed,
+ .attr = corenet_cpufreq_attr,
+};
+
+static const struct of_device_id node_matches[] __initdata = {
+ { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], },
+ { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], },
+ { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], },
+ { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], },
+ { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], },
+ { .compatible = "fsl,qoriq-clockgen-2.0", },
+ {}
+};
+
+static int __init ppc_corenet_cpufreq_init(void)
+{
+ int ret;
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct soc_data *data;
+ unsigned int cpu;
+
+ np = of_find_matching_node(NULL, node_matches);
+ if (!np)
+ return -ENODEV;
+
+ for_each_possible_cpu(cpu) {
+ if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL))
+ goto err_mask;
+ cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu));
+ }
+
+ match = of_match_node(node_matches, np);
+ data = match->data;
+ if (data) {
+ if (data->flag)
+ fmask = data->freq_mask;
+ min_cpufreq = fsl_get_sys_freq();
+ } else {
+ min_cpufreq = fsl_get_sys_freq() / 2;
+ }
+
+ of_node_put(np);
+
+ ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver);
+ if (!ret)
+ pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n");
+
+ return ret;
+
+err_mask:
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_mask, cpu));
+
+ return -ENOMEM;
+}
+module_init(ppc_corenet_cpufreq_init);
+
+static void __exit ppc_corenet_cpufreq_exit(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_mask, cpu));
+
+ cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver);
+}
+module_exit(ppc_corenet_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>");
+MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs");
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e577a1dbbfc..5936f8d6f2c 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -106,7 +106,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* initialize frequency table */
for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) {
- cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index;
+ cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data;
pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
}
@@ -165,7 +165,7 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy,
"1/%d of max frequency\n",
policy->cpu,
cbe_freqs[cbe_pmode_new].frequency,
- cbe_freqs[cbe_pmode_new].index);
+ cbe_freqs[cbe_pmode_new].driver_data);
rc = set_pmode(policy->cpu, cbe_pmode_new);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 9e5bc8e388a..fb3981ac829 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -420,7 +420,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
/* Generate pxa25x the run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz;
- pxa255_run_freq_table[i].index = i;
+ pxa255_run_freq_table[i].driver_data = i;
}
pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END;
@@ -428,7 +428,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) {
pxa255_turbo_freq_table[i].frequency =
pxa255_turbo_freqs[i].khz;
- pxa255_turbo_freq_table[i].index = i;
+ pxa255_turbo_freq_table[i].driver_data = i;
}
pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END;
@@ -440,9 +440,9 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
if (freq > pxa27x_maxfreq)
break;
pxa27x_freq_table[i].frequency = freq;
- pxa27x_freq_table[i].index = i;
+ pxa27x_freq_table[i].driver_data = i;
}
- pxa27x_freq_table[i].index = i;
+ pxa27x_freq_table[i].driver_data = i;
pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
/*
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 15d60f857ad..9c92ef032a9 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -98,10 +98,10 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
return -ENOMEM;
for (i = 0; i < num; i++) {
- table[i].index = i;
+ table[i].driver_data = i;
table[i].frequency = freqs[i].cpufreq_mhz * 1000;
}
- table[num].index = i;
+ table[num].driver_data = i;
table[num].frequency = CPUFREQ_TABLE_END;
pxa3xx_freqs = freqs;
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
new file mode 100644
index 00000000000..cfa0dd8723e
--- /dev/null
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/cpu-freq-core.h>
+
+/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
+
+static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ u32 clkdiv = 0;
+
+ if (cfg->divs.h_divisor == 2)
+ clkdiv |= S3C2410_CLKDIVN_HDIVN;
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2410_CLKDIVN_PDIVN;
+
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+}
+
+static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long hclk, fclk, pclk;
+ unsigned int hdiv, pdiv;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ hclk_max = cfg->max.hclk;
+
+ cfg->freq.armclk = fclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
+ __func__, fclk, hclk_max);
+
+ hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
+ hclk = fclk / hdiv;
+
+ if (hclk > cfg->max.hclk) {
+ s3c_freq_dbg("%s: hclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+ pclk = hclk / pdiv;
+
+ if (pclk > cfg->max.pclk) {
+ s3c_freq_dbg("%s: pclk too big\n", __func__);
+ return -EINVAL;
+ }
+
+ pdiv *= hdiv;
+
+ /* record the result */
+ cfg->divs.p_divisor = pdiv;
+ cfg->divs.h_divisor = hdiv;
+
+ return 0;
+}
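+
+/*
+ * Worked example for the calculation above, using the default limits
+ * below (fclk 200 MHz, hclk 100 MHz, pclk 50 MHz): a 200 MHz fclk gives
+ * hdiv = 2 (hclk = 100 MHz) and pdiv = 2 (pclk = 50 MHz); pdiv is then
+ * scaled by hdiv, so the stored divisors are h_divisor = 2 and
+ * p_divisor = 4, both relative to fclk.
+ */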
+
+static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ /* transition latency is about 5ms worst-case, so
+ * set 10ms to be sure */
+ .latency = 10000000,
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 12,
+
+ .need_pll = 1,
+
+ .name = "s3c2410",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+ .resume_clocks = s3c2410_setup_clocks,
+
+ .set_fvco = s3c2410_set_fvco,
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2410_cpufreq_setdivs,
+ .calc_divs = s3c2410_cpufreq_calcdivs,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2410_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ return s3c_cpufreq_register(&s3c2410_cpufreq_info);
+}
+
+static struct subsys_interface s3c2410_cpufreq_interface = {
+ .name = "s3c2410_cpufreq",
+ .subsys = &s3c2410_subsys,
+ .add_dev = s3c2410_cpufreq_add,
+};
+
+static int __init s3c2410_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410_cpufreq_interface);
+}
+arch_initcall(s3c2410_cpufreq_init);
+
+static int s3c2410a_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ /* alter the maximum freq settings for S3C2410A. If a board knows
+ * it only has a maximum of 200, then it should register its own
+ * limits. */
+
+ s3c2410_cpufreq_info.max.fclk = 266000000;
+ s3c2410_cpufreq_info.max.hclk = 133000000;
+ s3c2410_cpufreq_info.max.pclk = 66500000;
+ s3c2410_cpufreq_info.name = "s3c2410a";
+
+ return s3c2410_cpufreq_add(dev, sif);
+}
+
+static struct subsys_interface s3c2410a_cpufreq_interface = {
+ .name = "s3c2410a_cpufreq",
+ .subsys = &s3c2410a_subsys,
+ .add_dev = s3c2410a_cpufreq_add,
+};
+
+static int __init s3c2410a_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2410a_cpufreq_interface);
+}
+arch_initcall(s3c2410a_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
new file mode 100644
index 00000000000..4645b489899
--- /dev/null
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2412 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+#include <mach/s3c2412.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/cpu-freq-core.h>
+
+/* our clock resources. */
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv, armdiv, dvs;
+ unsigned long hclk, fclk, armclk, armdiv_clk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+ /* We can't run hclk above armclk; at best armclk and hclk
+ * end up equal, with armclk running in dvs mode. */
+
+ if (hclk_max > armclk)
+ hclk_max = armclk;
+
+ s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
+ __func__, fclk, armclk, hclk_max);
+ s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
+ __func__, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->freq.pclk);
+
+ armdiv = fclk / armclk;
+
+ if (armdiv < 1)
+ armdiv = 1;
+ if (armdiv > 2)
+ armdiv = 2;
+
+ cfg->divs.arm_divisor = armdiv;
+ armdiv_clk = fclk / armdiv;
+
+ hdiv = armdiv_clk / hclk_max;
+ if (hdiv < 1)
+ hdiv = 1;
+
+ cfg->freq.hclk = hclk = armdiv_clk / hdiv;
+
+ /* set dvs depending on whether we reached armclk or not. */
+ cfg->divs.dvs = dvs = armclk < armdiv_clk;
+
+ /* update the actual armclk we achieved. */
+ cfg->freq.armclk = dvs ? hclk : armdiv_clk;
+
+ s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
+ __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
+
+ if (hdiv > 4)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ cfg->freq.pclk = hclk / pdiv;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv * armdiv;
+ cfg->divs.p_divisor = pdiv * armdiv;
+
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv;
+ unsigned long olddiv;
+
+ olddiv = clkdiv = __raw_readl(S3C2410_CLKDIVN);
+
+ /* clear off current clock info */
+
+ clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
+ clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
+ clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
+
+ if (cfg->divs.arm_divisor == 2)
+ clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
+
+ clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2412_CLKDIVN_PDIVN;
+
+ s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+static void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
+{
+ struct s3c_cpufreq_board *board = cfg->board;
+ unsigned long refresh;
+
+ s3c_freq_dbg("%s: refresh %u ns, hclk %lu\n", __func__,
+ board->refresh, cfg->freq.hclk);
+
+ /* Reduce both the refresh time (in ns) and the frequency (in MHz)
+ * by 10 each to ensure that we do not overflow 32 bit numbers. This
+ * should work for HCLK up to 133MHz and refresh period up to 30usec.
+ */
+
+ refresh = (board->refresh / 10);
+ refresh *= (cfg->freq.hclk / 100);
+ refresh /= (1 * 1000 * 1000); /* 10^6 */
+
+ s3c_freq_dbg("%s: setting refresh 0x%08lx\n", __func__, refresh);
+ __raw_writel(refresh, S3C2412_REFRESH);
+}
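+
+/*
+ * Worked example for the refresh calculation above, with hypothetical
+ * board values: refresh = 7800 ns and hclk = 133000000 Hz give
+ * (7800 / 10) * (133000000 / 100) / 10^6 = 780 * 1330000 / 1000000
+ * = 1037, which is the value written to S3C2412_REFRESH.
+ */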
+
+/* set the default cpu frequency information, based on a 200MHz part,
+ * as we have no other way of detecting the speed rating in software.
+ */
+
+static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
+ .max = {
+ .fclk = 200000000,
+ .hclk = 100000000,
+ .pclk = 50000000,
+ },
+
+ .latency = 5000000, /* 5ms */
+
+ .locktime_m = 150,
+ .locktime_u = 150,
+ .locktime_bits = 16,
+
+ .name = "s3c2412",
+ .set_refresh = s3c2412_cpufreq_setrefresh,
+ .set_divs = s3c2412_cpufreq_setdivs,
+ .calc_divs = s3c2412_cpufreq_calcdivs,
+
+ .calc_iotiming = s3c2412_iotiming_calc,
+ .set_iotiming = s3c2412_iotiming_set,
+ .get_iotiming = s3c2412_iotiming_get,
+
+ .resume_clocks = s3c2412_setup_clocks,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
+};
+
+static int s3c2412_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ unsigned long fclk_rate;
+
+ hclk = clk_get(NULL, "hclk");
+ if (IS_ERR(hclk)) {
+ printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+ return -ENOENT;
+ }
+
+ fclk = clk_get(NULL, "fclk");
+ if (IS_ERR(fclk)) {
+ printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+ goto err_fclk;
+ }
+
+ fclk_rate = clk_get_rate(fclk);
+ if (fclk_rate > 200000000) {
+ printk(KERN_INFO
+ "%s: fclk %ld MHz, assuming 266MHz capable part\n",
+ __func__, fclk_rate / 1000000);
+ s3c2412_cpufreq_info.max.fclk = 266000000;
+ s3c2412_cpufreq_info.max.hclk = 133000000;
+ s3c2412_cpufreq_info.max.pclk = 66000000;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+ goto err_armclk;
+ }
+
+ xtal = clk_get(NULL, "xtal");
+ if (IS_ERR(xtal)) {
+ printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+ goto err_xtal;
+ }
+
+ return s3c_cpufreq_register(&s3c2412_cpufreq_info);
+
+err_xtal:
+ clk_put(armclk);
+err_armclk:
+ clk_put(fclk);
+err_fclk:
+ clk_put(hclk);
+
+ return -ENOENT;
+}
+
+static struct subsys_interface s3c2412_cpufreq_interface = {
+ .name = "s3c2412_cpufreq",
+ .subsys = &s3c2412_subsys,
+ .add_dev = s3c2412_cpufreq_add,
+};
+
+static int s3c2412_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2412_cpufreq_interface);
+}
+arch_initcall(s3c2412_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 4f1881eee3f..ce5b9fca9c1 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -205,7 +205,7 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
clk_get_rate(s3c_freq->hclk) / 1000);
if (ret < 0) {
- pr_err("cpufreq: Failed to to set the armdiv to %lukHz: %d\n",
+ pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
clk_get_rate(s3c_freq->hclk) / 1000, ret);
return ret;
}
@@ -244,7 +244,7 @@ static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret != 0)
goto out;
- idx = s3c_freq->freq_table[i].index;
+ idx = s3c_freq->freq_table[i].driver_data;
if (idx == SOURCE_HCLK)
to_dvs = 1;
@@ -312,7 +312,7 @@ static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
continue;
- dvfs = &s3c2416_dvfs_table[freq->index];
+ dvfs = &s3c2416_dvfs_table[freq->driver_data];
found = 0;
/* Check only the min-voltage, more is always ok on S3C2416 */
@@ -462,7 +462,7 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
freq = s3c_freq->freq_table;
while (freq->frequency != CPUFREQ_TABLE_END) {
/* special handling for dvs mode */
- if (freq->index == 0) {
+ if (freq->driver_data == 0) {
if (!s3c_freq->hclk) {
pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
freq->frequency);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
new file mode 100644
index 00000000000..72b2cc8a5a8
--- /dev/null
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2006-2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ *
+ * S3C2440/S3C2442 CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/regs-clock.h>
+
+#include <plat/cpu.h>
+#include <plat/cpu-freq-core.h>
+#include <plat/clock.h>
+
+static struct clk *xtal;
+static struct clk *fclk;
+static struct clk *hclk;
+static struct clk *armclk;
+
+/* HDIV: 1, 2, 3, 4, 6, 8 */
+
+static inline int within_khz(unsigned long a, unsigned long b)
+{
+ long diff = a - b;
+
+ return (diff >= -1000 && diff <= 1000);
+}
+
+/**
+ * s3c2440_cpufreq_calcdivs - calculate divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Calculate the divider values for the given frequency settings
+ * specified in @cfg. The values are stored in @cfg for later use
+ * by the relevant set routine if the requested settings can be reached.
+ */
+int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned int hdiv, pdiv;
+ unsigned long hclk, fclk, armclk;
+ unsigned long hclk_max;
+
+ fclk = cfg->freq.fclk;
+ armclk = cfg->freq.armclk;
+ hclk_max = cfg->max.hclk;
+
+ s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
+ __func__, fclk, armclk, hclk_max);
+
+ if (armclk > fclk) {
+ printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+ armclk = fclk;
+ }
+
+ /* if we are in DVS, we need HCLK to be <= ARMCLK */
+ if (armclk < fclk && armclk < hclk_max)
+ hclk_max = armclk;
+
+ for (hdiv = 1; hdiv < 9; hdiv++) {
+ if (hdiv == 5 || hdiv == 7)
+ hdiv++;
+
+ hclk = (fclk / hdiv);
+ if (hclk <= hclk_max || within_khz(hclk, hclk_max))
+ break;
+ }
+
+ s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
+
+ if (hdiv > 8)
+ goto invalid;
+
+ pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
+
+ if ((hclk / pdiv) > cfg->max.pclk)
+ pdiv++;
+
+ s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
+
+ if (pdiv > 2)
+ goto invalid;
+
+ pdiv *= hdiv;
+
+ /* calculate a valid armclk */
+
+ if (armclk < hclk)
+ armclk = hclk;
+
+ /* if we're running armclk lower than fclk, this really means
+ * that the system should go into dvs mode, which means that
+ * armclk is connected to hclk. */
+ if (armclk < fclk) {
+ cfg->divs.dvs = 1;
+ armclk = hclk;
+ } else
+ cfg->divs.dvs = 0;
+
+ cfg->freq.armclk = armclk;
+
+ /* store the result, and then return */
+
+ cfg->divs.h_divisor = hdiv;
+ cfg->divs.p_divisor = pdiv;
+
+ return 0;
+
+ invalid:
+ return -EINVAL;
+}
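+
+/*
+ * Worked example, assuming a hypothetical 400 MHz part running armclk
+ * at fclk with the limits below (hclk 133.3 MHz, pclk 66.6 MHz): the
+ * divisor scan settles on hdiv = 3 (hclk ~= 133.3 MHz), pdiv starts at
+ * 2 (pclk ~= 66.6 MHz) and is scaled by hdiv to 6, and dvs stays 0
+ * because armclk == fclk.
+ */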
+
+#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
+ S3C2440_CAMDIVN_HCLK4_HALF)
+
+/**
+ * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
+ * @cfg: The cpu frequency settings.
+ *
+ * Set the divisors from the settings in @cfg, which were generated
+ * during the calculation phase by s3c2440_cpufreq_calcdivs().
+ */
+static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long clkdiv, camdiv;
+
+ s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__,
+ cfg->divs.h_divisor, cfg->divs.p_divisor);
+
+ clkdiv = __raw_readl(S3C2410_CLKDIVN);
+ camdiv = __raw_readl(S3C2440_CAMDIVN);
+
+ clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
+ camdiv &= ~CAMDIVN_HCLK_HALF;
+
+ switch (cfg->divs.h_divisor) {
+ case 1:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
+ break;
+
+ case 2:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
+ break;
+
+ case 6:
+ camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
+ case 3:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
+ break;
+
+ case 8:
+ camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
+ case 4:
+ clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
+ break;
+
+ default:
+ BUG(); /* we don't expect to get here. */
+ }
+
+ if (cfg->divs.p_divisor != cfg->divs.h_divisor)
+ clkdiv |= S3C2440_CLKDIVN_PDIVN;
+
+ /* todo - set pclk. */
+
+ /* Write the divisors first with hclk intentionally halved so that
+ * when we write clkdiv we undershoot the frequency rather than
+ * overshoot it. We then wait briefly and remove the hclk halving
+ * if necessary.
+ */
+
+ __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN);
+ __raw_writel(clkdiv, S3C2410_CLKDIVN);
+
+ ndelay(20);
+ __raw_writel(camdiv, S3C2440_CAMDIVN);
+
+ clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
+}
+
+static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
+ int *divs,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ unsigned long freq;
+ int index = 0;
+ int div;
+
+ for (div = *divs; div > 0; div = *divs++) {
+ freq = fclk / div;
+
+ if (freq > max_hclk && div != 1)
+ continue;
+
+ freq /= 1000; /* table is in kHz */
+ index = s3c_cpufreq_addfreq(table, index, table_size, freq);
+ if (index < 0)
+ break;
+ }
+
+ return index;
+}
+
+static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
+
+static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
+ struct cpufreq_frequency_table *table,
+ size_t table_size)
+{
+ int ret;
+
+ WARN_ON(cfg->info == NULL);
+ WARN_ON(cfg->board == NULL);
+
+ ret = run_freq_for(cfg->info->max.hclk,
+ cfg->info->max.fclk,
+ hclk_divs,
+ table, table_size);
+
+ s3c_freq_dbg("%s: returning %d\n", __func__, ret);
+
+ return ret;
+}
+
+struct s3c_cpufreq_info s3c2440_cpufreq_info = {
+ .max = {
+ .fclk = 400000000,
+ .hclk = 133333333,
+ .pclk = 66666666,
+ },
+
+ .locktime_m = 300,
+ .locktime_u = 300,
+ .locktime_bits = 16,
+
+ .name = "s3c244x",
+ .calc_iotiming = s3c2410_iotiming_calc,
+ .set_iotiming = s3c2410_iotiming_set,
+ .get_iotiming = s3c2410_iotiming_get,
+ .set_fvco = s3c2410_set_fvco,
+
+ .set_refresh = s3c2410_cpufreq_setrefresh,
+ .set_divs = s3c2440_cpufreq_setdivs,
+ .calc_divs = s3c2440_cpufreq_calcdivs,
+ .calc_freqtable = s3c2440_cpufreq_calctable,
+
+ .resume_clocks = s3c244x_setup_clocks,
+
+ .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
+};
+
+static int s3c2440_cpufreq_add(struct device *dev,
+ struct subsys_interface *sif)
+{
+ xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ armclk = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
+ printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+ return -ENOENT;
+ }
+
+ return s3c_cpufreq_register(&s3c2440_cpufreq_info);
+}
+
+static struct subsys_interface s3c2440_cpufreq_interface = {
+ .name = "s3c2440_cpufreq",
+ .subsys = &s3c2440_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2440_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2440_cpufreq_interface);
+}
+
+/* arch_initcall adds the clocks we need, so use subsys_initcall. */
+subsys_initcall(s3c2440_cpufreq_init);
+
+static struct subsys_interface s3c2442_cpufreq_interface = {
+ .name = "s3c2442_cpufreq",
+ .subsys = &s3c2442_subsys,
+ .add_dev = s3c2440_cpufreq_add,
+};
+
+static int s3c2442_cpufreq_init(void)
+{
+ return subsys_interface_register(&s3c2442_cpufreq_interface);
+}
+subsys_initcall(s3c2442_cpufreq_init);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
new file mode 100644
index 00000000000..9b7b4289d66
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2009 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling - debugfs status support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+
+#include <plat/cpu-freq-core.h>
+
+static struct dentry *dbgfs_root;
+static struct dentry *dbgfs_file_io;
+static struct dentry *dbgfs_file_info;
+static struct dentry *dbgfs_file_board;
+
+#define print_ns(x) ((x) / 10), ((x) % 10)
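+/* print_ns() splits a value held in tenths of a nanosecond into whole
+ * and fractional parts for printing, e.g. print_ns(75) expands to 7, 5 */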
+
+static void show_max(struct seq_file *seq, struct s3c_freq *f)
+{
+ seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
+ f->fclk, f->hclk, f->pclk, f->armclk);
+}
+
+static int board_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_cpufreq_board *brd;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ brd = cfg->board;
+ if (!brd) {
+ seq_printf(seq, "no board definition set?\n");
+ return 0;
+ }
+
+ seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
+ seq_printf(seq, "auto_io=%u\n", brd->auto_io);
+ seq_printf(seq, "need_io=%u\n", brd->need_io);
+
+ show_max(seq, &brd->max);
+
+ return 0;
+}
+
+static int fops_board_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, board_show, NULL);
+}
+
+static const struct file_operations fops_board = {
+ .open = fops_board_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int info_show(struct seq_file *seq, void *p)
+{
+ struct s3c_cpufreq_config *cfg;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
+ seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
+ cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
+ seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk);
+ seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
+ seq_printf(seq, "\n");
+
+ show_max(seq, &cfg->max);
+
+ seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
+ cfg->divs.h_divisor, cfg->divs.p_divisor,
+ cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
+
+ return 0;
+}
+
+static int fops_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_show, NULL);
+}
+
+static const struct file_operations fops_info = {
+ .open = fops_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int io_show(struct seq_file *seq, void *p)
+{
+ void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
+ struct s3c_cpufreq_config *cfg;
+ struct s3c_iotimings *iot;
+ union s3c_iobank *iob;
+ int bank;
+
+ cfg = s3c_cpufreq_getconfig();
+ if (!cfg) {
+ seq_printf(seq, "no configuration registered\n");
+ return 0;
+ }
+
+ show_bank = cfg->info->debug_io_show;
+ if (!show_bank) {
+ seq_printf(seq, "no code to show bank timing\n");
+ return 0;
+ }
+
+ iot = s3c_cpufreq_getiotimings();
+ if (!iot) {
+ seq_printf(seq, "no io timings registered\n");
+ return 0;
+ }
+
+ seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
+
+ for (bank = 0; bank < MAX_BANKS; bank++) {
+ iob = &iot->bank[bank];
+
+ seq_printf(seq, "bank %d: ", bank);
+
+ if (!iob->io_2410) {
+ seq_printf(seq, "nothing set\n");
+ continue;
+ }
+
+ show_bank(seq, cfg, iob);
+ }
+
+ return 0;
+}
+
+static int fops_io_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, io_show, NULL);
+}
+
+static const struct file_operations fops_io = {
+ .open = fops_io_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
+static int __init s3c_freq_debugfs_init(void)
+{
+ dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
+ if (IS_ERR(dbgfs_root)) {
+ printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+ return PTR_ERR(dbgfs_root);
+ }
+
+ dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
+ NULL, &fops_io);
+
+ dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
+ NULL, &fops_info);
+
+ dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
+ NULL, &fops_board);
+
+ return 0;
+}
+
+late_initcall(s3c_freq_debugfs_init);
+
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
new file mode 100644
index 00000000000..3513e747716
--- /dev/null
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright (c) 2006-2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX CPU Frequency scaling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <plat/cpu.h>
+#include <plat/clock.h>
+#include <plat/cpu-freq-core.h>
+
+#include <mach/regs-clock.h>
+
+/* note, cpufreq support deals in kHz, not Hz */
+
+static struct cpufreq_driver s3c24xx_driver;
+static struct s3c_cpufreq_config cpu_cur;
+static struct s3c_iotimings s3c24xx_iotiming;
+static struct cpufreq_frequency_table *pll_reg;
+static unsigned int last_target = ~0;
+static unsigned int ftab_size;
+static struct cpufreq_frequency_table *ftab;
+
+static struct clk *_clk_mpll;
+static struct clk *_clk_xtal;
+static struct clk *clk_fclk;
+static struct clk *clk_hclk;
+static struct clk *clk_pclk;
+static struct clk *clk_arm;
+
+#ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
+struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
+{
+ return &cpu_cur;
+}
+
+struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
+{
+ return &s3c24xx_iotiming;
+}
+#endif /* CONFIG_CPU_FREQ_S3C24XX_DEBUGFS */
+
+static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long fclk, pclk, hclk, armclk;
+
+ cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
+ cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
+ cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
+ cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
+
+ cfg->pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ cfg->pll.frequency = fclk;
+
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+
+ cfg->divs.h_divisor = fclk / hclk;
+ cfg->divs.p_divisor = fclk / pclk;
+}
+
+static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
+{
+ unsigned long pll = cfg->pll.frequency;
+
+ cfg->freq.fclk = pll;
+ cfg->freq.hclk = pll / cfg->divs.h_divisor;
+ cfg->freq.pclk = pll / cfg->divs.p_divisor;
+
+ /* convert hclk into 10ths of nanoseconds for io calcs */
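+ /* e.g. assuming hclk = 133MHz: 1000000000 / 13300000 = 75, a 7.5ns cycle */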
+ cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
+}
+
+static inline int closer(unsigned int target, unsigned int n, unsigned int c)
+{
+ int diff_cur = abs(target - c);
+ int diff_new = abs(target - n);
+
+ return (diff_new < diff_cur);
+}
+
+static void s3c_cpufreq_show(const char *pfx,
+ struct s3c_cpufreq_config *cfg)
+{
+ s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
+ pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
+ cfg->freq.hclk, cfg->divs.h_divisor,
+ cfg->freq.pclk, cfg->divs.p_divisor);
+}
+
+/* functions to wrap the driver info calls that do the cpu-specific work */
+
+static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->set_iotiming)
+ (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
+}
+
+static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
+{
+ if (cfg->info->calc_iotiming)
+ return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
+
+ return 0;
+}
+
+static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_refresh)(cfg);
+}
+
+static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_divs)(cfg);
+}
+
+static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
+{
+ return (cfg->info->calc_divs)(cfg);
+}
+
+static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
+{
+ (cfg->info->set_fvco)(cfg);
+}
+
+static inline void s3c_cpufreq_resume_clocks(void)
+{
+ cpu_cur.info->resume_clocks();
+}
+
+static inline void s3c_cpufreq_updateclk(struct clk *clk,
+ unsigned int freq)
+{
+ clk_set_rate(clk, freq);
+}
+
+static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ struct cpufreq_frequency_table *pll)
+{
+ struct s3c_cpufreq_freqs freqs;
+ struct s3c_cpufreq_config cpu_new;
+ unsigned long flags;
+
+ cpu_new = cpu_cur; /* copy new from current */
+
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ /* TODO - check for DMA currently outstanding */
+
+ cpu_new.pll = pll ? *pll : cpu_cur.pll;
+
+ if (pll)
+ freqs.pll_changing = 1;
+
+ /* update our frequencies */
+
+ cpu_new.freq.armclk = target_freq;
+ cpu_new.freq.fclk = cpu_new.pll.frequency;
+
+ if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
+ printk(KERN_ERR "no divisors for %d\n", target_freq);
+ goto err_notpossible;
+ }
+
+ s3c_freq_dbg("%s: got divs\n", __func__);
+
+ s3c_cpufreq_calc(&cpu_new);
+
+ s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
+
+ if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
+ if (s3c_cpufreq_calcio(&cpu_new) < 0) {
+ printk(KERN_ERR "%s: no IO timings\n", __func__);
+ goto err_notpossible;
+ }
+ }
+
+ s3c_cpufreq_show("new", &cpu_new);
+
+ /* setup our cpufreq parameters */
+
+ freqs.old = cpu_cur.freq;
+ freqs.new = cpu_new.freq;
+
+ freqs.freqs.old = cpu_cur.freq.armclk / 1000;
+ freqs.freqs.new = cpu_new.freq.armclk / 1000;
+
+ /* update f/h/p clock settings before we issue the change
+ * notification, so that drivers do not need to do anything
+ * special if they want to recalculate on CPUFREQ_PRECHANGE. */
+
+ s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
+ s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
+ s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
+ s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
+
+ /* start the frequency change */
+ cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
+
+ /* If hclk is staying the same, then we do not need to
+ * re-write the IO or the refresh timings whilst we are changing
+ * speed. */
+
+ local_irq_save(flags);
+
+ /* is our memory clock slowing down? */
+ if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
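+ /* ordering matters: when fclk is dropping, lower the PLL (fvco)
+ * before changing the divisors; when it is rising, set the new
+ * (larger) divisors first, so hclk/pclk never overshoot their
+ * limits part way through the change. */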
+ if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
+ /* not changing PLL, just set the divisors */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
+ /* slow the cpu down, then set divisors */
+
+ s3c_cpufreq_setfvco(&cpu_new);
+ s3c_cpufreq_setdivs(&cpu_new);
+ } else {
+ /* set the divisors, then speed up */
+
+ s3c_cpufreq_setdivs(&cpu_new);
+ s3c_cpufreq_setfvco(&cpu_new);
+ }
+ }
+
+ /* did our memory clock speed up? */
+ if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
+ s3c_cpufreq_setrefresh(&cpu_new);
+ s3c_cpufreq_setio(&cpu_new);
+ }
+
+ /* update our current settings */
+ cpu_cur = cpu_new;
+
+ local_irq_restore(flags);
+
+ /* notify everyone we've done this */
+ cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
+
+ s3c_freq_dbg("%s: finished\n", __func__);
+ return 0;
+
+ err_notpossible:
+ printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ return -EINVAL;
+}
+
+/* s3c_cpufreq_target
+ *
+ * called by the cpufreq core to adjust the frequency that the CPU
+ * is currently running at.
+ */
+
+static int s3c_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *pll;
+ unsigned int index;
+
+ /* avoid repeated calls which cause a needless amount of duplicated
+ * logging output (and waste CPU time re-running the same
+ * calculation) */
+ if (target_freq == last_target)
+ return 0;
+
+ last_target = target_freq;
+
+ s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
+ __func__, policy, target_freq, relation);
+
+ if (ftab) {
+ if (cpufreq_frequency_table_target(policy, ftab,
+ target_freq, relation,
+ &index)) {
+ s3c_freq_dbg("%s: table failed\n", __func__);
+ return -EINVAL;
+ }
+
+ s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
+ target_freq, index, ftab[index].frequency);
+ target_freq = ftab[index].frequency;
+ }
+
+ target_freq *= 1000; /* convert target to Hz */
+
+ /* find the settings for our new frequency */
+
+ if (!pll_reg || cpu_cur.lock_pll) {
+ /* either we've not got any PLL values, or we've locked
+ * to the current one. */
+ pll = NULL;
+ } else {
+ struct cpufreq_policy tmp_policy;
+ int ret;
+
+ /* we keep the cpu pll table in Hz, to ensure we get an
+ * accurate value for the PLL output. */
+
+ tmp_policy.min = policy->min * 1000;
+ tmp_policy.max = policy->max * 1000;
+ tmp_policy.cpu = policy->cpu;
+
+ /* cpufreq_frequency_table_target uses a pointer to 'index'
+ * which is the number of the table entry, not the value of
+ * the table entry's index field. */
+
+ ret = cpufreq_frequency_table_target(&tmp_policy, pll_reg,
+ target_freq, relation,
+ &index);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: no PLL available\n", __func__);
+ goto err_notpossible;
+ }
+
+ pll = pll_reg + index;
+
+ s3c_freq_dbg("%s: target %u => %u\n",
+ __func__, target_freq, pll->frequency);
+
+ target_freq = pll->frequency;
+ }
+
+ return s3c_cpufreq_settarget(policy, target_freq, pll);
+
+ err_notpossible:
+ printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ return -EINVAL;
+}
+
+static unsigned int s3c_cpufreq_get(unsigned int cpu)
+{
+ return clk_get_rate(clk_arm) / 1000;
+}
+
+struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk))
+ printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+
+ return clk;
+}
+
+static int s3c_cpufreq_init(struct cpufreq_policy *policy)
+{
+ printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ policy->cur = s3c_cpufreq_get(0);
+ policy->min = policy->cpuinfo.min_freq = 0;
+ policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+
+ /* feed the latency information from the cpu driver */
+ policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+ if (ftab)
+ cpufreq_frequency_table_cpuinfo(policy, ftab);
+
+ return 0;
+}
+
+static __init int s3c_cpufreq_initclks(void)
+{
+ _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
+ _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
+ clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
+ clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
+ clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
+ clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
+
+ if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
+ IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
+ printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+ return -ENOENT;
+ }
+
+ printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
+ clk_get_rate(clk_fclk) / 1000,
+ clk_get_rate(clk_hclk) / 1000,
+ clk_get_rate(clk_pclk) / 1000,
+ clk_get_rate(clk_arm) / 1000);
+
+ return 0;
+}
+
+static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static struct cpufreq_frequency_table suspend_pll;
+static unsigned int suspend_freq;
+
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ suspend_pll.frequency = clk_get_rate(_clk_mpll);
+ suspend_pll.driver_data = __raw_readl(S3C2410_MPLLCON);
+ suspend_freq = s3c_cpufreq_get(0) * 1000;
+
+ return 0;
+}
+
+static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
+
+ last_target = ~0; /* invalidate last_target setting */
+
+ /* first, find out what speed we resumed at. */
+ s3c_cpufreq_resume_clocks();
+
+ /* whilst we will be called again later on, we try to re-set the
+ * cpu frequencies as soon as possible so that we do not end
+ * up resuming devices and then immediately having to re-apply
+ * a number of settings once those devices have restarted.
+ *
+ * note that devices are not expected to be used until they
+ * have been resumed, at which point they will see the updated
+ * clock settings.
+ */
+
+ ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define s3c_cpufreq_resume NULL
+#define s3c_cpufreq_suspend NULL
+#endif
+
+static struct cpufreq_driver s3c24xx_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = s3c_cpufreq_verify,
+ .target = s3c_cpufreq_target,
+ .get = s3c_cpufreq_get,
+ .init = s3c_cpufreq_init,
+ .suspend = s3c_cpufreq_suspend,
+ .resume = s3c_cpufreq_resume,
+ .name = "s3c24xx",
+};
+
+int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+{
+ if (!info || !info->name) {
+ printk(KERN_ERR "%s: failed to pass valid information\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
+ info->name);
+
+ /* check our driver info has valid data */
+
+ BUG_ON(info->set_refresh == NULL);
+ BUG_ON(info->set_divs == NULL);
+ BUG_ON(info->calc_divs == NULL);
+
+ /* info->set_fvco is optional, depending on whether there
+ * is a need to set the clock code. */
+
+ cpu_cur.info = info;
+
+ /* Note, driver registering should probably update locktime */
+
+ return 0;
+}
+
+int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
+{
+ struct s3c_cpufreq_board *ours;
+
+ if (!board) {
+ printk(KERN_INFO "%s: no board data\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Copy the board information so that each board's definition can
+ * be marked __initdata. */
+
+ ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
+ if (ours == NULL) {
+ printk(KERN_ERR "%s: no memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ *ours = *board;
+ cpu_cur.board = ours;
+
+ return 0;
+}
+
+int __init s3c_cpufreq_auto_io(void)
+{
+ int ret;
+
+ if (!cpu_cur.info->get_iotiming) {
+ printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+ return -ENOENT;
+ }
+
+ printk(KERN_INFO "%s: working out IO settings\n", __func__);
+
+ ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
+ if (ret)
+ printk(KERN_ERR "%s: failed to get timings\n", __func__);
+
+ return ret;
+}
+
+/* if one or the other is zero, return the non-zero value, otherwise the min */
+#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
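+/* e.g. do_min(0, 133) == 133, do_min(400, 133) == 133 */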
+
+/**
+ * s3c_cpufreq_freq_min - take the per-field minimum of two frequency sets.
+ * @dst: The destination structure
+ * @a: One argument.
+ * @b: The other argument.
+ *
+ * Fill @dst with the minimum of each frequency entry in the two
+ * 'struct s3c_freq' arguments; a zero entry is ignored and the
+ * non-zero value is used instead.
+ */
+static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
+ struct s3c_freq *a, struct s3c_freq *b)
+{
+ dst->fclk = do_min(a->fclk, b->fclk);
+ dst->hclk = do_min(a->hclk, b->hclk);
+ dst->pclk = do_min(a->pclk, b->pclk);
+ dst->armclk = do_min(a->armclk, b->armclk);
+}
+
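+/*
+ * calc_locktime() converts a PLL lock time in microseconds into cycles
+ * of the given clock, rounding up: e.g. a 12MHz crystal and a 300us
+ * lock time give 12000000 * 300 / 1000000 = 3600 cycles.
+ */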
+static inline u32 calc_locktime(u32 freq, u32 time_us)
+{
+ u32 result;
+
+ result = freq * time_us;
+ result = DIV_ROUND_UP(result, 1000 * 1000);
+
+ return result;
+}
+
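+/*
+ * Program S3C2410_LOCKTIME from the xtal rate: the locktime_m value
+ * (in microseconds) is converted to xtal cycles and placed in the low
+ * field, with the converted locktime_u value shifted up by
+ * locktime_bits above it.
+ */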
+static void s3c_cpufreq_update_locktime(void)
+{
+ unsigned int bits = cpu_cur.info->locktime_bits;
+ u32 rate = (u32)clk_get_rate(_clk_xtal);
+ u32 val;
+
+ if (bits == 0) {
+ WARN_ON(1);
+ return;
+ }
+
+ val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
+ val |= calc_locktime(rate, cpu_cur.info->locktime_m);
+
+ printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+ __raw_writel(val, S3C2410_LOCKTIME);
+}
+
+static int s3c_cpufreq_build_freq(void)
+{
+ int size, ret;
+
+ if (!cpu_cur.info->calc_freqtable)
+ return -EINVAL;
+
+ kfree(ftab);
+ ftab = NULL;
+
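+ /* the first calc_freqtable() call is used only for its return value
+ * (the number of entries); one extra slot is allocated for the
+ * CPUFREQ_TABLE_END terminator added below. */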
+ size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
+ size++;
+
+ ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
+ if (!ftab) {
+ printk(KERN_ERR "%s: no memory for tables\n", __func__);
+ return -ENOMEM;
+ }
+
+ ftab_size = size;
+
+ ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
+ s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
+
+ return 0;
+}
+
+static int __init s3c_cpufreq_initcall(void)
+{
+ int ret = 0;
+
+ if (cpu_cur.info && cpu_cur.board) {
+ ret = s3c_cpufreq_initclks();
+ if (ret)
+ goto out;
+
+ /* get current settings */
+ s3c_cpufreq_getcur(&cpu_cur);
+ s3c_cpufreq_show("cur", &cpu_cur);
+
+ if (cpu_cur.board->auto_io) {
+ ret = s3c_cpufreq_auto_io();
+ if (ret) {
+ printk(KERN_ERR "%s: failed to get io timing\n",
+ __func__);
+ goto out;
+ }
+ }
+
+ if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
+ printk(KERN_ERR "%s: no IO support registered\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!cpu_cur.info->need_pll)
+ cpu_cur.lock_pll = 1;
+
+ s3c_cpufreq_update_locktime();
+
+ s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
+ &cpu_cur.info->max);
+
+ if (cpu_cur.info->calc_freqtable)
+ s3c_cpufreq_build_freq();
+
+ ret = cpufreq_register_driver(&s3c24xx_driver);
+ }
+
+ out:
+ return ret;
+}
+
+late_initcall(s3c_cpufreq_initcall);
+
+/**
+ * s3c_plltab_register - register CPU PLL table.
+ * @plls: The list of PLL entries.
+ * @plls_no: The size of the PLL entries @plls.
+ *
+ * Register the given set of PLLs with the system.
+ */
+int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
+ unsigned int plls_no)
+{
+ struct cpufreq_frequency_table *vals;
+ unsigned int size;
+
+ size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
+
+ vals = kmalloc(size, GFP_KERNEL);
+ if (vals) {
+ memcpy(vals, plls, sizeof(*plls) * plls_no);
+ pll_reg = vals;
+
+ /* append a terminating entry; the caller's table does not need
+ * to include one */
+ vals += plls_no;
+ vals->frequency = CPUFREQ_TABLE_END;
+
+ printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+ } else
+ printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+
+ return vals ? 0 : -ENOMEM;
+}
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 27cacb52479..13bb4bae64e 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -87,7 +87,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
freqs.old = clk_get_rate(armclk) / 1000;
freqs.new = s3c64xx_freq_table[i].frequency;
freqs.flags = 0;
- dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].driver_data];
if (freqs.old == freqs.new)
return 0;
@@ -104,7 +104,8 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret != 0) {
pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
- goto err;
+ freqs.new = freqs.old;
+ goto post_notify;
}
}
#endif
@@ -113,10 +114,13 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (ret < 0) {
pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret);
- goto err;
+ freqs.new = freqs.old;
}
+post_notify:
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+ if (ret)
+ goto err;
#ifdef CONFIG_REGULATOR
if (vddarm && freqs.new < freqs.old) {
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index f740b134d27..77a210975fc 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -71,7 +71,7 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
local_irq_disable();
clockspeed_reg = *cpuctl & ~0x03;
- *cpuctl = clockspeed_reg | sc520_freq_table[state].index;
+ *cpuctl = clockspeed_reg | sc520_freq_table[state].driver_data;
local_irq_enable();
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 306ae462bba..93061a40877 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -308,17 +308,17 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *table =
&us2e_freq_table[cpu].table[0];
- table[0].index = 0;
+ table[0].driver_data = 0;
table[0].frequency = clock_tick / 1;
- table[1].index = 1;
+ table[1].driver_data = 1;
table[1].frequency = clock_tick / 2;
- table[2].index = 2;
+ table[2].driver_data = 2;
table[2].frequency = clock_tick / 4;
- table[2].index = 3;
+ table[2].driver_data = 3;
table[2].frequency = clock_tick / 6;
- table[2].index = 4;
+ table[2].driver_data = 4;
table[2].frequency = clock_tick / 8;
- table[2].index = 5;
+ table[2].driver_data = 5;
table[3].frequency = CPUFREQ_TABLE_END;
policy->cpuinfo.transition_latency = 0;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index c71ee142347..880ee293d61 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -169,13 +169,13 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *table =
&us3_freq_table[cpu].table[0];
- table[0].index = 0;
+ table[0].driver_data = 0;
table[0].frequency = clock_tick / 1;
- table[1].index = 1;
+ table[1].driver_data = 1;
table[1].frequency = clock_tick / 2;
- table[2].index = 2;
+ table[2].driver_data = 2;
table[2].frequency = clock_tick / 32;
- table[3].index = 0;
+ table[3].driver_data = 0;
table[3].frequency = CPUFREQ_TABLE_END;
policy->cpuinfo.transition_latency = 0;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 156829f4576..c3efa7f2a90 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -250,11 +250,11 @@ static int spear_cpufreq_driver_init(void)
}
for (i = 0; i < cnt; i++) {
- freq_tbl[i].index = i;
+ freq_tbl[i].driver_data = i;
freq_tbl[i].frequency = be32_to_cpup(val++);
}
- freq_tbl[i].index = i;
+ freq_tbl[i].driver_data = i;
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
spear_cpufreq.freq_tbl = freq_tbl;
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 618e6f417b1..0915e712fbd 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -79,11 +79,11 @@ static struct cpufreq_driver centrino_driver;
/* Computes the correct form for IA32_PERF_CTL MSR for a particular
frequency/voltage operating point; frequency in MHz, volts in mV.
- This is stored as "index" in the structure. */
+ This is stored as "driver_data" in the structure. */
#define OP(mhz, mv) \
{ \
.frequency = (mhz) * 1000, \
- .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \
+ .driver_data = (((mhz)/100) << 8) | ((mv - 700) / 16) \
}
/*
@@ -307,7 +307,7 @@ static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe)
per_cpu(centrino_model, cpu)->op_points[i].frequency
!= CPUFREQ_TABLE_END;
i++) {
- if (msr == per_cpu(centrino_model, cpu)->op_points[i].index)
+ if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data)
return per_cpu(centrino_model, cpu)->
op_points[i].frequency;
}
@@ -501,7 +501,7 @@ static int centrino_target (struct cpufreq_policy *policy,
break;
}
- msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;
+ msr = per_cpu(centrino_model, cpu)->op_points[newstate].driver_data;
if (first_cpu) {
rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index c74c0e130ef..cd66b85d927 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -28,17 +28,16 @@
#include <linux/io.h>
#include <linux/suspend.h>
-/* Frequency table index must be sequential starting at 0 */
static struct cpufreq_frequency_table freq_table[] = {
- { 0, 216000 },
- { 1, 312000 },
- { 2, 456000 },
- { 3, 608000 },
- { 4, 760000 },
- { 5, 816000 },
- { 6, 912000 },
- { 7, 1000000 },
- { 8, CPUFREQ_TABLE_END },
+ { .frequency = 216000 },
+ { .frequency = 312000 },
+ { .frequency = 456000 },
+ { .frequency = 608000 },
+ { .frequency = 760000 },
+ { .frequency = 816000 },
+ { .frequency = 912000 },
+ { .frequency = 1000000 },
+ { .frequency = CPUFREQ_TABLE_END },
};
#define NUM_CPUS 2
@@ -138,12 +137,12 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
if (ret) {
pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n",
freqs.new);
- return ret;
+ freqs.new = freqs.old;
}
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
- return 0;
+ return ret;
}
static unsigned long tegra_cpu_highest_speed(void)