| author | Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com> | 2013-10-29 12:18:38 +0000 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2013-10-30 00:48:25 +0100 |
| commit | 4d910d5bb56c870e5a737b71d4130ec38dc4faea (patch) | |
| tree | 36215d747d59adeaaa84af0ce19998936a814bf1 /arch/arm/mach-vexpress | |
| parent | f7cd2d835e0f17cde2e5cead92be0099d7e92a7c (diff) | |
ARM: vexpress/TC2: add cpu clock support
On TC2 the CPU clocks are controlled by the external M3 microcontroller,
and the SPC provides the interface between the CPUs and that power
controller. The generic cpufreq drivers use the clock APIs to obtain the
CPU clocks.

This patch adds virtual SPC clocks for all the CPUs so that the CPU
operating frequency can be controlled via the clock framework.
Signed-off-by: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Pawel Moll <Pawel.Moll@arm.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
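The description above assumes that consumers reach these virtual clocks through the common clock framework. The sketch below is a minimal, hypothetical consumer (not part of this patch): the helper name and its error handling are illustrative, and only clk_get(), clk_set_rate(), clk_put() and get_cpu_device() are real kernel APIs. The clkdev lookup registered in the patch below, keyed on dev_name(cpu_dev) with a NULL connection id, is what makes clk_get(cpu_dev, NULL) resolve to a CPU's virtual SPC clock.

```c
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>

/*
 * Hypothetical consumer sketch (not part of this patch): change a CPU's
 * operating frequency through the clock framework.
 */
static int example_set_cpu_freq(unsigned int cpu, unsigned int target_khz)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	struct clk *clk;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	clk = clk_get(cpu_dev, NULL);	/* virtual SPC clock for this CPU */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clock rates are expressed in Hz, cpufreq tables in kHz */
	ret = clk_set_rate(clk, target_khz * 1000UL);
	clk_put(clk);
	return ret;
}
```

A clk_set_rate() call on such a clock ends up in spc_set_rate() in the patch below, which converts the Hz value back to kHz before asking the SPC to switch the operating point.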
Diffstat (limited to 'arch/arm/mach-vexpress')
-rw-r--r-- | arch/arm/mach-vexpress/spc.c | 102
1 file changed, 102 insertions, 0 deletions
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index de0ca3615c3..3532e26c911 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -17,6 +17,9 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -438,3 +441,102 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
 
 	return 0;
 }
+
+struct clk_spc {
+	struct clk_hw hw;
+	int cluster;
+};
+
+#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+	u32 freq;
+
+	if (ve_spc_get_performance(spc->cluster, &freq))
+		return -EIO;
+
+	return freq * 1000;
+}
+
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
+		unsigned long *parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+
+	return ve_spc_round_performance(spc->cluster, drate);
+}
+
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct clk_spc *spc = to_clk_spc(hw);
+
+	return ve_spc_set_performance(spc->cluster, rate / 1000);
+}
+
+static struct clk_ops clk_spc_ops = {
+	.recalc_rate = spc_recalc_rate,
+	.round_rate = spc_round_rate,
+	.set_rate = spc_set_rate,
+};
+
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
+{
+	struct clk_init_data init;
+	struct clk_spc *spc;
+
+	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
+	if (!spc) {
+		pr_err("could not allocate spc clk\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spc->hw.init = &init;
+	spc->cluster = topology_physical_package_id(cpu_dev->id);
+
+	init.name = dev_name(cpu_dev);
+	init.ops = &clk_spc_ops;
+	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+	init.num_parents = 0;
+
+	return devm_clk_register(cpu_dev, &spc->hw);
+}
+
+static int __init ve_spc_clk_init(void)
+{
+	int cpu;
+	struct clk *clk;
+
+	if (!info)
+		return 0; /* Continue only if SPC is initialised */
+
+	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
+		pr_err("failed to build OPP table\n");
+		return -ENODEV;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_warn("failed to get cpu%d device\n", cpu);
+			continue;
+		}
+		clk = ve_spc_clk_register(cpu_dev);
+		if (IS_ERR(clk)) {
+			pr_warn("failed to register cpu%d clock\n", cpu);
+			continue;
+		}
+		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
+			pr_warn("failed to register cpu%d clock lookup\n", cpu);
+			continue;
+		}
+
+		if (ve_init_opp_table(cpu_dev))
+			pr_warn("failed to initialise cpu%d opp table\n", cpu);
+	}
+
+	return 0;
+}
+module_init(ve_spc_clk_init);
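As a usage note (again hypothetical, not from the patch): because spc->cluster is derived from topology_physical_package_id(), every CPU in a cluster gets a virtual clock backed by the same SPC performance domain, so a rate change requested on one CPU's clock retargets the whole cluster. A consumer can also let the framework snap an arbitrary request to a supported OPP before committing it; the sketch below uses only the standard clk_round_rate()/clk_set_rate() calls, and the helper name is made up.

```c
#include <linux/clk.h>
#include <linux/errno.h>

/* Hypothetical helper: snap a requested rate to a supported OPP, then apply it. */
static int example_apply_nearest_rate(struct clk *clk, unsigned long requested_hz)
{
	long rounded;

	/* Ends up in spc_round_rate(), which consults the SPC OPP table. */
	rounded = clk_round_rate(clk, requested_hz);
	if (rounded <= 0)
		return rounded ? (int)rounded : -EINVAL;

	/* Ends up in spc_set_rate(), which converts Hz back to kHz for the SPC. */
	return clk_set_rate(clk, rounded);
}
```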