author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-12 16:53:38 +1000
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-12 16:53:38 +1000
commit    | bc47ab0241c7c86da4f5e5f82fbca7d45387c18d (patch)
tree      | b9c33ae8b6de43e44cc5fcbaa3e4a15f18a5ed42 /arch/sh/kernel/cpu/clock.c
parent    | 37f9ef553bed630957e025504cdcbc76f5de49d5 (diff)
parent    | 8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
Merge commit 'origin/master' into next
Manual merge of:
arch/powerpc/kernel/asm-offsets.c
Diffstat (limited to 'arch/sh/kernel/cpu/clock.c')
-rw-r--r-- | arch/sh/kernel/cpu/clock.c | 606
1 file changed, 413 insertions, 193 deletions
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 1dc896483b5..f3a46be2ae8 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -1,15 +1,19 @@
 /*
  * arch/sh/kernel/cpu/clock.c - SuperH clock framework
  *
- * Copyright (C) 2005, 2006, 2007  Paul Mundt
+ * Copyright (C) 2005 - 2009  Paul Mundt
  *
  * This clock framework is derived from the OMAP version by:
  *
- *	Copyright (C) 2004 - 2005 Nokia Corporation
+ *	Copyright (C) 2004 - 2008 Nokia Corporation
  *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
  *
  * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
  *
+ * With clkdev bits:
+ *
+ *	Copyright (C) 2008 Russell King.
+ *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
@@ -19,134 +23,159 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
-#include <linux/kref.h>
 #include <linux/kobject.h>
 #include <linux/sysdev.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
-#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/cpufreq.h>
 #include <asm/clock.h>
-#include <asm/timer.h>
+#include <asm/machvec.h>
 
 static LIST_HEAD(clock_list);
 static DEFINE_SPINLOCK(clock_lock);
 static DEFINE_MUTEX(clock_list_sem);
 
-/*
- * Each subtype is expected to define the init routines for these clocks,
- * as each subtype (or processor family) will have these clocks at the
- * very least.  These are all provided through the CPG, which even some of
- * the more quirky parts (such as ST40, SH4-202, etc.) still have.
- *
- * The processor-specific code is expected to register any additional
- * clock sources that are of interest.
- */
-static struct clk master_clk = {
-	.name		= "master_clk",
-	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
-	.rate		= CONFIG_SH_PCLK_FREQ,
-};
+void clk_rate_table_build(struct clk *clk,
+			  struct cpufreq_frequency_table *freq_table,
+			  int nr_freqs,
+			  struct clk_div_mult_table *src_table,
+			  unsigned long *bitmap)
+{
+	unsigned long mult, div;
+	unsigned long freq;
+	int i;
 
-static struct clk module_clk = {
-	.name		= "module_clk",
-	.parent		= &master_clk,
-	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
-};
+	for (i = 0; i < nr_freqs; i++) {
+		div = 1;
+		mult = 1;
 
-static struct clk bus_clk = {
-	.name		= "bus_clk",
-	.parent		= &master_clk,
-	.flags		= CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES,
-};
+		if (src_table->divisors && i < src_table->nr_divisors)
+			div = src_table->divisors[i];
 
-static struct clk cpu_clk = {
-	.name		= "cpu_clk",
-	.parent		= &master_clk,
-	.flags		= CLK_ALWAYS_ENABLED,
-};
+		if (src_table->multipliers && i < src_table->nr_multipliers)
+			mult = src_table->multipliers[i];
 
-/*
- * The ordering of these clocks matters, do not change it.
- */
-static struct clk *onchip_clocks[] = {
-	&master_clk,
-	&module_clk,
-	&bus_clk,
-	&cpu_clk,
-};
+		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
+			freq = CPUFREQ_ENTRY_INVALID;
+		else
+			freq = clk->parent->rate * mult / div;
 
-static void propagate_rate(struct clk *clk)
+		freq_table[i].index = i;
+		freq_table[i].frequency = freq;
+	}
+
+	/* Termination entry */
+	freq_table[i].index = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+long clk_rate_table_round(struct clk *clk,
+			  struct cpufreq_frequency_table *freq_table,
+			  unsigned long rate)
 {
-	struct clk *clkp;
+	unsigned long rate_error, rate_error_prev = ~0UL;
+	unsigned long rate_best_fit = rate;
+	unsigned long highest, lowest;
+	int i;
+
+	highest = lowest = 0;
+
+	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		unsigned long freq = freq_table[i].frequency;
 
-	list_for_each_entry(clkp, &clock_list, node) {
-		if (likely(clkp->parent != clk))
+		if (freq == CPUFREQ_ENTRY_INVALID)
 			continue;
-		if (likely(clkp->ops && clkp->ops->recalc))
-			clkp->ops->recalc(clkp);
-		if (unlikely(clkp->flags & CLK_RATE_PROPAGATES))
-			propagate_rate(clkp);
+
+		if (freq > highest)
+			highest = freq;
+		if (freq < lowest)
+			lowest = freq;
+
+		rate_error = abs(freq - rate);
+		if (rate_error < rate_error_prev) {
+			rate_best_fit = freq;
+			rate_error_prev = rate_error;
+		}
+
+		if (rate_error == 0)
+			break;
 	}
+
+	if (rate >= highest)
+		rate_best_fit = highest;
+	if (rate <= lowest)
+		rate_best_fit = lowest;
+
+	return rate_best_fit;
 }
 
-static int __clk_enable(struct clk *clk)
+int clk_rate_table_find(struct clk *clk,
+			struct cpufreq_frequency_table *freq_table,
+			unsigned long rate)
 {
-	/*
-	 * See if this is the first time we're enabling the clock, some
-	 * clocks that are always enabled still require "special"
-	 * initialization. This is especially true if the clock mode
-	 * changes and the clock needs to hunt for the proper set of
-	 * divisors to use before it can effectively recalc.
-	 */
-	if (unlikely(atomic_read(&clk->kref.refcount) == 1))
-		if (clk->ops && clk->ops->init)
-			clk->ops->init(clk);
+	int i;
 
-	kref_get(&clk->kref);
+	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		unsigned long freq = freq_table[i].frequency;
 
-	if (clk->flags & CLK_ALWAYS_ENABLED)
-		return 0;
+		if (freq == CPUFREQ_ENTRY_INVALID)
+			continue;
 
-	if (likely(clk->ops && clk->ops->enable))
-		clk->ops->enable(clk);
+		if (freq == rate)
+			return i;
+	}
 
-	return 0;
+	return -ENOENT;
 }
 
-int clk_enable(struct clk *clk)
+/* Used for clocks that always have same value as the parent clock */
+unsigned long followparent_recalc(struct clk *clk)
 {
-	unsigned long flags;
-	int ret;
-
-	if (!clk)
-		return -EINVAL;
+	return clk->parent ? clk->parent->rate : 0;
+}
 
-	clk_enable(clk->parent);
+int clk_reparent(struct clk *child, struct clk *parent)
+{
+	list_del_init(&child->sibling);
+	if (parent)
+		list_add(&child->sibling, &parent->children);
+	child->parent = parent;
 
-	spin_lock_irqsave(&clock_lock, flags);
-	ret = __clk_enable(clk);
-	spin_unlock_irqrestore(&clock_lock, flags);
+	/* now do the debugfs renaming to reattach the child
+	   to the proper parent */
 
-	return ret;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(clk_enable);
 
-static void clk_kref_release(struct kref *kref)
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
 {
-	/* Nothing to do */
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &tclk->children, sibling) {
+		if (clkp->ops && clkp->ops->recalc)
+			clkp->rate = clkp->ops->recalc(clkp);
+
+		propagate_rate(clkp);
+	}
 }
 
 static void __clk_disable(struct clk *clk)
 {
-	int count = kref_put(&clk->kref, clk_kref_release);
-
-	if (clk->flags & CLK_ALWAYS_ENABLED)
+	if (clk->usecount == 0) {
+		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
+		       clk->name);
+		WARN_ON(1);
 		return;
+	}
 
-	if (!count) {	/* count reaches zero, disable the clock */
+	if (!(--clk->usecount)) {
 		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
+		if (likely(clk->parent))
+			__clk_disable(clk->parent);
 	}
 }
@@ -160,28 +189,97 @@ void clk_disable(struct clk *clk)
 	spin_lock_irqsave(&clock_lock, flags);
 	__clk_disable(clk);
 	spin_unlock_irqrestore(&clock_lock, flags);
-
-	clk_disable(clk->parent);
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
+static int __clk_enable(struct clk *clk)
+{
+	int ret = 0;
+
+	if (clk->usecount++ == 0) {
+		if (clk->parent) {
+			ret = __clk_enable(clk->parent);
+			if (unlikely(ret))
+				goto err;
+		}
+
+		if (clk->ops && clk->ops->enable) {
+			ret = clk->ops->enable(clk);
+			if (ret) {
+				if (clk->parent)
+					__clk_disable(clk->parent);
+				goto err;
+			}
+		}
+	}
+
+	return ret;
+err:
+	clk->usecount--;
+	return ret;
+}
+
+int clk_enable(struct clk *clk)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!clk)
+		return -EINVAL;
+
+	spin_lock_irqsave(&clock_lock, flags);
+	ret = __clk_enable(clk);
+	spin_unlock_irqrestore(&clock_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+static LIST_HEAD(root_clks);
+
+/**
+ * recalculate_root_clocks - recalculate and propagate all root clocks
+ *
+ * Recalculates all root clocks (clocks with no parent), which if the
+ * clock's .recalc is set correctly, should also propagate their rates.
+ * Called at init.
+ */
+void recalculate_root_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &root_clks, sibling) {
+		if (clkp->ops && clkp->ops->recalc)
+			clkp->rate = clkp->ops->recalc(clkp);
+		propagate_rate(clkp);
+	}
+}
+
 int clk_register(struct clk *clk)
 {
+	if (clk == NULL || IS_ERR(clk))
+		return -EINVAL;
+
+	/*
+	 * trap out already registered clocks
+	 */
+	if (clk->node.next || clk->node.prev)
+		return 0;
+
 	mutex_lock(&clock_list_sem);
 
-	list_add(&clk->node, &clock_list);
-	kref_init(&clk->kref);
+	INIT_LIST_HEAD(&clk->children);
+	clk->usecount = 0;
 
-	mutex_unlock(&clock_list_sem);
+	if (clk->parent)
+		list_add(&clk->sibling, &clk->parent->children);
+	else
+		list_add(&clk->sibling, &root_clks);
 
-	if (clk->flags & CLK_ALWAYS_ENABLED) {
-		pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
-		if (clk->ops && clk->ops->init)
-			clk->ops->init(clk);
-		if (clk->ops && clk->ops->enable)
-			clk->ops->enable(clk);
-		pr_debug( "Enabled.");
-	}
+	list_add(&clk->node, &clock_list);
+	if (clk->ops && clk->ops->init)
+		clk->ops->init(clk);
+	mutex_unlock(&clock_list_sem);
 
 	return 0;
 }
@@ -190,11 +288,21 @@ EXPORT_SYMBOL_GPL(clk_register);
 void clk_unregister(struct clk *clk)
 {
 	mutex_lock(&clock_list_sem);
+	list_del(&clk->sibling);
 	list_del(&clk->node);
 	mutex_unlock(&clock_list_sem);
 }
 EXPORT_SYMBOL_GPL(clk_unregister);
 
+static void clk_enable_init_clocks(void)
+{
+	struct clk *clkp;
+
+	list_for_each_entry(clkp, &clock_list, node)
+		if (clkp->flags & CLK_ENABLE_ON_INIT)
+			clk_enable(clkp);
+}
+
 unsigned long clk_get_rate(struct clk *clk)
 {
 	return clk->rate;
@@ -210,56 +318,59 @@ EXPORT_SYMBOL_GPL(clk_set_rate);
 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
 {
 	int ret = -EOPNOTSUPP;
+	unsigned long flags;
 
-	if (likely(clk->ops && clk->ops->set_rate)) {
-		unsigned long flags;
+	spin_lock_irqsave(&clock_lock, flags);
 
-		spin_lock_irqsave(&clock_lock, flags);
+	if (likely(clk->ops && clk->ops->set_rate)) {
 		ret = clk->ops->set_rate(clk, rate, algo_id);
-		spin_unlock_irqrestore(&clock_lock, flags);
+		if (ret != 0)
+			goto out_unlock;
+	} else {
+		clk->rate = rate;
+		ret = 0;
 	}
 
-	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
-		propagate_rate(clk);
+	if (clk->ops && clk->ops->recalc)
+		clk->rate = clk->ops->recalc(clk);
 
-	return ret;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+	propagate_rate(clk);
 
-void clk_recalc_rate(struct clk *clk)
-{
-	if (likely(clk->ops && clk->ops->recalc)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&clock_lock, flags);
-		clk->ops->recalc(clk);
-		spin_unlock_irqrestore(&clock_lock, flags);
-	}
+out_unlock:
+	spin_unlock_irqrestore(&clock_lock, flags);
 
-	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
-		propagate_rate(clk);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(clk_recalc_rate);
+EXPORT_SYMBOL_GPL(clk_set_rate_ex);
 
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
+	unsigned long flags;
 	int ret = -EINVAL;
-	struct clk *old;
 
 	if (!parent || !clk)
 		return ret;
+	if (clk->parent == parent)
+		return 0;
 
-	old = clk->parent;
-	if (likely(clk->ops && clk->ops->set_parent)) {
-		unsigned long flags;
-		spin_lock_irqsave(&clock_lock, flags);
-		ret = clk->ops->set_parent(clk, parent);
-		spin_unlock_irqrestore(&clock_lock, flags);
-		clk->parent = (ret ? old : parent);
-	}
+	spin_lock_irqsave(&clock_lock, flags);
+	if (clk->usecount == 0) {
+		if (clk->ops->set_parent)
+			ret = clk->ops->set_parent(clk, parent);
+		else
+			ret = clk_reparent(clk, parent);
+
+		if (ret == 0) {
+			pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
+				 clk->name, clk->parent->name, clk->rate);
+			if (clk->ops->recalc)
+				clk->rate = clk->ops->recalc(clk);
+			propagate_rate(clk);
+		}
+	} else
+		ret = -EBUSY;
+	spin_unlock_irqrestore(&clock_lock, flags);
 
-	if (unlikely(clk->flags & CLK_RATE_PROPAGATES))
-		propagate_rate(clk);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_parent);
@@ -287,14 +398,69 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 EXPORT_SYMBOL_GPL(clk_round_rate);
 
 /*
+ * Find the correct struct clk for the device and connection ID.
+ * We do slightly fuzzy matching here:
+ *  An entry with a NULL ID is assumed to be a wildcard.
+ *  If an entry has a device ID, it must match
+ *  If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following
+ * order of precidence: dev+con > dev only > con only.
+ */
+static struct clk *clk_find(const char *dev_id, const char *con_id)
+{
+	struct clk_lookup *p;
+	struct clk *clk = NULL;
+	int match, best = 0;
+
+	list_for_each_entry(p, &clock_list, node) {
+		match = 0;
+		if (p->dev_id) {
+			if (!dev_id || strcmp(p->dev_id, dev_id))
+				continue;
+			match += 2;
+		}
+		if (p->con_id) {
+			if (!con_id || strcmp(p->con_id, con_id))
+				continue;
+			match += 1;
+		}
+		if (match == 0)
+			continue;
+
+		if (match > best) {
+			clk = p->clk;
+			best = match;
+		}
+	}
+	return clk;
+}
+
+struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+	struct clk *clk;
+
+	mutex_lock(&clock_list_sem);
+	clk = clk_find(dev_id, con_id);
+	mutex_unlock(&clock_list_sem);
+
+	return clk ? clk : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(clk_get_sys);
+
+/*
  * Returns a clock. Note that we first try to use device id on the bus
  * and clock name. If this fails, we try to use clock name only.
  */
 struct clk *clk_get(struct device *dev, const char *id)
 {
+	const char *dev_id = dev ? dev_name(dev) : NULL;
 	struct clk *p, *clk = ERR_PTR(-ENOENT);
 	int idno;
 
+	clk = clk_get_sys(dev_id, id);
+	if (clk && !IS_ERR(clk))
+		return clk;
+
 	if (dev == NULL || dev->bus != &platform_bus_type)
 		idno = -1;
 	else
@@ -330,36 +496,6 @@ void clk_put(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_put);
 
-void __init __attribute__ ((weak))
-arch_init_clk_ops(struct clk_ops **ops, int type)
-{
-}
-
-int __init __attribute__ ((weak))
-arch_clk_init(void)
-{
-	return 0;
-}
-
-static int show_clocks(char *buf, char **start, off_t off,
-		       int len, int *eof, void *data)
-{
-	struct clk *clk;
-	char *p = buf;
-
-	list_for_each_entry_reverse(clk, &clock_list, node) {
-		unsigned long rate = clk_get_rate(clk);
-
-		p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
-			     rate / 1000000, (rate % 1000000) / 10000,
-			     ((clk->flags & CLK_ALWAYS_ENABLED) ||
-			      (atomic_read(&clk->kref.refcount) != 1)) ?
-			     "enabled" : "disabled");
-	}
-
-	return p - buf;
-}
-
 #ifdef CONFIG_PM
 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 {
@@ -369,20 +505,22 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 	switch (state.event) {
 	case PM_EVENT_ON:
 		/* Resumeing from hibernation */
-		if (prev_state.event == PM_EVENT_FREEZE) {
-			list_for_each_entry(clkp, &clock_list, node)
-				if (likely(clkp->ops)) {
-					unsigned long rate = clkp->rate;
-
-					if (likely(clkp->ops->set_parent))
-						clkp->ops->set_parent(clkp,
-							clkp->parent);
-					if (likely(clkp->ops->set_rate))
-						clkp->ops->set_rate(clkp,
-							rate, NO_CHANGE);
-					else if (likely(clkp->ops->recalc))
-						clkp->ops->recalc(clkp);
-				}
+		if (prev_state.event != PM_EVENT_FREEZE)
+			break;
+
+		list_for_each_entry(clkp, &clock_list, node) {
+			if (likely(clkp->ops)) {
+				unsigned long rate = clkp->rate;
+
+				if (likely(clkp->ops->set_parent))
+					clkp->ops->set_parent(clkp,
+						clkp->parent);
+				if (likely(clkp->ops->set_rate))
+					clkp->ops->set_rate(clkp,
+						rate, NO_CHANGE);
+				else if (likely(clkp->ops->recalc))
+					clkp->rate = clkp->ops->recalc(clkp);
+			}
 		}
 		break;
 	case PM_EVENT_FREEZE:
@@ -426,34 +564,116 @@ subsys_initcall(clk_sysdev_init);
 
 int __init clk_init(void)
 {
-	int i, ret = 0;
-
-	BUG_ON(!master_clk.rate);
-
-	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
-		struct clk *clk = onchip_clocks[i];
+	int ret;
 
-		arch_init_clk_ops(&clk->ops, i);
-		ret |= clk_register(clk);
+	ret = arch_clk_init();
+	if (unlikely(ret)) {
+		pr_err("%s: CPU clock registration failed.\n", __func__);
+		return ret;
 	}
 
-	ret |= arch_clk_init();
+	if (sh_mv.mv_clk_init) {
+		ret = sh_mv.mv_clk_init();
+		if (unlikely(ret)) {
+			pr_err("%s: machvec clock initialization failed.\n",
+			       __func__);
+			return ret;
+		}
+	}
 
 	/* Kick the child clocks.. */
-	propagate_rate(&master_clk);
-	propagate_rate(&bus_clk);
+	recalculate_root_clocks();
+
+	/* Enable the necessary init clocks */
+	clk_enable_init_clocks();
 
 	return ret;
 }
 
-static int __init clk_proc_init(void)
+/*
+ * debugfs support to trace clock tree hierarchy and attributes
+ */
+static struct dentry *clk_debugfs_root;
+
+static int clk_debugfs_register_one(struct clk *c)
 {
-	struct proc_dir_entry *p;
-	p = create_proc_read_entry("clocks", S_IRUSR, NULL,
-				   show_clocks, NULL);
-	if (unlikely(!p))
-		return -EINVAL;
+	int err;
+	struct dentry *d, *child;
+	struct clk *pa = c->parent;
+	char s[255];
+	char *p = s;
+
+	p += sprintf(p, "%s", c->name);
+	if (c->id >= 0)
+		sprintf(p, ":%d", c->id);
+	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
+	if (!d)
+		return -ENOMEM;
+	c->dentry = d;
+
+	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
+	if (!d) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	return 0;
+err_out:
+	d = c->dentry;
+	list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
+		debugfs_remove(child);
+	debugfs_remove(c->dentry);
+	return err;
+}
+
+static int clk_debugfs_register(struct clk *c)
+{
+	int err;
+	struct clk *pa = c->parent;
+
+	if (pa && !pa->dentry) {
+		err = clk_debugfs_register(pa);
+		if (err)
+			return err;
+	}
+
+	if (!c->dentry) {
+		err = clk_debugfs_register_one(c);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int __init clk_debugfs_init(void)
+{
+	struct clk *c;
+	struct dentry *d;
+	int err;
+
+	d = debugfs_create_dir("clock", NULL);
+	if (!d)
+		return -ENOMEM;
+	clk_debugfs_root = d;
+
+	list_for_each_entry(c, &clock_list, node) {
+		err = clk_debugfs_register(c);
+		if (err)
+			goto err_out;
+	}
 	return 0;
+err_out:
+	debugfs_remove(clk_debugfs_root); /* REVISIT: Cleanup correctly */
+	return err;
 }
-subsys_initcall(clk_proc_init);
+late_initcall(clk_debugfs_init);
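Usage note: the rework above replaces the kref/CLK_ALWAYS_ENABLED bookkeeping with an explicit usecount, parent/child rate propagation, and a clkdev-style lookup path via clk_find()/clk_get_sys(). The sketch below is not part of the patch; it only illustrates how a consumer driver might use the resulting API. The example_* names and the "peripheral_clk" connection ID are hypothetical placeholders, since the actual IDs depend on the clk_lookup entries a given CPU or board registers.

    /*
     * Illustrative sketch only: a driver consuming the reworked SH clock API.
     * "peripheral_clk" and the example_* identifiers are hypothetical.
     */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/device.h>

    static struct clk *example_clk;

    static int example_probe(struct device *dev)
    {
            int ret;

            /* clk_get() now tries clk_get_sys(dev_id, con_id) before the legacy lookup */
            example_clk = clk_get(dev, "peripheral_clk");
            if (IS_ERR(example_clk))
                    return PTR_ERR(example_clk);

            /* First enable raises usecount from 0 and enables the parent chain */
            ret = clk_enable(example_clk);
            if (ret) {
                    clk_put(example_clk);
                    return ret;
            }

            dev_info(dev, "clock running at %lu Hz\n", clk_get_rate(example_clk));
            return 0;
    }

    static void example_remove(struct device *dev)
    {
            /* Last disable drops usecount to 0 and disables the parent chain */
            clk_disable(example_clk);
            clk_put(example_clk);
    }

Because __clk_enable() and __clk_disable() now walk the parent chain themselves, a consumer only manages its own clock: the first clk_enable() brings up the parents and the final clk_disable() releases them.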