author     Paul Mundt <lethal@linux-sh.org>    2009-05-12 05:30:10 +0900
committer  Paul Mundt <lethal@linux-sh.org>    2009-05-12 05:30:10 +0900
commit     ae891a4264c91246c0b4c22be68b9838747ae48d (patch)
tree       18da25fbd0322f6d2f4288dbf741609e53ae297e /arch/sh/kernel/cpu/clock.c
parent     154502e160e02dee7b00ec2149762ae5d48e0bb4 (diff)
sh: clkfwk: Fix up the clk_enable() error path.
There are a couple of instances where a clk_enable() can fail, which the SH-Mobile code presently handles, but doesn't get reported all the way back up. This fixes up the return type so the errors make it all the way down to the drivers.

Additionally, we now also error out properly if the parent enable fails.

Prep work for aggressively turning off unused clocks on boot.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
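For context on what this buys callers, here is a minimal, hypothetical driver-probe sketch (not part of this commit; the device pointer, the "peripheral_clk" name and the function name are illustrative) showing how the error from clk_enable() can now be propagated instead of being silently dropped:

#include <linux/clk.h>
#include <linux/err.h>

static int example_probe_clock(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "peripheral_clk");   /* illustrative clock name */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);                  /* now returns an error code */
        if (ret) {
                /* e.g. the parent enable failed; don't pretend the clock is on */
                clk_put(clk);
                return ret;
        }

        return 0;
}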
Diffstat (limited to 'arch/sh/kernel/cpu/clock.c')
-rw-r--r--  arch/sh/kernel/cpu/clock.c | 71
1 file changed, 46 insertions(+), 25 deletions(-)
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index c683be5ba8b..e027fe5898d 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -93,57 +93,78 @@ void propagate_rate(struct clk *tclk)
 	}
 }
 
-static int __clk_enable(struct clk *clk)
+static void __clk_disable(struct clk *clk)
 {
-	if (clk->usecount++ == 0) {
-		if (clk->parent)
-			__clk_enable(clk->parent);
-
-		if (clk->ops && clk->ops->enable)
-			clk->ops->enable(clk);
+	if (clk->usecount == 0) {
+		printk(KERN_ERR "Trying disable clock %s with 0 usecount\n",
+		       clk->name);
+		WARN_ON(1);
+		return;
 	}
 
-	return 0;
+	if (!(--clk->usecount)) {
+		if (likely(clk->ops && clk->ops->disable))
+			clk->ops->disable(clk);
+		if (likely(clk->parent))
+			__clk_disable(clk->parent);
+	}
 }
 
-int clk_enable(struct clk *clk)
+void clk_disable(struct clk *clk)
 {
 	unsigned long flags;
-	int ret;
 
 	if (!clk)
-		return -EINVAL;
+		return;
 
 	spin_lock_irqsave(&clock_lock, flags);
-	ret = __clk_enable(clk);
+	__clk_disable(clk);
 	spin_unlock_irqrestore(&clock_lock, flags);
-
-	return ret;
 }
-EXPORT_SYMBOL_GPL(clk_enable);
+EXPORT_SYMBOL_GPL(clk_disable);
 
-static void __clk_disable(struct clk *clk)
+static int __clk_enable(struct clk *clk)
 {
-	if (clk->usecount > 0 && !(--clk->usecount)) {
-		if (likely(clk->ops && clk->ops->disable))
-			clk->ops->disable(clk);
-		if (likely(clk->parent))
-			__clk_disable(clk->parent);
+	int ret = 0;
+
+	if (clk->usecount++ == 0) {
+		if (clk->parent) {
+			ret = __clk_enable(clk->parent);
+			if (unlikely(ret))
+				goto err;
+		}
+
+		if (clk->ops && clk->ops->enable) {
+			ret = clk->ops->enable(clk);
+			if (ret) {
+				if (clk->parent)
+					__clk_disable(clk->parent);
+				goto err;
+			}
+		}
 	}
+
+	return ret;
+err:
+	clk->usecount--;
+	return ret;
 }
 
-void clk_disable(struct clk *clk)
+int clk_enable(struct clk *clk)
 {
 	unsigned long flags;
+	int ret;
 
 	if (!clk)
-		return;
+		return -EINVAL;
 
 	spin_lock_irqsave(&clock_lock, flags);
-	__clk_disable(clk);
+	ret = __clk_enable(clk);
 	spin_unlock_irqrestore(&clock_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(clk_disable);
+EXPORT_SYMBOL_GPL(clk_enable);
 
 static LIST_HEAD(root_clks);
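To make the refcounting rule in the err path above concrete (a failed enable must leave usecount where it started and must roll back a parent that was already enabled), here is a self-contained userspace sketch; the fake_clk/fake_enable/fake_disable names are stand-ins for the kernel structures, not kernel API:

#include <stdio.h>

struct fake_clk {
        const char *name;
        struct fake_clk *parent;
        int usecount;
        int fail;                       /* force the enable step to fail for the demo */
};

/* simplified disable: only ever called here with usecount > 0 */
static void fake_disable(struct fake_clk *clk)
{
        if (!(--clk->usecount) && clk->parent)
                fake_disable(clk->parent);
}

static int fake_enable(struct fake_clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = fake_enable(clk->parent);
                        if (ret)
                                goto err;
                }
                if (clk->fail) {        /* stands in for ops->enable() failing */
                        ret = -1;
                        if (clk->parent)
                                fake_disable(clk->parent);
                        goto err;
                }
        }
        return ret;
err:
        clk->usecount--;                /* failed enable leaves usecount unchanged */
        return ret;
}

int main(void)
{
        struct fake_clk parent = { .name = "parent" };
        struct fake_clk child  = { .name = "child", .parent = &parent, .fail = 1 };
        int ret = fake_enable(&child);

        printf("ret=%d child=%d parent=%d\n", ret, child.usecount, parent.usecount);
        return 0;
}

Running it prints ret=-1 child=0 parent=0, i.e. neither clock is left holding a stray reference after the failure.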