author		Magnus Damm <damm@igel.co.jp>		2009-04-17 05:26:31 +0000
committer	Paul Mundt <lethal@linux-sh.org>	2009-04-22 09:31:04 +0900
commit		19bdc9d061bcb71efd2b53083d96b59bbe1a1751 (patch)
tree		ebe6328a1078cd8526d9635ce54057b18984f30f /drivers/clocksource/sh_cmt.c
parent		99ce567ba912109c78762246c964327f3f81f27d (diff)
clocksource: sh_cmt clocksource support
Add clocksource support to the sh_cmt driver. With this in place we can
do tickless with a single CMT channel.

Signed-off-by: Magnus Damm <damm@igel.co.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
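The clocksource half of the driver only runs when the caller hands a nonzero clocksource rating to sh_cmt_register() (last hunk below). A minimal sketch of the board-side hookup that would exercise it; the struct and field names here (sh_cmt_config, clockevent_rating, clocksource_rating) are assumptions about the CMT platform data of that kernel generation, not part of this patch:

/* Sketch only: hypothetical board platform data driving one CMT channel
 * as both tick source and clocksource.  Struct/field names are assumed. */
static struct sh_cmt_config cmt0_platform_data = {
	.name			= "CMT0",
	.clockevent_rating	= 125,	/* tick clock_event_device */
	.clocksource_rating	= 125,	/* nonzero: also register a clocksource */
};

With both ratings set, a single channel serves as the tick device and as the free-running clocksource, which is what allows tickless operation without a second timer channel.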
Diffstat (limited to 'drivers/clocksource/sh_cmt.c')
-rw-r--r--	drivers/clocksource/sh_cmt.c	66
1 files changed, 66 insertions, 0 deletions
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 02bae3994ab..c2475648961 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -47,6 +47,7 @@ struct sh_cmt_priv {
 	unsigned long rate;
 	spinlock_t lock;
 	struct clock_event_device ced;
+	struct clocksource cs;
 	unsigned long total_cycles;
 };
 
@@ -376,6 +377,68 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
 	spin_unlock_irqrestore(&p->lock, flags);
 }
 
+static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
+{
+	return container_of(cs, struct sh_cmt_priv, cs);
+}
+
+static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	unsigned long flags, raw;
+	unsigned long value;
+	int has_wrapped;
+
+	spin_lock_irqsave(&p->lock, flags);
+	value = p->total_cycles;
+	raw = sh_cmt_get_counter(p, &has_wrapped);
+
+	if (unlikely(has_wrapped))
+		raw = p->match_value;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	return value + raw;
+}
+
+static int sh_cmt_clocksource_enable(struct clocksource *cs)
+{
+	struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
+	int ret;
+
+	p->total_cycles = 0;
+
+	ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
+	if (ret)
+		return ret;
+
+	/* TODO: calculate good shift from rate and counter bit width */
+	cs->shift = 0;
+	cs->mult = clocksource_hz2mult(p->rate, cs->shift);
+	return 0;
+}
+
+static void sh_cmt_clocksource_disable(struct clocksource *cs)
+{
+	sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE);
+}
+
+static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
+				       char *name, unsigned long rating)
+{
+	struct clocksource *cs = &p->cs;
+
+	cs->name = name;
+	cs->rating = rating;
+	cs->read = sh_cmt_clocksource_read;
+	cs->enable = sh_cmt_clocksource_enable;
+	cs->disable = sh_cmt_clocksource_disable;
+	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	pr_info("sh_cmt: %s used as clock source\n", cs->name);
+	clocksource_register(cs);
+	return 0;
+}
+
 static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
 {
 	return container_of(ced, struct sh_cmt_priv, ced);
@@ -483,6 +546,9 @@ int sh_cmt_register(struct sh_cmt_priv *p, char *name,
 	if (clockevent_rating)
 		sh_cmt_register_clockevent(p, name, clockevent_rating);
 
+	if (clocksource_rating)
+		sh_cmt_register_clocksource(p, name, clocksource_rating);
+
 	return 0;
 }
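For reference on the TODO in sh_cmt_clocksource_enable() above: the mult/shift pair it programs is what the timekeeping core uses to turn counter cycles into nanoseconds, roughly ns = (cycles * mult) >> shift, and clocksource_hz2mult() picks mult so that this holds for the channel's input rate. A small illustrative helper, mirroring the kernel's clocksource_cyc2ns() rather than quoting it, under the assumption that mult and shift have already been set up:

/* Illustration only: how a cycle delta read via sh_cmt_clocksource_read()
 * becomes nanoseconds once cs->mult and cs->shift are programmed. */
static inline u64 cmt_cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return (cycles * mult) >> shift;
}

With shift fixed at 0, mult comes out to roughly NSEC_PER_SEC / rate, so a fast input clock (tens of MHz) gets a small mult and a correspondingly coarse rounding error per cycle; deriving a larger shift from the rate and the counter width, as the TODO notes, would recover that precision.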