author    Arnd Bergmann <arnd@arndb.de>  2012-01-09 16:16:29 +0000
committer Arnd Bergmann <arnd@arndb.de>  2012-01-09 16:16:29 +0000
commit    dcf7ec5ee62a78123057a1e286c88ca739717409 (patch)
tree      fa3f19434638a942ba66d236dde4d9aaadf8b370 /arch/arm/kernel/sched_clock.c
parent    15db3e823c3246e3bd31fe454f5c8927eb85caf2 (diff)
parent    142f2101a86ade2d6c9dfbedf82e1b5b31c8fce6 (diff)
Merge branch 'samsung/driver' into next/drivers
Conflicts:
	arch/arm/mach-mxs/include/mach/common.h

Pull in previous samsung conflict merges and do a trivial merge of an
mxs double-add conflict.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/arm/kernel/sched_clock.c')
-rw-r--r--  arch/arm/kernel/sched_clock.c  118
1 file changed, 105 insertions(+), 13 deletions(-)
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 9a46370fe9d..5416c7c1252 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -14,61 +14,153 @@
#include <asm/sched_clock.h>
+struct clock_data {
+ u64 epoch_ns;
+ u32 epoch_cyc;
+ u32 epoch_cyc_copy;
+ u32 mult;
+ u32 shift;
+};
+
static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
-static void (*sched_clock_update_fn)(void);
+
+static struct clock_data cd = {
+ .mult = NSEC_PER_SEC / HZ,
+};
+
+static u32 __read_mostly sched_clock_mask = 0xffffffff;
+
+static u32 notrace jiffy_sched_clock_read(void)
+{
+ return (u32)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+ return (cyc * mult) >> shift;
+}
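The conversion above is plain fixed-point arithmetic: mult and shift are chosen (by clocks_calc_mult_shift() in setup_sched_clock() further down) so that one counter cycle scales to its period in nanoseconds, while the default clock_data initializer (mult = NSEC_PER_SEC / HZ, shift = 0) makes the jiffy fallback convert one jiffy to exactly one tick period. A standalone sketch of the same step, with a hand-picked mult/shift for a hypothetical 24 MHz counter (the kernel derives the real pair itself):

/*
 * Model of the cyc_to_ns() fixed-point step above. For a 24 MHz
 * counter one cycle is 1e9/24e6 ~= 41.667 ns, so with shift = 24
 * mult = round(41.667 * 2^24) = 699050667; clocks_calc_mult_shift()
 * would pick an equivalent pair for the kernel.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 699050667, shift = 24;	/* ~41.667 ns/cycle */

	/* 24000000 cycles at 24 MHz should come out as one second */
	printf("%llu ns\n",
	       (unsigned long long)cyc_to_ns(24000000, mult, shift));
	return 0;
}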
+
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+{
+ u64 epoch_ns;
+ u32 epoch_cyc;
+
+ /*
+ * Load the epoch_cyc and epoch_ns atomically. We do this by
+ * ensuring that we always write epoch_cyc, epoch_ns and
+ * epoch_cyc_copy in strict order, and read them in strict order.
+ * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
+ * the middle of an update, and we should repeat the load.
+ */
+ do {
+ epoch_cyc = cd.epoch_cyc;
+ smp_rmb();
+ epoch_ns = cd.epoch_ns;
+ smp_rmb();
+ } while (epoch_cyc != cd.epoch_cyc_copy);
+
+ return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+}
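The retry loop above is a hand-rolled seqlock: the writer (update_sched_clock(), below) stores epoch_cyc, then epoch_ns, then epoch_cyc_copy with barriers in between, so a reader that observes epoch_cyc == epoch_cyc_copy knows the cyc/ns pair it loaded is consistent. A minimal userspace model of the protocol, with C11 acquire/release atomics standing in for smp_rmb()/smp_wmb() (names and test values are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static struct {
	_Atomic uint64_t epoch_ns;
	_Atomic uint32_t epoch_cyc;
	_Atomic uint32_t epoch_cyc_copy;
} cd;

/* Reader: retry while epoch_cyc and epoch_cyc_copy disagree. */
static uint64_t read_epoch(uint32_t *cyc)
{
	uint64_t ns;
	uint32_t c;

	do {
		c  = atomic_load_explicit(&cd.epoch_cyc, memory_order_acquire);
		ns = atomic_load_explicit(&cd.epoch_ns, memory_order_acquire);
	} while (c != atomic_load_explicit(&cd.epoch_cyc_copy,
					   memory_order_acquire));
	*cyc = c;
	return ns;
}

/* Writer: epoch_cyc first, epoch_cyc_copy last, stores kept in order. */
static void write_epoch(uint32_t cyc, uint64_t ns)
{
	atomic_store_explicit(&cd.epoch_cyc, cyc, memory_order_release);
	atomic_store_explicit(&cd.epoch_ns, ns, memory_order_release);
	atomic_store_explicit(&cd.epoch_cyc_copy, cyc, memory_order_release);
}

int main(void)
{
	uint32_t cyc;
	uint64_t ns;

	write_epoch(1000, 41667);
	ns = read_epoch(&cyc);
	printf("epoch: cyc=%u ns=%llu\n", cyc, (unsigned long long)ns);
	return 0;
}

The writer never blocks the reader: a reader that races with an update simply loops until the copy matches again.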
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+ unsigned long flags;
+ u32 cyc;
+ u64 ns;
+
+ cyc = read_sched_clock();
+ ns = cd.epoch_ns +
+ cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+ cd.mult, cd.shift);
+ /*
+ * Write epoch_cyc and epoch_ns in a way that the update is
+ * detectable in cyc_to_sched_clock().
+ */
+ raw_local_irq_save(flags);
+ cd.epoch_cyc = cyc;
+ smp_wmb();
+ cd.epoch_ns = ns;
+ smp_wmb();
+ cd.epoch_cyc_copy = cyc;
+ raw_local_irq_restore(flags);
+}
static void sched_clock_poll(unsigned long wrap_ticks)
{
mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
- sched_clock_update_fn();
+ update_sched_clock();
}
-void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
- unsigned int clock_bits, unsigned long rate)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
u64 res, wrap;
char r_unit;
- sched_clock_update_fn = update;
+ BUG_ON(bits > 32);
+ WARN_ON(!irqs_disabled());
+ WARN_ON(read_sched_clock != jiffy_sched_clock_read);
+ read_sched_clock = read;
+ sched_clock_mask = (1ULL << bits) - 1;
/* calculate the mult/shift to convert counter ticks to ns. */
- clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 0);
+ clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
r = rate;
if (r >= 4000000) {
r /= 1000000;
r_unit = 'M';
- } else {
+ } else if (r >= 1000) {
r /= 1000;
r_unit = 'k';
- }
+ } else
+ r_unit = ' ';
/* calculate how many ns until we wrap */
- wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
+ wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
do_div(wrap, NSEC_PER_MSEC);
w = wrap;
/* calculate the ns resolution of this counter */
- res = cyc_to_ns(1ULL, cd->mult, cd->shift);
+ res = cyc_to_ns(1ULL, cd.mult, cd.shift);
pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
- clock_bits, r, r_unit, res, w);
+ bits, r, r_unit, res, w);
/*
* Start the timer to keep sched_clock() properly updated and
* set the initial epoch.
*/
sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
- update();
+ update_sched_clock();
/*
* Ensure that sched_clock() starts off at 0ns
*/
- cd->epoch_ns = 0;
+ cd.epoch_ns = 0;
+
+ pr_debug("Registered %pF as sched_clock source\n", read);
+}
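Where the old init_sched_clock() interface made each platform carry its own clock_data and update callback, a platform now just hands setup_sched_clock() a read function, the counter width, and the rate, from its timer init while IRQs are still disabled (hence the WARN_ON above). A hypothetical hook-up, with a made-up MMIO address and rate:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *counter_base;

/* Free-running 32-bit up-counter; the address is illustrative only. */
static u32 notrace myplat_sched_clock_read(void)
{
	return readl_relaxed(counter_base);
}

static void __init myplat_timer_init(void)
{
	counter_base = ioremap(0xfff00004, 0x1000);	/* hypothetical */

	/* 32 significant bits, counting at 24 MHz (illustrative) */
	setup_sched_clock(myplat_sched_clock_read, 32, 24000000);
}

The read function is marked notrace for the same reason sched_clock() is: the function tracer timestamps events via sched_clock(), so tracing anything on this path would recurse.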
+
+unsigned long long notrace sched_clock(void)
+{
+ u32 cyc = read_sched_clock();
+ return cyc_to_sched_clock(cyc, sched_clock_mask);
}
void __init sched_clock_postinit(void)
{
+ /*
+ * If no sched_clock function has been provided at this point,
+ * make it the final one.
+ */
+ if (read_sched_clock == jiffy_sched_clock_read)
+ setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+
sched_clock_poll(sched_clock_timer.data);
}
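The poll timer exists purely so update_sched_clock() runs at least once per counter wrap; setup_sched_clock() arms it at 90% of the wrap interval (w - w / 10). A quick back-of-envelope check of the numbers the pr_info above would print, using the hypothetical 24 MHz / 32-bit counter from the earlier sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mult = 699050667, shift = 24;	/* ~24 MHz, see above */

	/* nanoseconds until a 32-bit counter wraps */
	uint64_t wrap_ns = (((1ULL << 32) - 1) * mult) >> shift;
	uint64_t w = wrap_ns / 1000000;		/* -> milliseconds */

	printf("wraps every %llums, poll timer every %llums\n",
	       (unsigned long long)w, (unsigned long long)(w - w / 10));
	return 0;
}

This prints roughly 178956 ms and 161061 ms: the epoch advances about every 2.7 minutes, comfortably inside the ~3 minute wrap window.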