author     John Stultz <john.stultz@linaro.org>      2012-07-13 01:21:54 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2012-07-15 10:39:06 +0200
commit     1f4f948706bcec1b51bf6492bf04057d2e21e273 (patch)
tree       1677cf2f130da9ff53259fdb658fa53a4ef81b4e /kernel
parent     1e75fa8be9fb61e1af46b5b3b176347a4c958ca1 (diff)
time: Refactor accumulation of nsecs to secs
We do the exact same logic moving nsecs to secs in the timekeeper in
multiple places, so condense this into a single function.

Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Link: http://lkml.kernel.org/r/1342156917-25092-6-git-send-email-john.stultz@linaro.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/timekeeping.c | 54
1 file changed, 32 insertions(+), 22 deletions(-)
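
As a minimal illustration of the pattern being consolidated (not the kernel code itself), the sketch below carries whole seconds out of a shifted nanosecond accumulator and hooks in leap-second handling; struct toy_timekeeper, the stubbed second_overflow() and the shift value of 8 are assumptions made only for this example.

/*
 * Illustrative sketch, not kernel code: a simplified timekeeper with a
 * stubbed second_overflow(), showing the accumulation pattern the patch
 * factors into a single helper.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct toy_timekeeper {
        uint64_t xtime_nsec;       /* nanoseconds, left-shifted by 'shift' */
        int64_t  xtime_sec;
        int64_t  wall_to_mono_sec;
        uint32_t shift;
};

/* Stub: the real second_overflow() lives in the NTP code and may return +/-1 */
static int second_overflow(int64_t sec)
{
        (void)sec;
        return 0;
}

/* Shared helper: move whole seconds out of the shifted nsec accumulator */
static void accumulate_nsecs_to_secs(struct toy_timekeeper *tk)
{
        uint64_t nsecps = NSEC_PER_SEC << tk->shift;

        while (tk->xtime_nsec >= nsecps) {
                tk->xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* leap second handling adjusts both fields here */
                int leap = second_overflow(tk->xtime_sec);
                tk->xtime_sec += leap;
                tk->wall_to_mono_sec -= leap;
        }
}

int main(void)
{
        struct toy_timekeeper tk = { .shift = 8 };

        /* accumulate 2.5 seconds worth of shifted nanoseconds */
        tk.xtime_nsec = (uint64_t)(2.5 * NSEC_PER_SEC) << tk.shift;
        accumulate_nsecs_to_secs(&tk);

        printf("secs=%lld, leftover shifted nsecs=%llu\n",
               (long long)tk.xtime_sec,
               (unsigned long long)tk.xtime_nsec);
        return 0;
}
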
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b98d9bd73e5..cb4a433bab9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -991,6 +991,35 @@ static void timekeeping_adjust(s64 offset)
/**
+ * accumulate_nsecs_to_secs - Accumulates nsecs into secs
+ *
+ * Helper function that accumulates the nsecs greater than a second
+ * from the xtime_nsec field to the xtime_secs field.
+ * It also calls into the NTP code to handle leapsecond processing.
+ *
+ */
+static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
+{
+        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+
+        while (tk->xtime_nsec >= nsecps) {
+                int leap;
+
+                tk->xtime_nsec -= nsecps;
+                tk->xtime_sec++;
+
+                /* Figure out if it's a leap sec and apply if needed */
+                leap = second_overflow(tk->xtime_sec);
+                tk->xtime_sec += leap;
+                tk->wall_to_monotonic.tv_sec -= leap;
+                if (leap)
+                        clock_was_set_delayed();
+
+        }
+}
+
+
+/**
* logarithmic_accumulation - shifted accumulation of cycles
*
 * This function accumulates a shifted interval of cycles into
@@ -1001,7 +1030,6 @@ static void timekeeping_adjust(s64 offset)
 */
static cycle_t logarithmic_accumulation(cycle_t offset, u32 shift)
{
-        u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
@@ -1013,16 +1041,8 @@ static cycle_t logarithmic_accumulation(cycle_t offset, u32 shift)
        timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

        timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
-        while (timekeeper.xtime_nsec >= nsecps) {
-                int leap;
-                timekeeper.xtime_nsec -= nsecps;
-                timekeeper.xtime_sec++;
-                leap = second_overflow(timekeeper.xtime_sec);
-                timekeeper.xtime_sec += leap;
-                timekeeper.wall_to_monotonic.tv_sec -= leap;
-                if (leap)
-                        clock_was_set_delayed();
-        }
+
+        accumulate_nsecs_to_secs(&timekeeper);

        /* Accumulate raw time */
        raw_nsecs = timekeeper.raw_interval << shift;
@@ -1132,17 +1152,7 @@ static void update_wall_time(void)
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
-        if (unlikely(timekeeper.xtime_nsec >=
-                        ((u64)NSEC_PER_SEC << timekeeper.shift))) {
-                int leap;
-                timekeeper.xtime_nsec -= (u64)NSEC_PER_SEC << timekeeper.shift;
-                timekeeper.xtime_sec++;
-                leap = second_overflow(timekeeper.xtime_sec);
-                timekeeper.xtime_sec += leap;
-                timekeeper.wall_to_monotonic.tv_sec -= leap;
-                if (leap)
-                        clock_was_set_delayed();
-        }
+        accumulate_nsecs_to_secs(&timekeeper);

        timekeeping_update(false);
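
One detail worth noting when reading the helper: xtime_nsec holds nanoseconds left-shifted by tk->shift, so fractional nanoseconds from the clocksource conversion are not lost, and a full second is therefore (u64)NSEC_PER_SEC << tk->shift rather than NSEC_PER_SEC. The small example below works through that arithmetic; it is illustrative only, and the shift value of 8 is an assumption, not taken from the patch.

/*
 * Worked example of the shifted-nanosecond threshold, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t NSEC_PER_SEC = 1000000000ULL;
        uint32_t shift = 8;                        /* assumed clocksource shift */
        uint64_t nsecps = NSEC_PER_SEC << shift;   /* one second in shifted units */

        uint64_t xtime_nsec = 1171875000ULL << shift; /* 1.171875 s, shifted */

        printf("threshold       = %llu\n", (unsigned long long)nsecps);
        printf("whole seconds   = %llu\n",
               (unsigned long long)(xtime_nsec / nsecps));
        printf("remainder (ns)  = %llu\n",
               (unsigned long long)((xtime_nsec % nsecps) >> shift));
        return 0;
}
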