author | Thomas Gleixner <tglx@linutronix.de> | 2013-02-21 22:51:40 +0000
---|---|---
committer | John Stultz <john.stultz@linaro.org> | 2013-04-04 13:18:32 -0700
commit | ca4523cda429712fc135c5db50920d90eb776a6c (patch) |
tree | 6a8c260a24bcd5a188d25790984d0aa5a3fb3428 /kernel/time/timekeeping.c |
parent | 48cdc135d4840aab8efd2fc3bacb5d7dfd94a9c8 (diff) |
timekeeping: Shorten seq_count region
Shorten the seqcount write hold region to the actual update of the
timekeeper and the related data (e.g. vsyscall).
On a contemporary x86 system this reduces the maximum latencies on
Preempt-RT from 8us to 4us on the non-timekeeping cores.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
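
The change the message describes is generic seqlock hygiene: readers of the timekeeper spin while the sequence count is odd, so every instruction executed between write_seqcount_begin() and write_seqcount_end() adds directly to reader latency. Below is a minimal, standalone C11 sketch of that pattern (illustration only: the names snapshot, publish_time and read_time are invented, and C11 atomics stand in for the kernel's seqcount primitives and barriers); it is not the kernel implementation, just a model of why a shorter write-held window bounds the readers' worst-case spin.

```c
/*
 * Standalone C11 sketch of the seqcount publish pattern (illustration only;
 * not the kernel API).  Writers are assumed to be serialized externally,
 * as timekeeper_lock does for update_wall_time().
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct snapshot {
	atomic_uint seq;          /* odd while a write is in progress */
	_Atomic uint64_t sec;
	_Atomic uint64_t nsec;
};

static struct snapshot snap;

/*
 * Writer: keep the expensive work outside the odd-seq window and hold it
 * only for the final copy-out -- the same idea as moving
 * write_seqcount_begin() past the accumulation loop in the patch.
 */
static void publish_time(uint64_t sec, uint64_t nsec)
{
	/* ... expensive accumulation happens here, readers undisturbed ... */

	atomic_fetch_add_explicit(&snap.seq, 1, memory_order_relaxed); /* -> odd */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&snap.sec, sec, memory_order_relaxed);
	atomic_store_explicit(&snap.nsec, nsec, memory_order_relaxed);
	atomic_fetch_add_explicit(&snap.seq, 1, memory_order_release); /* -> even */
}

/*
 * Reader: retry until the same even sequence is seen before and after the
 * data loads.  Its worst-case spin time is bounded by the writer's odd-seq
 * window, which is why shrinking that window cuts reader-side latency.
 */
static void read_time(uint64_t *sec, uint64_t *nsec)
{
	for (;;) {
		unsigned int start = atomic_load_explicit(&snap.seq,
							  memory_order_acquire);
		if (start & 1)
			continue;               /* writer mid-update, spin */
		*sec  = atomic_load_explicit(&snap.sec,  memory_order_relaxed);
		*nsec = atomic_load_explicit(&snap.nsec, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		if (atomic_load_explicit(&snap.seq, memory_order_relaxed) == start)
			return;                 /* consistent snapshot */
	}
}

int main(void)
{
	uint64_t s, ns;

	publish_time(1, 500000000);
	read_time(&s, &ns);
	printf("%" PRIu64 ".%09" PRIu64 "\n", s, ns);
	return 0;
}
```

The diff below applies the same split inside update_wall_time(): the accumulation work on the shadow timekeeper (tk) runs before write_seqcount_begin(), and only the cycle_last update, the memcpy() into the real timekeeper and timekeeping_update() remain inside the write-held region.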
Diffstat (limited to 'kernel/time/timekeeping.c')
-rw-r--r-- | kernel/time/timekeeping.c | 5 |
1 file changed, 2 insertions, 3 deletions
```diff
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d20ffdad62e..c4d2a8751f3 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1341,7 +1341,6 @@ static void update_wall_time(void)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&timekeeper_lock, flags);
-	write_seqcount_begin(&timekeeper_seq);
 
 	/* Make sure we're fully resumed: */
 	if (unlikely(timekeeping_suspended))
@@ -1393,6 +1392,7 @@ static void update_wall_time(void)
 	 */
 	accumulate_nsecs_to_secs(tk);
 
+	write_seqcount_begin(&timekeeper_seq);
 	/* Update clock->cycle_last with the new value */
 	clock->cycle_last = tk->cycle_last;
 	/*
@@ -1407,9 +1407,8 @@ static void update_wall_time(void)
 	 */
 	memcpy(real_tk, tk, sizeof(*tk));
 	timekeeping_update(real_tk, false, false);
-
-out:
 	write_seqcount_end(&timekeeper_seq);
+out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 }
 
```