From e8fcaa5c54e3b0371230e5d43a6f650c667da9c5 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 7 Aug 2013 22:28:01 +0200
Subject: nohz: Convert a few places to use local per cpu accesses

A few functions use remote per-CPU access APIs when they deal with local
values. Just do the right conversion to improve performance, code
readability and debug checks.

While at it, let's extend some of these function names with a
*_this_cpu() suffix in order to display their purpose more clearly.

Signed-off-by: Frederic Weisbecker
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Oleg Nesterov
Cc: Steven Rostedt
---
 kernel/time/tick-sched.c | 39 ++++++++++++++++-----------------------
 1 file changed, 16 insertions(+), 23 deletions(-)

(limited to 'kernel/time/tick-sched.c')

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3612fc77f83..2afd43fca93 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -391,11 +391,9 @@ __setup("nohz=", setup_tick_nohz);
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	unsigned long flags;
 
-	ts->idle_waketime = now;
+	__this_cpu_write(tick_cpu_sched.idle_waketime, now);
 
 	local_irq_save(flags);
 	tick_do_update_jiffies64(now);
@@ -426,17 +424,15 @@ update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_upda
 
 }
 
-static void tick_nohz_stop_idle(int cpu, ktime_t now)
+static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-
-	update_ts_time_stats(cpu, ts, now, NULL);
+	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 {
 	ktime_t now = ktime_get();
 
@@ -752,7 +748,7 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ktime_t now, expires;
 	int cpu = smp_processor_id();
 
-	now = tick_nohz_start_idle(cpu, ts);
+	now = tick_nohz_start_idle(ts);
 
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
@@ -914,8 +910,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	int cpu = smp_processor_id();
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -928,7 +923,7 @@ void tick_nohz_idle_exit(void)
 	now = ktime_get();
 
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 
 	if (ts->tick_stopped) {
 		tick_nohz_restart_sched_tick(ts, now);
@@ -1012,12 +1007,10 @@ static void tick_nohz_switch_to_nohz(void)
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
-static void tick_nohz_kick_tick(int cpu, ktime_t now)
+static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 {
 #if 0
	/* Switch back to 2.6.27 behaviour */
-
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 	ktime_t delta;
 
 	/*
@@ -1032,36 +1025,36 @@ static void tick_nohz_kick_tick(int cpu, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz(int cpu)
+static inline void tick_check_nohz_this_cpu(void)
 {
-	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
 		return;
 	now = ktime_get();
 	if (ts->idle_active)
-		tick_nohz_stop_idle(cpu, now);
+		tick_nohz_stop_idle(ts, now);
 	if (ts->tick_stopped) {
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(cpu, now);
+		tick_nohz_kick_tick(ts, now);
 	}
 }
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz(int cpu) { }
+static inline void tick_check_nohz_this_cpu(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
-void tick_check_idle(int cpu)
+void tick_check_idle(void)
 {
-	tick_check_oneshot_broadcast(cpu);
-	tick_check_nohz(cpu);
+	tick_check_oneshot_broadcast_this_cpu();
+	tick_check_nohz_this_cpu();
 }
 
 /*
--
cgit v1.2.3-70-g09d2
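
For reference, the conversion in miniature. This is a minimal sketch
rather than kernel code: my_state is a hypothetical stand-in for a
per-CPU variable such as tick_cpu_sched, and __get_cpu_var() is the
local-access helper this kernel generation used (&__get_cpu_var(x) is
equivalent to this_cpu_ptr(&x) in later kernels).

#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_state {
	ktime_t idle_waketime;
};

static DEFINE_PER_CPU(struct my_state, my_state);

/* Before: a remote per-CPU access API pointed at the local CPU. */
static void touch_old(ktime_t now)
{
	int cpu = smp_processor_id();
	struct my_state *s = &per_cpu(my_state, cpu);

	s->idle_waketime = now;
}

/* After: a local accessor, as in the patch above.  No explicit
 * per_cpu_offset(cpu) computation; on architectures with a per-CPU
 * segment this can compile down to a single instruction, and the
 * local-only intent is visible to the per-CPU debug checks. */
static void touch_new(ktime_t now)
{
	__this_cpu_write(my_state.idle_waketime, now);
}

/* Pointer form, used when several fields are touched: */
static struct my_state *local_state(void)
{
	return &__get_cpu_var(my_state);
}

The same motion drives the signature changes: tick_nohz_stop_idle()
and tick_nohz_kick_tick() now take the struct tick_sched pointer the
caller already holds instead of re-deriving it from a CPU number.
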
From 47a1b796306356f358e515149d86baf0cc6bf007 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Thu, 12 Dec 2013 13:10:55 -0800
Subject: tick/timekeeping: Call update_wall_time outside the jiffies lock

Since the xtime lock was split into the timekeeping lock and the
jiffies lock, we no longer need to call update_wall_time() while
holding the jiffies lock. Thus, this patch splits update_wall_time()
out from do_timer().

This allows us to get away from calling clock_was_set_delayed() in
update_wall_time() and instead use the standard clock_was_set() call
that previously would deadlock, as it causes the jiffies lock to be
acquired.
Cc: Sasha Levin
Cc: Thomas Gleixner
Cc: Prarit Bhargava
Cc: Richard Cochran
Cc: Ingo Molnar
Signed-off-by: John Stultz
---
 kernel/time/tick-common.c   |  1 +
 kernel/time/tick-internal.h |  1 +
 kernel/time/tick-sched.c    |  1 +
 kernel/time/timekeeping.c   | 19 ++++---------------
 4 files changed, 7 insertions(+), 15 deletions(-)

(limited to 'kernel/time/tick-sched.c')

diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 64522ecdfe0..91c5f27e82a 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -70,6 +70,7 @@ static void tick_periodic(int cpu)
 
 		do_timer(1);
 		write_sequnlock(&jiffies_lock);
+		update_wall_time();
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index e2bced59b6d..8329669b51e 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -155,3 +155,4 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
+extern void update_wall_time(void);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2afd43fca93..c58b03d8995 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -86,6 +86,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
 
 /*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d62682b6df4..44b7e6bb081 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1360,7 +1360,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
-static void update_wall_time(void)
+void update_wall_time(void)
 {
 	struct clocksource *clock;
 	struct timekeeper *real_tk = &timekeeper;
@@ -1441,19 +1441,8 @@ static void update_wall_time(void)
 	write_seqcount_end(&timekeeper_seq);
 out:
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
-	if (clock_set) {
-		/*
-		 * XXX - I'd rather we just call clock_was_set(), but
-		 * since we're currently holding the jiffies lock, calling
-		 * clock_was_set would trigger an ipi which would then grab
-		 * the jiffies lock and we'd deadlock. :(
-		 * The right solution should probably be droping
-		 * the jiffies lock before calling update_wall_time
-		 * but that requires some rework of the tick sched
-		 * code.
-		 */
-		clock_was_set_delayed();
-	}
+	if (clock_set)
+		clock_was_set();
 }
 
 /**
@@ -1598,7 +1587,6 @@ struct timespec get_monotonic_coarse(void)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_wall_time();
 	calc_global_load(ticks);
 }
 
@@ -1756,4 +1744,5 @@ void xtime_update(unsigned long ticks)
 	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
 	write_sequnlock(&jiffies_lock);
+	update_wall_time();
 }
--
cgit v1.2.3-70-g09d2
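
In outline, what this second patch changes: update_wall_time() used to
run inside do_timer(), with jiffies_lock write-held, which is why it
could only use clock_was_set_delayed(); a direct clock_was_set()
raises an IPI whose handler takes jiffies_lock. A minimal sketch of
the tick path before and after, simplified from tick_periodic() and
xtime_update() and relying on the kernel's own declarations of
jiffies_lock, do_timer() and update_wall_time(); it is not a verbatim
copy of either version:

#include <linux/jiffies.h>	/* extern seqlock_t jiffies_lock; */
#include <linux/seqlock.h>

extern void do_timer(unsigned long ticks);	/* tick-internal.h */
extern void update_wall_time(void);		/* exported by this patch */

/* Before: update_wall_time() was called from do_timer(), i.e. with
 * jiffies_lock write-held, so it had to defer notification via
 * clock_was_set_delayed() to avoid the IPI deadlock. */
static void tick_before(void)
{
	write_seqlock(&jiffies_lock);
	do_timer(1);		/* jiffies_64 += 1 plus timekeeping */
	write_sequnlock(&jiffies_lock);
}

/* After: do_timer() only bumps jiffies and the load average; the
 * timekeeping update runs once the lock is dropped and may call
 * clock_was_set() directly. */
static void tick_after(void)
{
	write_seqlock(&jiffies_lock);
	do_timer(1);		/* jiffies_64 += 1, calc_global_load() */
	write_sequnlock(&jiffies_lock);
	update_wall_time();	/* safe to raise the IPI now */
}

The same ordering is applied at every call site the patch touches:
tick_periodic(), tick_do_update_jiffies64() and xtime_update() all
invoke update_wall_time() right after releasing jiffies_lock.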