author		Frederic Weisbecker <fweisbec@gmail.com>	2011-11-17 18:48:14 +0100
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 10:31:57 -0800
commit		1268fbc746ea1cd279886a740dcbad4ba5232225 (patch)
tree		dc0ff36b4114992a3f67479e25132f5e99f36b9e /arch/powerpc/kernel/idle.c
parent		b58bdccaa8d908e0f71dae396468a0d3f7bb3125 (diff)
nohz: Remove tick_nohz_idle_enter_norcu() / tick_nohz_idle_exit_norcu()
Those two APIs were provided to optimize the calls of tick_nohz_idle_enter()
and rcu_idle_enter() into a single irq-disabled section, so that no interrupt
happening in between would needlessly process any RCU job.

This is an optimization whose benefits have yet to be measured. Let's start
simple and completely decouple the idle RCU and dyntick-idle logics.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
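For illustration only (not part of the patch), here is a minimal C sketch of the decoupled calling pattern the message describes. do_low_power_wait() and idle_loop_sketch() are made-up placeholder names; the real powerpc loop additionally handles idle_uses_rcu, need_resched() and cpu_should_die(), as the diff below shows.

/*
 * Minimal sketch only -- not taken from this patch.  do_low_power_wait()
 * and idle_loop_sketch() are placeholder names.
 */
#include <linux/tick.h>
#include <linux/rcupdate.h>

static void do_low_power_wait(void)
{
	/* placeholder for the arch/platform power-save routine */
}

static void idle_loop_sketch(void)
{
	while (1) {
		/* Stop the periodic tick for this CPU. */
		tick_nohz_idle_enter();
		/*
		 * Separately enter RCU's extended quiescent state.  An
		 * interrupt between the two calls may now process RCU work,
		 * which the removed _norcu() helpers avoided by doing both
		 * steps inside a single irq-disabled section.
		 */
		rcu_idle_enter();

		do_low_power_wait();

		/* Leave RCU's extended quiescent state, then restart the tick. */
		rcu_idle_exit();
		tick_nohz_idle_exit();
	}
}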
Diffstat (limited to 'arch/powerpc/kernel/idle.c')
-rw-r--r--	arch/powerpc/kernel/idle.c	15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 3cd73d1fc42..9c3cd490b1b 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -62,10 +62,10 @@ void cpu_idle(void)
 	set_thread_flag(TIF_POLLING_NRFLAG);
 	while (1) {
-		if (idle_uses_rcu)
-			tick_nohz_idle_enter();
-		else
-			tick_nohz_idle_enter_norcu();
+		tick_nohz_idle_enter();
+		if (!idle_uses_rcu)
+			rcu_idle_enter();
+
 		while (!need_resched() && !cpu_should_die()) {
 			ppc64_runlatch_off();
@@ -102,10 +102,9 @@ void cpu_idle(void)
 		HMT_medium();
 		ppc64_runlatch_on();
-		if (idle_uses_rcu)
-			tick_nohz_idle_exit();
-		else
-			tick_nohz_idle_exit_norcu();
+		if (!idle_uses_rcu)
+			rcu_idle_exit();
+		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		if (cpu_should_die())
 			cpu_die();
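Note that the exit path mirrors the entry path in reverse order: rcu_idle_exit() is issued before tick_nohz_idle_exit(), just as tick_nohz_idle_enter() precedes rcu_idle_enter() on entry. Both RCU transitions remain guarded by !idle_uses_rcu, so platform idle code that itself relies on RCU is never run inside an RCU extended quiescent state.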