From c856bafae7f5b3f59ac1d99279a9b99b3b36ad12 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Thu, 21 Jun 2012 08:19:05 -0700
Subject: rcu: Allow RCU grace-period cleanup to be preempted

RCU grace-period cleanup is currently carried out with interrupts
disabled, which can result in excessive latency spikes on large
systems (many hundreds or thousands of CPUs).  This patch therefore
makes RCU grace-period cleanup preemptible, including voluntary
preemption points, which should eliminate those latency spikes.
Similar spikes from forcing of quiescent states will be dealt with
by later patches.

Updated to replace uses of spin_lock_irqsave() with spin_lock_irq(),
as suggested by Peter Zijlstra.

Reported-by: Mike Galbraith
Reported-by: Dimitri Sivanich
Signed-off-by: Paul E. McKenney
Reviewed-by: Josh Triplett
---
 kernel/rcutree.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52c3102dc5f..ddc6acc85d2 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1151,7 +1151,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 		 * completed.
 		 */
 		if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			raw_spin_unlock_irq(&rnp->lock);
 
 			/*
 			 * Propagate new ->completed value to rcu_node
@@ -1160,14 +1160,13 @@
 			 * to process their callbacks.
 			 */
 			rcu_for_each_node_breadth_first(rsp, rnp) {
-				/* irqs already disabled. */
-				raw_spin_lock(&rnp->lock);
+				raw_spin_lock_irq(&rnp->lock);
 				rnp->completed = rsp->gpnum;
-				/* irqs remain disabled. */
-				raw_spin_unlock(&rnp->lock);
+				raw_spin_unlock_irq(&rnp->lock);
+				cond_resched();
 			}
 			rnp = rcu_get_root(rsp);
-			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+			raw_spin_lock_irq(&rnp->lock);
 		}
 
 		rsp->completed = rsp->gpnum; /* Declare grace period done. */
--
cgit v1.2.3-70-g09d2
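
The pattern the patch applies is worth seeing in isolation: rather than
publishing the new ->completed value to every node under one long
interrupts-off critical section, the loop takes and releases each node's
lock in turn and inserts a voluntary preemption point between nodes, so
lock hold times stay bounded regardless of how many nodes exist.  Below is
a minimal user-space C sketch of that pattern, not the kernel's actual
code: the node array, NNODES, and publish_completed() are hypothetical
stand-ins for the rcu_node tree walk, pthread mutexes stand in for raw
spinlocks held with interrupts disabled, and sched_yield() stands in for
cond_resched().

/* Build with: gcc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NNODES 8	/* hypothetical stand-in for the rcu_node tree size */

struct node {
	pthread_mutex_t lock;
	unsigned long completed;
};

static struct node nodes[NNODES];

/*
 * Analogue of the patched cleanup loop: take each node's lock just long
 * enough to publish the new grace-period number, drop it, then offer
 * the scheduler a chance to run something else before the next node
 * (cond_resched() in the kernel, sched_yield() here).
 */
static void publish_completed(unsigned long gpnum)
{
	for (int i = 0; i < NNODES; i++) {
		pthread_mutex_lock(&nodes[i].lock);
		nodes[i].completed = gpnum;
		pthread_mutex_unlock(&nodes[i].lock);
		sched_yield();	/* voluntary preemption point */
	}
}

int main(void)
{
	for (int i = 0; i < NNODES; i++)
		pthread_mutex_init(&nodes[i].lock, NULL);
	publish_completed(1);
	printf("node 0 completed = %lu\n", nodes[0].completed);
	return 0;
}

The trade-off is the same one the patch accepts: a concurrent observer may
briefly see some nodes carrying the new value and others the old one, which
is fine as long as each node is self-consistent under its own lock.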