path: root/include/linux/rcutiny.h
author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-03-27 16:02:08 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-05-02 14:43:23 -0700
commit     616c310e83b872024271c915c1b9ab505b9efad9 (patch)
tree       1339bc7b3bef920b4641a5af2f182e9dfa2a6632 /include/linux/rcutiny.h
parent     66f75a5d028beaf67c931435fdc3e7823125730c (diff)
rcu: Move PREEMPT_RCU preemption to switch_to() invocation
Currently, PREEMPT_RCU readers are enqueued upon entry to the scheduler.
This is inefficient because enqueuing is required only if there is a
context switch, and entry to the scheduler does not guarantee a context
switch.  The commit therefore moves the enqueuing to immediately precede
the call to switch_to() from the scheduler.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
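For context, a minimal sketch of the scheduler-side placement described above is shown below. The wrapper name note_preempted_rcu_reader() and the simplified context-switch function are illustrative assumptions only; the commit's actual kernel/sched/core.c hunk is outside this file's diff.

/*
 * Illustrative sketch only (not the commit's kernel/sched/core.c code):
 * the PREEMPT_RCU reader is enqueued immediately before switch_to(),
 * i.e. only once a context switch is actually going to happen, instead
 * of on every entry to the scheduler.
 */
static inline void note_preempted_rcu_reader(void)
{
#ifdef CONFIG_PREEMPT_RCU
	rcu_preempt_note_context_switch();	/* enqueue the preempted reader */
#endif	/* TINY_RCU / non-preemptible RCU: nothing to enqueue */
}

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	/* ... mm and architecture state handoff elided ... */
	note_preempted_rcu_reader();	/* new placement: right before the switch */
	switch_to(prev, next, prev);	/* perform the actual CPU context switch */
}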
Diffstat (limited to 'include/linux/rcutiny.h')
-rw-r--r--  include/linux/rcutiny.h  |  6 ------
1 file changed, 0 insertions(+), 6 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e93df77176d..080b5bdda28 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,10 +87,6 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 #ifdef CONFIG_TINY_RCU
 
-static inline void rcu_preempt_note_context_switch(void)
-{
-}
-
 static inline void exit_rcu(void)
 {
 }
@@ -102,7 +98,6 @@ static inline int rcu_needs_cpu(int cpu)
 
 #else /* #ifdef CONFIG_TINY_RCU */
 
-void rcu_preempt_note_context_switch(void);
 extern void exit_rcu(void);
 int rcu_preempt_needs_cpu(void);
 
@@ -116,7 +111,6 @@ static inline int rcu_needs_cpu(int cpu)
 static inline void rcu_note_context_switch(int cpu)
 {
 	rcu_sched_qs(cpu);
-	rcu_preempt_note_context_switch();
 }
 
 /*
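
The net effect of the last hunk, read straight from the diff: with the PREEMPT_RCU hook removed, the TINY_RCU build's rcu_note_context_switch() reduces to the rcu_sched quiescent-state report alone.

/* Resulting TINY_RCU definition after this patch (taken from the hunk above). */
static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);	/* report a quiescent state for RCU-sched */
}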