From 606dba2e289446600a0b68422ed2019af5355c12 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Sat, 11 Feb 2012 06:05:00 +0100
Subject: sched: Push put_prev_task() into pick_next_task()

In order to avoid having to do put/set on a whole cgroup hierarchy when
we context switch, push the put into pick_next_task() so that both
operations are in the same function. Further changes then allow us to
possibly optimize away redundant work.

Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar
---
 kernel/sched/idle_task.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 516c3d9ceea..e5c922ac40c 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -33,8 +33,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_task(rq->idle);
 }
 
-static struct task_struct *pick_next_task_idle(struct rq *rq)
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 {
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
 	schedstat_inc(rq, sched_goidle);
 #ifdef CONFIG_SMP
 	/* Trigger the post schedule to do an idle_enter for CFS */
--
cgit v1.2.3-70-g09d2

From 6c3b4d44ba2838f00614a5a2d777d4401e0bfd71 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 17 Jan 2014 15:09:39 +0100
Subject: sched: Clean up idle task SMP logic

The idle post_schedule flag is just a vile waste of time; furthermore,
it appears unneeded. Move the idle_enter_fair() call into
pick_next_task_idle().

Signed-off-by: Peter Zijlstra
Cc: Daniel Lezcano
Cc: Vincent Guittot
Cc: alex.shi@linaro.org
Cc: mingo@kernel.org
Cc: Steven Rostedt
Link: http://lkml.kernel.org/n/tip-aljykihtxJt3mkokxi0qZurb@git.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/idle_task.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index e5c922ac40c..721371bf03b 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -19,11 +19,6 @@ static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
 	idle_exit_fair(rq);
 	rq_last_tick_reset(rq);
 }
-
-static void post_schedule_idle(struct rq *rq)
-{
-	idle_enter_fair(rq);
-}
 #endif /* CONFIG_SMP */
 /*
  * Idle tasks are unconditionally rescheduled:
  */
@@ -41,8 +36,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 	schedstat_inc(rq, sched_goidle);
 #ifdef CONFIG_SMP
-	/* Trigger the post schedule to do an idle_enter for CFS */
-	rq->post_schedule = 1;
+	idle_enter_fair(rq);
 #endif
 	return rq->idle;
 }
@@ -106,7 +100,6 @@ const struct sched_class idle_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
 	.pre_schedule		= pre_schedule_idle,
-	.post_schedule		= post_schedule_idle,
 #endif
 	.set_curr_task		= set_curr_task_idle,
--
cgit v1.2.3-70-g09d2
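The calling convention these two patches establish is easiest to see from
the consumer side. A minimal sketch of the dispatch loop, simplified from
kernel/sched/core.c (the fair-class fast path and other details are
omitted here):

/*
 * Simplified sketch, not the verbatim kernel code. Each class's
 * ->pick_next_task(rq, prev) now puts @prev itself, so put and pick
 * live in one function and a class can later skip redundant put/set
 * work on a cgroup hierarchy it is about to pick from again.
 */
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p)
			return p;	/* @prev has been put by @class */
	}

	BUG();	/* the idle class should always have a task to return */
}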
From 38033c37faab850ed5d33bb675c4de6c66be84d8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 23 Jan 2014 20:32:21 +0100
Subject: sched: Push down pre_schedule() and idle_balance()

This patch merges idle_balance() and pre_schedule(), and pushes both of
them into pick_next_task().

Conceptually pre_schedule() and idle_balance() are rather similar: both
are used to pull more work onto the current CPU.

We cannot, however, simply move idle_balance() into pre_schedule_fair(),
since there is no guarantee the last runnable task is a fair task, and
thus we would miss newidle balances. Similarly, the dl and rt
pre_schedule calls must be run before idle_balance(), since their
respective tasks have higher priority and it would not do to delay their
execution searching for less important tasks first.

However, by noticing that pick_next_task() already traverses the
sched_class hierarchy in the right order, we can get the right behaviour
and do away with both calls.

We must, however, change the special case optimization to also require
that prev is of sched_class_fair, otherwise we can miss doing a dl or rt
pull where we needed one.

Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Andrew Morton
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/n/tip-a8k6vvaebtn64nie345kx1je@git.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c      | 26 ++------------------------
 kernel/sched/deadline.c  | 15 +++++++--------
 kernel/sched/fair.c      | 26 ++++++++++++++++++++++----
 kernel/sched/idle_task.c | 12 +++++-------
 kernel/sched/rt.c        | 16 ++++++++--------
 kernel/sched/sched.h     |  1 -
 6 files changed, 44 insertions(+), 52 deletions(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index dedb5f07666..3068f37f7c5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2169,13 +2169,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
-	if (prev->sched_class->pre_schedule)
-		prev->sched_class->pre_schedule(rq, prev);
-}
-
 /* rq->lock is NOT held, but preemption is disabled */
 static inline void post_schedule(struct rq *rq)
 {
@@ -2193,10 +2186,6 @@ static inline void post_schedule(struct rq *rq)
 
 #else
 
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
 static inline void post_schedule(struct rq *rq)
 {
 }
@@ -2592,7 +2581,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely(prev->sched_class == &fair_sched_class &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
 		if (likely(p))
 			return p;
@@ -2695,18 +2685,6 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	pre_schedule(rq, prev);
-
-	if (unlikely(!rq->nr_running)) {
-		/*
-		 * We must set idle_stamp _before_ calling idle_balance(), such
-		 * that we measure the duration of idle_balance() as idle time.
-		 */
-		rq->idle_stamp = rq_clock(rq);
-		if (idle_balance(rq))
-			rq->idle_stamp = 0;
-	}
-
 	if (prev->on_rq || rq->skip_clock_update < 0)
 		update_rq_clock(rq);

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 50797d57608..ed31ef66ab9 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -944,6 +944,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	resched_task(rq->curr);
 }
 
+static int pull_dl_task(struct rq *this_rq);
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -998,6 +1000,11 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
+#ifdef CONFIG_SMP
+	if (dl_task(prev))
+		pull_dl_task(rq);
+#endif
+
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
 
@@ -1429,13 +1436,6 @@ skip:
 	return ret;
 }
 
-static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
-{
-	/* Try to pull other tasks here */
-	if (dl_task(prev))
-		pull_dl_task(rq);
-}
-
 static void post_schedule_dl(struct rq *rq)
 {
 	push_dl_tasks(rq);
@@ -1628,7 +1628,6 @@ const struct sched_class dl_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_dl,
 	.rq_online		= rq_online_dl,
 	.rq_offline		= rq_offline_dl,
-	.pre_schedule		= pre_schedule_dl,
 	.post_schedule		= post_schedule_dl,
 	.task_woken		= task_woken_dl,
 #endif

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a81b241ff70..43b49fe077a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2577,7 +2577,8 @@ void idle_exit_fair(struct rq *this_rq)
 	update_rq_runnable_avg(this_rq, 0);
 }
 
-#else
+#else /* CONFIG_SMP */
+
 static inline void update_entity_load_avg(struct sched_entity *se,
 					  int update_cfs_rq) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2589,7 +2590,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
 					      int force_update) {}
 
-#endif
+#endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -4682,9 +4683,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	struct sched_entity *se;
 	struct task_struct *p;
 
+again: __maybe_unused
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (!cfs_rq->nr_running)
-		return NULL;
+		goto idle;
 
 	if (!prev || prev->sched_class != &fair_sched_class)
 		goto simple;
 
 	/*
@@ -4760,7 +4762,7 @@ simple:
 #endif
 
 	if (!cfs_rq->nr_running)
-		return NULL;
+		goto idle;
 
 	if (prev)
 		prev->sched_class->put_prev_task(rq, prev);
@@ -4777,6 +4779,22 @@ simple:
 		hrtick_start_fair(rq, p);
 
 	return p;
+
+idle:
+#ifdef CONFIG_SMP
+	idle_enter_fair(rq);
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	rq->idle_stamp = rq_clock(rq);
+	if (idle_balance(rq)) { /* drops rq->lock */
+		rq->idle_stamp = 0;
+		goto again;
+	}
+#endif
+
+	return NULL;
 }
 
 /*

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 721371bf03b..f7d03af79a5 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -13,13 +13,8 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
-
-static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
-{
-	idle_exit_fair(rq);
-	rq_last_tick_reset(rq);
-}
 #endif /* CONFIG_SMP */
+
 /*
  * Idle tasks are unconditionally rescheduled:
  */
@@ -56,6 +51,10 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
+#ifdef CONFIG_SMP
+	idle_exit_fair(rq);
+	rq_last_tick_reset(rq);
+#endif
 }
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
@@ -99,7 +98,6 @@ const struct sched_class idle_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
-	.pre_schedule		= pre_schedule_idle,
 #endif
 
 	.set_curr_task		= set_curr_task_idle,

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a15ca1c0c7b..72f9ec75997 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -229,6 +229,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -1330,6 +1332,12 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
+#ifdef CONFIG_SMP
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	if (rq->rt.highest_prio.curr > prev->prio)
+		pull_rt_task(rq);
+#endif
+
 	if (!rt_rq->rt_nr_running)
 		return NULL;
 
@@ -1721,13 +1729,6 @@ skip:
 	return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
-		pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
 	push_rt_tasks(rq);
@@ -2004,7 +2005,6 @@ const struct sched_class rt_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
-	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c534cf4181a..1bf34c257d3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1118,7 +1118,6 @@ struct sched_class {
 	int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
-	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
--
cgit v1.2.3-70-g09d2

From 3f1d2a318171bf61850d4e5a72031271e5aada76 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Feb 2014 10:49:30 +0100
Subject: sched: Fix hotplug task migration

Dan Carpenter reported:

> kernel/sched/rt.c:1347 pick_next_task_rt() warn: variable dereferenced before check 'prev' (see line 1338)
> kernel/sched/deadline.c:1011 pick_next_task_dl() warn: variable dereferenced before check 'prev' (see line 1005)

Kirill also spotted that migrate_tasks() will have an instant NULL
deref because pick_next_task() will immediately deref prev.

Instead of fixing all the corner cases that arise because
migrate_tasks() can pass in a NULL prev task in the unlikely case of
hot-un-plug, provide a fake task such that we can remove all the NULL
checks from the far more common paths.

A further problem, not previously spotted, is that because we pushed
pre_schedule() and idle_balance() into pick_next_task() we now need to
avoid those getting called and pulling more tasks onto our dying CPU.

We avoid pull_{dl,rt}_task() by setting fake_task.prio to MAX_PRIO+1.

We also note that since we call pick_next_task() exactly as many times
as we have runnable tasks present, we should never land in
idle_balance().

Fixes: 38033c37faab ("sched: Push down pre_schedule() and idle_balance()")
Cc: Juri Lelli
Cc: Ingo Molnar
Cc: Steven Rostedt
Reported-by: Kirill Tkhai
Reported-by: Dan Carpenter
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/20140212094930.GB3545@laptop.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner
---
 kernel/sched/core.c      | 18 +++++++++++++++++-
 kernel/sched/deadline.c  |  3 +--
 kernel/sched/fair.c      |  5 ++---
 kernel/sched/idle_task.c |  3 +--
 kernel/sched/rt.c        |  3 +--
 kernel/sched/sched.h     |  5 +++++
 kernel/sched/stop_task.c |  3 +--
 7 files changed, 28 insertions(+), 12 deletions(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fb9764fbc53..49db434a35d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4681,6 +4681,22 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static const struct sched_class fake_sched_class = {
+	.put_prev_task = put_prev_task_fake,
+};
+
+static struct task_struct fake_task = {
+	/*
+	 * Avoid pull_{rt,dl}_task()
+	 */
+	.prio = MAX_PRIO + 1,
+	.sched_class = &fake_sched_class,
+};
+
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -4721,7 +4737,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 		if (rq->nr_running == 1)
 			break;
 
-		next = pick_next_task(rq, NULL);
+		next = pick_next_task(rq, &fake_task);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
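An aside on why prio = MAX_PRIO + 1 defeats the pulls: both tests that
the dl and rt pick functions apply to prev are priority based. Sketched
here in the helper form they take in a follow-up patch (dc877341, later
in this series):

/* The two tests fake_task is built to fail; sketch only. */
static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);	/* dl prios are negative; MAX_PRIO+1 is not */
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* no queued rt prio can exceed MAX_PRIO, let alone MAX_PRIO + 1 */
	return rq->rt.highest_prio.curr > prev->prio;
}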
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ed31ef66ab9..bfeb84ecc32 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1008,8 +1008,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
 
-	if (prev)
-		prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 
 	dl_se = pick_next_dl_entity(rq, dl_rq);
 	BUG_ON(!dl_se);

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40c758bbdd5..e884e45982a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4690,7 +4690,7 @@ again:
 	if (!cfs_rq->nr_running)
 		goto idle;
 
-	if (!prev || prev->sched_class != &fair_sched_class)
+	if (prev->sched_class != &fair_sched_class)
 		goto simple;
 
 	/*
@@ -4766,8 +4766,7 @@ simple:
 	if (!cfs_rq->nr_running)
 		goto idle;
 
-	if (prev)
-		prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 
 	do {
 		se = pick_next_entity(cfs_rq, NULL);

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index f7d03af79a5..53ff9e7c76d 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -26,8 +26,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 {
-	if (prev)
-		prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 
 	schedstat_inc(rq, sched_goidle);
 #ifdef CONFIG_SMP

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 72f9ec75997..65c2d6881ac 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1344,8 +1344,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (rt_rq_throttled(rt_rq))
 		return NULL;
 
-	if (prev)
-		prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 
 	p = _pick_next_task_rt(rq);

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 92018f9821e..d276147ba5e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1147,6 +1147,11 @@ struct sched_class {
 #endif
 };
 
+static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+	prev->sched_class->put_prev_task(rq, prev);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)

diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index a4147c9d201..d6ce65dde54 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -31,8 +31,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 	if (!stop || !stop->on_rq)
 		return NULL;
 
-	if (prev)
-		prev->sched_class->put_prev_task(rq, prev);
+	put_prev_task(rq, prev);
 
 	stop->se.exec_start = rq_clock_task(rq);
--
cgit v1.2.3-70-g09d2
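One subtlety of the series so far is worth an illustration: the
pick_next_task() fast path only short-circuits to the fair class when
prev itself was fair. A hedged, annotated rendering of the check
introduced two patches back (commentary only, not new kernel code):

/*
 * Suppose prev is an RT task and only fair tasks remain queued here:
 * nr_running == cfs.h_nr_running holds, yet jumping straight to
 * fair_sched_class.pick_next_task() would bypass pick_next_task_rt()
 * and hence the pull_rt_task() that might bring a higher-priority task
 * to this CPU. A fair prev cannot have lowered this rq's rt/dl
 * priority, so requiring it makes the shortcut safe.
 */
if (likely(prev->sched_class == &fair_sched_class &&
	   rq->nr_running == rq->cfs.h_nr_running)) {
	p = fair_sched_class.pick_next_task(rq, prev);
	if (likely(p))
		return p;
}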
From dc87734106bb6e97c92d8bd81f261fb71976ec2c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 12 Feb 2014 15:47:29 +0100
Subject: sched: Remove some #ifdeffery

Remove a few gratuitous #ifdefs in pick_next_task*().

Cc: Ingo Molnar
Cc: Steven Rostedt
Cc: Juri Lelli
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/n/tip-nnzddp5c4fijyzzxxrwlxghf@git.kernel.org
Signed-off-by: Thomas Gleixner
---
 kernel/sched/deadline.c  | 31 +++++++++++++++++++++++++------
 kernel/sched/idle_task.c |  4 ----
 kernel/sched/rt.c        | 41 ++++++++++++++++++++++++++++-----------
 kernel/sched/sched.h     |  5 +++++
 4 files changed, 60 insertions(+), 21 deletions(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index bfeb84ecc32..3185b775dbf 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -214,6 +214,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
 
 static int push_dl_task(struct rq *rq);
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+	rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
 #else
 
 static inline
@@ -236,6 +246,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 }
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -1000,10 +1023,8 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 
 	dl_rq = &rq->dl;
 
-#ifdef CONFIG_SMP
-	if (dl_task(prev))
+	if (need_pull_dl_task(rq, prev))
 		pull_dl_task(rq);
-#endif
 
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
@@ -1024,9 +1045,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		start_hrtick_dl(rq, p);
 #endif
 
-#ifdef CONFIG_SMP
-	rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+	set_post_schedule(rq);
 
 	return p;
 }

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 53ff9e7c76d..1f372588283 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -29,9 +29,7 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 	put_prev_task(rq, prev);
 
 	schedstat_inc(rq, sched_goidle);
-#ifdef CONFIG_SMP
 	idle_enter_fair(rq);
-#endif
 	return rq->idle;
 }
 
@@ -50,10 +48,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-#ifdef CONFIG_SMP
 	idle_exit_fair(rq);
 	rq_last_tick_reset(rq);
-#endif
 }
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 65c2d6881ac..3e488ca6050 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -231,6 +231,12 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 static int pull_rt_task(struct rq *this_rq);
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -317,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -361,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1332,11 +1360,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-#ifdef CONFIG_SMP
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
+	if (need_pull_rt_task(rq, prev))
 		pull_rt_task(rq);
-#endif
 
 	if (!rt_rq->rt_nr_running)
 		return NULL;
@@ -1352,13 +1377,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
-#endif
+	set_post_schedule(rq);
 
 	return p;
 }

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d276147ba5e..caf4abda45e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1172,6 +1172,11 @@ extern void trigger_load_balance(struct rq *rq);
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
+#else
+
+static inline void idle_enter_fair(struct rq *rq) { }
+static inline void idle_exit_fair(struct rq *rq) { }
+
 #endif
 
 extern void sysrq_sched_debug_show(void);
--
cgit v1.2.3-70-g09d2
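The device used throughout this patch is the standard kernel stub
pattern: a real helper under CONFIG_SMP and an empty inline otherwise,
so call sites compile unconditionally and the UP build optimizes the
call away. The rt.c instance, reduced to its shape:

#ifdef CONFIG_SMP
static inline void set_post_schedule(struct rq *rq)
{
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push.
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}
#else
static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

pick_next_task_rt() can then call set_post_schedule(rq) with no #ifdef
of its own, as the hunks above show.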
From e4aa358b6c23f98b2715594f6b1e9a4996a55f04 Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Thu, 6 Mar 2014 13:31:55 +0400
Subject: sched/fair: Push down check for high priority class task into idle_balance()

Close the idle_exit_fair() bracket in case we have pulled something or
we have received a task of a higher-priority class.

Signed-off-by: Kirill Tkhai
Signed-off-by: Peter Zijlstra
Cc: Vincent Guittot
Link: http://lkml.kernel.org/r/1394098315.19290.10.camel@tkhai
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c      | 15 ++++++++++-----
 kernel/sched/idle_task.c |  1 -
 2 files changed, 10 insertions(+), 6 deletions(-)

(limited to 'kernel/sched/idle_task.c')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d8482e1c575..b956e70fc50 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4787,17 +4787,16 @@ simple:
 	return p;
 
 idle:
+	new_tasks = idle_balance(rq);
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
 	 * must re-start the pick_next_entity() loop.
 	 */
-	new_tasks = idle_balance(rq);
-
-	if (rq->nr_running != rq->cfs.h_nr_running)
+	if (new_tasks < 0)
 		return RETRY_TASK;
 
-	if (new_tasks)
+	if (new_tasks > 0)
 		goto again;
 
 	return NULL;
@@ -6728,8 +6727,14 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->max_idle_balance_cost = curr_cost;
 
 out:
-	if (pulled_task)
+	/* Is there a task of a high priority class? */
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+		pulled_task = -1;
+
+	if (pulled_task) {
+		idle_exit_fair(this_rq);
 		this_rq->idle_stamp = 0;
+	}
 
 	return pulled_task;
 }

diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 1f372588283..879f2b75266 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -29,7 +29,6 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 	put_prev_task(rq, prev);
 
 	schedstat_inc(rq, sched_goidle);
-	idle_enter_fair(rq);
 	return rq->idle;
 }
--
cgit v1.2.3-70-g09d2
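After this last change, idle_balance()'s return value is effectively a
tri-state on which pick_next_task_fair() dispatches. Condensed from the
fair.c hunks above (sketch, not literal kernel code):

	new_tasks = idle_balance(rq);	/* drops and retakes rq->lock */

	if (new_tasks < 0)		/* a task of a higher class appeared */
		return RETRY_TASK;	/* redo class selection from the top */

	if (new_tasks > 0)		/* we pulled fair tasks */
		goto again;		/* pick one of them */

	return NULL;			/* still nothing to do: go idle */

Both non-zero cases mean this CPU is not actually going idle, which is
why idle_balance() itself now calls idle_exit_fair() and clears
idle_stamp for them, closing the accounting bracket in one place.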