author     Kirill Tkhai <ktkhai@parallels.com>        2014-03-06 13:31:55 +0400
committer  Ingo Molnar <mingo@kernel.org>             2014-03-11 12:05:37 +0100
commit     e4aa358b6c23f98b2715594f6b1e9a4996a55f04 (patch)
tree       6e3de5eaeccde610cf01ec06994b587084104a8b /kernel
parent     734ff2a71f9e6aa6fedfa5a9a34818b8586516d5 (diff)
sched/fair: Push down check for high priority class task into idle_balance()
We close the idle_exit_fair() bracket when we have pulled something or when we have received a task of a higher priority class.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/1394098315.19290.10.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
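To make the new convention concrete: after this patch idle_balance() returns a negative value when a task of a higher priority class has appeared, a positive count when CFS tasks were pulled, and zero otherwise. Below is a minimal user-space sketch (plain C, not kernel code) of how the caller interprets that value; fair_idle_path() is a made-up name standing in for the idle: path of pick_next_task_fair(), and the canned results array stands in for idle_balance() itself.

/*
 * Sketch only. Return-value convention given to idle_balance() by this patch:
 *   < 0  -> a task of a higher priority class appeared; the caller must
 *           return RETRY_TASK so the scheduler restarts class selection
 *   > 0  -> CFS tasks were pulled; retry the pick_next_entity() loop
 *   == 0 -> nothing to run; return NULL and go idle
 */
#include <stdio.h>

static const char *fair_idle_path(int new_tasks)
{
        /* Mirrors the branching added to the idle: label in fair.c below. */
        if (new_tasks < 0)
                return "RETRY_TASK";    /* higher-class task showed up */
        if (new_tasks > 0)
                return "goto again";    /* pulled CFS work, pick again */
        return "NULL (go idle)";
}

int main(void)
{
        /* Canned idle_balance() results: higher-class task, 2 pulled tasks, nothing. */
        int results[] = { -1, 2, 0 };

        for (unsigned i = 0; i < sizeof(results) / sizeof(results[0]); i++)
                printf("idle_balance() -> %2d : %s\n", results[i], fair_idle_path(results[i]));
        return 0;
}

Running it prints RETRY_TASK for the negative case, goto again for the pulled case, and NULL (go idle) otherwise, which is exactly the branching the fair.c hunk below introduces.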
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/fair.c        15
-rw-r--r--   kernel/sched/idle_task.c    1
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d8482e1c575..b956e70fc50 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4787,17 +4787,16 @@ simple:
         return p;
 
 idle:
+        new_tasks = idle_balance(rq);
         /*
          * Because idle_balance() releases (and re-acquires) rq->lock, it is
          * possible for any higher priority task to appear. In that case we
          * must re-start the pick_next_entity() loop.
          */
-        new_tasks = idle_balance(rq);
-
-        if (rq->nr_running != rq->cfs.h_nr_running)
+        if (new_tasks < 0)
                 return RETRY_TASK;
 
-        if (new_tasks)
+        if (new_tasks > 0)
                 goto again;
 
         return NULL;
@@ -6728,8 +6727,14 @@ static int idle_balance(struct rq *this_rq)
         this_rq->max_idle_balance_cost = curr_cost;
 
 out:
-        if (pulled_task)
+        /* Is there a task of a high priority class? */
+        if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+                pulled_task = -1;
+
+        if (pulled_task) {
+                idle_exit_fair(this_rq);
                 this_rq->idle_stamp = 0;
+        }
 
         return pulled_task;
 }
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 1f372588283..879f2b75266 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -29,7 +29,6 @@ pick_next_task_idle(struct rq *rq, struct task_struct *prev)
         put_prev_task(rq, prev);
 
         schedstat_inc(rq, sched_goidle);
-        idle_enter_fair(rq);
         return rq->idle;
 }
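The "bracket" the commit message refers to is the idle_enter_fair()/idle_exit_fair() pair, which (roughly) updates the runqueue's runnable-average accounting when a CPU enters and leaves CFS idle; after this patch the exit side is closed inside idle_balance() whenever the CPU will not actually stay idle, including the new pulled_task = -1 case, while the direct idle_enter_fair() call is dropped from pick_next_task_idle() above. The sketch below is a user-space illustration of that pairing only: the struct, the in_cfs_idle flag, the stub helpers, and the 'pulled' parameter are all made up, and the place where the real kernel opens the bracket is not shown in this diff.

#include <stdio.h>

/* Stub rq with only what the illustration needs; not the kernel's struct rq. */
struct rq_stub {
        unsigned int nr_running;        /* all runnable tasks */
        unsigned int cfs_h_nr_running;  /* CFS tasks only */
        int in_cfs_idle;                /* made-up flag; the real helpers update load stats */
        unsigned long idle_stamp;
};

/* Stubs: the real idle_enter_fair()/idle_exit_fair() update runnable averages. */
static void idle_enter_fair_stub(struct rq_stub *rq) { rq->in_cfs_idle = 1; }
static void idle_exit_fair_stub(struct rq_stub *rq)  { rq->in_cfs_idle = 0; }

/* Shape of the idle_balance() tail after the patch; 'pulled' replaces the real balancing work. */
static int idle_balance_stub(struct rq_stub *rq, int pulled)
{
        int pulled_task = pulled;

        /* Is there a task of a high priority class? */
        if (rq->nr_running != rq->cfs_h_nr_running)
                pulled_task = -1;

        if (pulled_task) {
                idle_exit_fair_stub(rq);        /* close the bracket: CPU won't stay idle */
                rq->idle_stamp = 0;
        }
        return pulled_task;
}

int main(void)
{
        struct rq_stub rq = { .nr_running = 1, .cfs_h_nr_running = 0 };

        idle_enter_fair_stub(&rq);              /* open the bracket (done elsewhere in the kernel) */
        int ret = idle_balance_stub(&rq, 0);    /* nothing pulled, but a higher-class task is present */

        printf("ret=%d, cfs-idle bracket open=%d\n", ret, rq.in_cfs_idle);
        return 0;
}

With nothing pulled but an RT/deadline task runnable, the stub returns -1 and the bracket is closed, matching the behaviour the fair.c hunk gives the real idle_balance().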