author    Daniel Lezcano <daniel.lezcano@linaro.org>  2014-01-17 10:04:03 +0100
committer Ingo Molnar <mingo@kernel.org>              2014-02-10 16:17:07 +0100
commit    3c4017c13f91069194fce3160944efec50f15a6e (patch)
tree      26c4c75063dcb2a3e0244b4776b76c5a218bbb58 /kernel
parent    e5fc66119ec97054eefc83f173a7ee9e133c3c3a (diff)
sched: Move rq->idle_stamp up to the core
idle_balance() modifies the rq->idle_stamp field, making this information
shared across core.c and fair.c. Since the previous patch lets us know
whether the CPU is going to idle or not, encapsulate the rq->idle_stamp
handling in core.c by moving it up to the caller.

The idle_balance() function now returns true if a balancing occurred and
the CPU won't be idle, and false if no balance happened and the CPU is
going idle.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: alex.shi@linaro.org
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
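In effect the patch inverts the ownership of rq->idle_stamp: idle_balance()
used to both set and clear the stamp itself, whereas now it only reports
whether it pulled a task and the caller does the bookkeeping. A minimal
sketch of the before/after contract, distilled from the diff below
(simplified, not the literal kernel code):

	/* Before: the callee owns the stamp. */
	void idle_balance(struct rq *this_rq)
	{
		this_rq->idle_stamp = rq_clock(this_rq);
		/* ... try to pull tasks; clear idle_stamp if one was pulled ... */
	}

	/* After: the callee only reports the outcome ... */
	int idle_balance(struct rq *this_rq)
	{
		int pulled_task = 0;
		/* ... try to pull tasks, counting them in pulled_task ... */
		return pulled_task;	/* non-zero: the CPU will not go idle */
	}

	/* ... and the caller in core.c keeps all idle_stamp handling local. */
	rq->idle_stamp = rq_clock(rq);
	if (idle_balance(rq))
		rq->idle_stamp = 0;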
Diffstat (limited to 'kernel')
 kernel/sched/core.c  | 11 +++++++++--
 kernel/sched/fair.c  | 14 ++++++--------
 kernel/sched/sched.h |  2 +-
 3 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 74dd565c2e1..417cf657a60 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2704,8 +2704,15 @@ need_resched:
 
 	pre_schedule(rq, prev);
 
-	if (unlikely(!rq->nr_running))
-		idle_balance(rq);
+	if (unlikely(!rq->nr_running)) {
+		/*
+		 * We must set idle_stamp _before_ calling idle_balance(), such
+		 * that we measure the duration of idle_balance() as idle time.
+		 */
+		rq->idle_stamp = rq_clock(rq);
+		if (idle_balance(rq))
+			rq->idle_stamp = 0;
+	}
 
 	put_prev_task(rq, prev);
 	next = pick_next_task(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5ebc6817c03..04fea7744a9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6531,7 +6531,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(struct rq *this_rq)
+int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
-	this_rq->idle_stamp = rq_clock(this_rq);
-
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return;
+		return 0;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6580,10 +6578,8 @@ void idle_balance(struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task) {
-			this_rq->idle_stamp = 0;
+		if (pulled_task)
 			break;
-		}
 	}
 	rcu_read_unlock();
@@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq)
 	 * A task could have be enqueued in the meantime
 	 */
 	if (this_rq->nr_running && !pulled_task)
-		return;
+		return 1;
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq)
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
+
+	return pulled_task;
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 82c0e02f2a5..bb89991ee40 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(struct rq *this_rq);
+extern int idle_balance(struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
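For context on why the placement of the stamp matters: rq->idle_stamp is
consumed on the next wakeup, where the measured idle period is folded into
rq->avg_idle, the estimate that gates idle_balance() via the
sysctl_sched_migration_cost check above. The consumer looks roughly like
this (a paraphrase of the wakeup path of that kernel era, not part of this
patch):

	if (rq->idle_stamp) {
		/* How long was the CPU idle? Fold it into the running average. */
		u64 delta = rq_clock(rq) - rq->idle_stamp;

		update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}

Setting the stamp before calling idle_balance(), as the new comment in
core.c spells out, therefore charges the cost of the balance attempt itself
to idle time.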