author	Peter Zijlstra <peterz@infradead.org>	2014-02-11 16:11:48 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2014-02-21 21:43:17 +0100
commit	6e83125c6b151afa139c8852c099d6d92954fe3b (patch)
tree	347addb4bc27edcfec493f328aba7531b39ef3f4 /kernel/sched/fair.c
parent	eb7a59b2c888c2518ba2c9d0020343ca71aa9dee (diff)
sched/fair: Remove idle_balance() declaration in sched.h
Remove idle_balance() from the public life; also reduce some #ifdef
clutter by folding the pick_next_task_fair() idle path into
idle_balance().

Cc: mingo@kernel.org
Reported-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140211151148.GP27965@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
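The #ifdef reduction follows a standard kernel pattern: rather than guarding the call site in pick_next_task_fair() with #ifdef CONFIG_SMP, the !SMP branch gains a static inline stub of idle_balance() that returns 0, so the caller compiles cleanly in both configurations. A minimal, self-contained sketch of that pattern (the function and messages here are hypothetical, for illustration only, not kernel code):

#include <stdio.h>

#ifdef CONFIG_SMP
static int pull_remote_work(void)
{
	/* SMP build: pretend we migrated work from a busier CPU. */
	return 1;
}
#else
static inline int pull_remote_work(void)
{
	/* UP build: nowhere to pull from; the stub keeps callers clean. */
	return 0;
}
#endif

int main(void)
{
	/* The call site needs no #ifdef in either configuration. */
	if (pull_remote_work())
		puts("pulled work, run it");
	else
		puts("nothing to pull, go idle");
	return 0;
}

Build with and without -DCONFIG_SMP to see both paths; the call site is identical either way, which is exactly what the hunks below achieve for pick_next_task_fair().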
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	47
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 280da893cd0..40c758bbdd5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2374,13 +2374,13 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
 		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
 	}
 }
-#else
+#else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 						  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
@@ -2571,6 +2571,8 @@ void idle_exit_fair(struct rq *this_rq)
 	update_rq_runnable_avg(this_rq, 0);
 }
 
+static int idle_balance(struct rq *this_rq);
+
 #else /* CONFIG_SMP */
 
 static inline void update_entity_load_avg(struct sched_entity *se,
@@ -2584,6 +2586,12 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
 					      int force_update) {}
+
+static inline int idle_balance(struct rq *rq)
+{
+	return 0;
+}
+
 #endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4677,7 +4685,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	struct sched_entity *se;
 	struct task_struct *p;
 
-again: __maybe_unused
+again:
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (!cfs_rq->nr_running)
 		goto idle;
@@ -4775,18 +4783,8 @@ simple:
 	return p;
 
 idle:
-#ifdef CONFIG_SMP
-	idle_enter_fair(rq);
-	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	rq->idle_stamp = rq_clock(rq);
-	if (idle_balance(rq)) { /* drops rq->lock */
-		rq->idle_stamp = 0;
+	if (idle_balance(rq)) /* drops rq->lock */
 		goto again;
-	}
-#endif
 
 	return NULL;
 }
@@ -6634,7 +6632,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-int idle_balance(struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
 {
 	struct sched_domain *sd;
 	int pulled_task = 0;
@@ -6642,8 +6640,15 @@ int idle_balance(struct rq *this_rq)
 	u64 curr_cost = 0;
 	int this_cpu = this_rq->cpu;
 
+	idle_enter_fair(this_rq);
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	this_rq->idle_stamp = rq_clock(this_rq);
+
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
-		return 0;
+		goto out;
 
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6692,8 +6697,10 @@ int idle_balance(struct rq *this_rq)
 	 * While browsing the domains, we released the rq lock.
 	 * A task could have be enqueued in the meantime
 	 */
-	if (this_rq->nr_running && !pulled_task)
-		return 1;
+	if (this_rq->nr_running && !pulled_task) {
+		pulled_task = 1;
+		goto out;
+	}
 
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6706,6 +6713,10 @@ int idle_balance(struct rq *this_rq)
 	if (curr_cost > this_rq->max_idle_balance_cost)
 		this_rq->max_idle_balance_cost = curr_cost;
 
+out:
+	if (pulled_task)
+		this_rq->idle_stamp = 0;
+
 	return pulled_task;
 }
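Pieced together from the hunks above, the post-patch idle_balance() centralizes the idle_stamp accounting that previously lived at the call site. A condensed sketch of the resulting control flow (the real function also walks the sched domains under load_balance() and tracks balancing cost, elided here):

static int idle_balance(struct rq *this_rq)
{
	int pulled_task = 0;

	idle_enter_fair(this_rq);
	/* Stamp first, so time spent balancing is accounted as idle time. */
	this_rq->idle_stamp = rq_clock(this_rq);

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		goto out;

	/* ... load_balance() over each domain; this may drop rq->lock ... */

	/* A task may have been enqueued while the lock was released. */
	if (this_rq->nr_running && !pulled_task)
		pulled_task = 1;

out:
	/* Only a CPU that really goes idle keeps its idle_stamp. */
	if (pulled_task)
		this_rq->idle_stamp = 0;

	return pulled_task;
}

A nonzero return tells pick_next_task_fair() to retry the pick (goto again), since idle_balance() may have dropped and retaken rq->lock in the meantime.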