author     Nick Piggin <nickpiggin@yahoo.com.au>        2005-06-25 14:57:09 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 16:24:41 -0700
commit     3950745131e23472fb5ace2ee4a2093e7590ec69
tree       8b3e738f2c11ee3e4c60d8960e7bdd3c006f7154
parent     16cfb1c04c3cbe3759f339d3333e7e1e7d59712a
[PATCH] sched: fix SMT scheduling problems
SMT balancing has a couple of problems.

Firstly, active_load_balance is too complex - basically it should be a dumb
helper for when the periodic balancer has determined there is an imbalance,
but gets stuck because the task is running. So rip out all its "smarts", and
just make it move one task to the target CPU.

Secondly, the busy CPU's sched-domain tree was being used for active
balancing. This means that it may not see that nr_balance_failed has reached
a critical level. So use the target CPU's sched-domain tree for this. We can
do this because we hold its runqueue lock.

Lastly, reset nr_balance_failed to a point where we allow cache hot
migration. This will help ensure active load balancing is successful.

Thanks to Suresh Siddha for pointing out these issues.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
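Editor's note: the core of the rewrite is the new domain search - instead of
walking the busiest CPU's sched-domain tree, the simplified
active_load_balance() walks the target CPU's tree from the bottom up and
picks the first level that has SD_LOAD_BALANCE set and spans the busiest
CPU. Below is a minimal user-space sketch of that walk, not kernel code; the
names domain, spans_cpu, find_balance_domain and SD_LOAD_BALANCE_FLAG are
illustrative stand-ins for the kernel's sched_domain, cpu_isset(),
for_each_domain() and SD_LOAD_BALANCE.

/*
 * Sketch: walk a CPU's domain hierarchy upward and return the lowest
 * level that participates in load balancing and also spans another CPU.
 */
#include <stdio.h>
#include <stdint.h>

#define SD_LOAD_BALANCE_FLAG 0x1

struct domain {
	struct domain *parent;   /* next (wider) domain level, NULL at the top */
	unsigned int flags;      /* SD_LOAD_BALANCE_FLAG etc. */
	uint64_t span;           /* bitmask of CPUs covered by this level */
};

/* Does this domain level cover the given CPU? (models cpu_isset(cpu, sd->span)) */
static int spans_cpu(const struct domain *sd, int cpu)
{
	return (int)((sd->span >> cpu) & 1);
}

/* Lowest domain of 'base' that balances load and contains busiest_cpu, or NULL. */
static struct domain *find_balance_domain(struct domain *base, int busiest_cpu)
{
	struct domain *sd;

	for (sd = base; sd; sd = sd->parent)
		if ((sd->flags & SD_LOAD_BALANCE_FLAG) && spans_cpu(sd, busiest_cpu))
			return sd;
	return NULL;
}

int main(void)
{
	/* Two-level hierarchy for CPU 1: SMT siblings {0,1}, then package {0..3}. */
	struct domain phys = { NULL,  SD_LOAD_BALANCE_FLAG, 0xf };
	struct domain smt  = { &phys, SD_LOAD_BALANCE_FLAG, 0x3 };
	struct domain *sd;

	sd = find_balance_domain(&smt, 0);   /* sibling: found at the SMT level */
	printf("busiest CPU 0 matched domain span 0x%llx\n",
	       sd ? (unsigned long long)sd->span : 0ULL);

	sd = find_balance_domain(&smt, 3);   /* other core: found at the package level */
	printf("busiest CPU 3 matched domain span 0x%llx\n",
	       sd ? (unsigned long long)sd->span : 0ULL);
	return 0;
}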
-rw-r--r--  kernel/sched.c | 76
1 file changed, 31 insertions, 45 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 03d737791c1..41e69b5ee65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1995,7 +1995,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 			 * We've kicked active balancing, reset the failure
 			 * counter.
 			 */
-			sd->nr_balance_failed = sd->cache_nice_tries;
+			sd->nr_balance_failed = sd->cache_nice_tries+1;
 		}
 	} else
 		sd->nr_balance_failed = 0;
@@ -2106,56 +2106,42 @@ static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
 static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
 {
 	struct sched_domain *sd;
-	struct sched_group *cpu_group;
 	runqueue_t *target_rq;
-	cpumask_t visited_cpus;
-	int cpu;
+	int target_cpu = busiest_rq->push_cpu;
+
+	if (busiest_rq->nr_running <= 1)
+		/* no task to move */
+		return;
+
+	target_rq = cpu_rq(target_cpu);
 
 	/*
-	 * Search for suitable CPUs to push tasks to in successively higher
-	 * domains with SD_LOAD_BALANCE set.
+	 * This condition is "impossible", if it occurs
+	 * we need to fix it. Originally reported by
+	 * Bjorn Helgaas on a 128-cpu setup.
 	 */
-	visited_cpus = CPU_MASK_NONE;
-	for_each_domain(busiest_cpu, sd) {
-		if (!(sd->flags & SD_LOAD_BALANCE))
-			/* no more domains to search */
-			break;
+	BUG_ON(busiest_rq == target_rq);
 
-		schedstat_inc(sd, alb_cnt);
+	/* move a task from busiest_rq to target_rq */
+	double_lock_balance(busiest_rq, target_rq);
 
-		cpu_group = sd->groups;
-		do {
-			for_each_cpu_mask(cpu, cpu_group->cpumask) {
-				if (busiest_rq->nr_running <= 1)
-					/* no more tasks left to move */
-					return;
-				if (cpu_isset(cpu, visited_cpus))
-					continue;
-				cpu_set(cpu, visited_cpus);
-				if (!cpu_and_siblings_are_idle(cpu) || cpu == busiest_cpu)
-					continue;
-
-				target_rq = cpu_rq(cpu);
-				/*
-				 * This condition is "impossible", if it occurs
-				 * we need to fix it. Originally reported by
-				 * Bjorn Helgaas on a 128-cpu setup.
-				 */
-				BUG_ON(busiest_rq == target_rq);
-
-				/* move a task from busiest_rq to target_rq */
-				double_lock_balance(busiest_rq, target_rq);
-				if (move_tasks(target_rq, cpu, busiest_rq,
-						1, sd, SCHED_IDLE, NULL)) {
-					schedstat_inc(sd, alb_pushed);
-				} else {
-					schedstat_inc(sd, alb_failed);
-				}
-				spin_unlock(&target_rq->lock);
-			}
-			cpu_group = cpu_group->next;
-		} while (cpu_group != sd->groups);
-	}
+	/* Search for an sd spanning us and the target CPU. */
+	for_each_domain(target_cpu, sd)
+		if ((sd->flags & SD_LOAD_BALANCE) &&
+			cpu_isset(busiest_cpu, sd->span))
+				break;
+
+	if (unlikely(sd == NULL))
+		goto out;
+
+	schedstat_inc(sd, alb_cnt);
+
+	if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
+		schedstat_inc(sd, alb_pushed);
+	else
+		schedstat_inc(sd, alb_failed);
+out:
+	spin_unlock(&target_rq->lock);
 }
 
 /*
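
Editor's note: the first hunk's "+1" matters because, in this era of
sched.c, the periodic balancer only treats a cache-hot task as movable once
nr_balance_failed exceeds cache_nice_tries; resetting to cache_nice_tries+1
therefore lets the active-balance pass that was just kicked off migrate the
hot, running task straight away. Below is a tiny stand-alone sketch of that
predicate; balance_state and may_move_cache_hot_task are simplified
stand-ins, not the kernel's can_migrate_task().

/*
 * Sketch: the "aggressive migration" test applied to cache-hot tasks,
 * evaluated with the old and the patched reset values.
 */
#include <stdio.h>

struct balance_state {
	unsigned int nr_balance_failed;
	unsigned int cache_nice_tries;
};

/* Cache-hot tasks may move only after enough balance attempts have failed. */
static int may_move_cache_hot_task(const struct balance_state *sd)
{
	return sd->nr_balance_failed > sd->cache_nice_tries;
}

int main(void)
{
	struct balance_state sd = { .nr_balance_failed = 0, .cache_nice_tries = 2 };

	sd.nr_balance_failed = sd.cache_nice_tries;      /* old reset value */
	printf("reset to cache_nice_tries:   hot task movable? %d\n",
	       may_move_cache_hot_task(&sd));            /* 0: still blocked */

	sd.nr_balance_failed = sd.cache_nice_tries + 1;  /* patched reset value */
	printf("reset to cache_nice_tries+1: hot task movable? %d\n",
	       may_move_cache_hot_task(&sd));            /* 1: migration allowed */
	return 0;
}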