From 01c8c57d668d94f1036d9ab11a22aa24ca16a35d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Oct 2008 11:06:12 +0200
Subject: sched: fix a find_busiest_group buglet

In one of the group load balancer patches:

	commit 408ed066b11cf9ee4536573b4269ee3613bd735e
	Author: Peter Zijlstra
	Date:   Fri Jun 27 13:41:28 2008 +0200
	Subject: sched: hierarchical load vs find_busiest_group

the following change:

-	if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
+	if (max_load - this_load + 2*busiest_load_per_task >=
			busiest_load_per_task * imbn) {

made the condition always true, because imbn is in [1,2].
Therefore, remove the 2* and give it a fair chance.

Signed-off-by: Peter Zijlstra
Acked-by: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 6625c3c4b10..12bc367d924 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3344,7 +3344,7 @@ small_imbalance:
 	} else
 		this_load_per_task = cpu_avg_load_per_task(this_cpu);

-	if (max_load - this_load + 2*busiest_load_per_task >=
+	if (max_load - this_load + busiest_load_per_task >=
 			busiest_load_per_task * imbn) {
 		*imbalance = busiest_load_per_task;
 		return busiest;
--
cgit v1.2.3-70-g09d2

From 3f3a490480d8ab96e0fe30a41f80f14e6a0c579d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 24 Oct 2008 11:06:16 +0200
Subject: sched: virtual time buddy preemption

Since we moved wakeup preemption back to virtual time, it makes sense to
move the buddy stuff back as well.

The purpose of buddy scheduling is to allow a quickly scheduling pair of
tasks to run away from the group as far as a regular busy task would be
allowed under wakeup preemption. This has the advantage that the pair can
ping-pong for a while, enjoying cache-hotness. Without buddy scheduling,
other tasks would interleave, destroying the cache.

Also, it saves a word in cfs_rq.

Signed-off-by: Peter Zijlstra
Acked-by: Mike Galbraith
Signed-off-by: Ingo Molnar
---
 kernel/sched.c      |  1 -
 kernel/sched_fair.c | 10 ++++------
 2 files changed, 4 insertions(+), 7 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 12bc367d924..e8819bc6f46 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {

 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;

 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7af17e04a6d..ce514afd78f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -791,16 +791,14 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }

+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct rq *rq = rq_of(cfs_rq);
-	u64 pair_slice = rq->clock - cfs_rq->pair_start;
-
-	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
-		cfs_rq->pair_start = rq->clock;
+	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
 		return se;
-	}

 	return cfs_rq->next;
 }
--
cgit v1.2.3-70-g09d2
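
A note on the arithmetic behind the first patch: inside the small_imbalance
branch, max_load >= this_load by construction, so with the extra 2* the
left-hand side is at least 2*busiest_load_per_task while the right-hand side
is at most 2*busiest_load_per_task (imbn is in [1,2]), and the test can never
fail. The stand-alone program below is only an illustration with made-up load
values, not kernel code; it compares the old and the fixed condition for both
values of imbn.

#include <stdio.h>

/* Illustrative only: made-up load numbers, comparing the old (always-true)
 * test against the fixed one for imbn = 1 and imbn = 2. */
static int old_test(unsigned long max_load, unsigned long this_load,
		    unsigned long busiest_load_per_task, unsigned int imbn)
{
	return max_load - this_load + 2*busiest_load_per_task >=
		busiest_load_per_task * imbn;
}

static int new_test(unsigned long max_load, unsigned long this_load,
		    unsigned long busiest_load_per_task, unsigned int imbn)
{
	return max_load - this_load + busiest_load_per_task >=
		busiest_load_per_task * imbn;
}

int main(void)
{
	/* max_load >= this_load always holds when this branch is taken,
	 * so with imbn <= 2 the old test can never be false. */
	unsigned long max_load = 1100, this_load = 1000, load_per_task = 1024;
	unsigned int imbn;

	for (imbn = 1; imbn <= 2; imbn++)
		printf("imbn=%u old=%d new=%d\n", imbn,
		       old_test(max_load, this_load, load_per_task, imbn),
		       new_test(max_load, this_load, load_per_task, imbn));
	return 0;
}

With these numbers it prints old=1 for both imbn values (as it would for any
inputs on this path), while the fixed test returns 0 for imbn=2, so the
condition finally gets a chance to fail.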
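For the second patch, the buddy decision is easiest to see with the
virtual-time rule spelled out. The sketch below is a user-space model, not the
kernel implementation: vruntimes are plain integers and WAKEUP_GRAN is a
made-up stand-in for the wakeup granularity; it only mirrors the shape of
wakeup_preempt_entity() and the new pick_next() to show that the 'next' buddy
keeps running as long as its vruntime lead over the leftmost entity stays
within that granularity.

#include <stdio.h>

/* User-space model of the buddy decision in pick_next(): the buddy
 * (cfs_rq->next) keeps running only as long as the leftmost entity
 * would not be entitled to preempt it under wakeup preemption. */
struct entity {
	const char *name;
	long long vruntime;
};

#define WAKEUP_GRAN 1000000LL	/* stand-in for the wakeup granularity */

/* Mirrors the shape of wakeup_preempt_entity(): returns 1 when 'se'
 * leads 'curr' by more than the granularity and should preempt it. */
static int wakeup_preempt_entity(struct entity *curr, struct entity *se)
{
	long long vdiff = curr->vruntime - se->vruntime;

	if (vdiff < 0)
		return -1;
	if (vdiff > WAKEUP_GRAN)
		return 1;
	return 0;
}

static struct entity *pick_next(struct entity *next, struct entity *leftmost)
{
	/* No buddy, or the buddy has run too far ahead in virtual time:
	 * fall back to the leftmost (fairest) entity. */
	if (!next || wakeup_preempt_entity(next, leftmost) == 1)
		return leftmost;
	return next;
}

int main(void)
{
	struct entity leftmost = { "leftmost", 0 };
	struct entity buddy    = { "buddy",    500000 };	/* within gran */
	struct entity far      = { "far",      5000000 };	/* beyond gran */

	printf("%s\n", pick_next(&buddy, &leftmost)->name);	/* buddy    */
	printf("%s\n", pick_next(&far,   &leftmost)->name);	/* leftmost */
	printf("%s\n", pick_next(NULL,   &leftmost)->name);	/* leftmost */
	return 0;
}

Compared with the old pair_start/pair_slice check, this bounds the buddy's
headstart in virtual time rather than wall-clock time, which is the "as far
as a regular busy task would be allowed" property the changelog describes.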