author     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>  2007-10-15 17:00:12 +0200
committer  Ingo Molnar <mingo@elte.hu>                    2007-10-15 17:00:12 +0200
commit     b9fa3df33f9166daf81bfa8253d339f5a7726122 (patch)
tree       72e5c3b9ede48fbbb6c0dbbe94c123a4cb75f7a6 /kernel/sched_fair.c
parent     fad095a7b963d9e914e0cdb73e27355c47709441 (diff)
sched: group scheduler, fix latency
There is a possibility that, because a task of a group moves from one cpu to
another, the group may gain more cpu time than desired. See
http://marc.info/?l=linux-kernel&m=119073197730334 for details.

This is an attempt to fix that problem. Basically it simulates dequeue of
higher-level entities as if they are going to sleep. Similarly, it simulates
wakeup of higher-level entities as if they are waking up from sleep.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  |  2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 57e7f3672fd..de13a6f5b97 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -727,6 +727,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 			break;
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, wakeup);
+		wakeup = 1;
 	}
 }
@@ -746,6 +747,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight)
 			break;
+		sleep = 1;
 	}
 }
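
For context, this is roughly how the two functions read with the patch applied. The loop scaffolding (for_each_sched_entity, the se->on_rq check, dequeue_entity) is reconstructed from the CFS code of this era and the hunks above, so treat it as a sketch rather than the verbatim file:

static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	/* Walk up the group hierarchy, queueing each level's entity. */
	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		/* Treat every higher-level entity as waking up from sleep. */
		wakeup = 1;
	}
}

static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	/* Walk up the group hierarchy, dequeueing empty levels. */
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		/* Treat every higher-level entity as going to sleep. */
		sleep = 1;
	}
}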