author	Gregory Haskins <ghaskins@novell.com>	2008-01-25 21:08:23 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 21:08:23 +0100
commit	cdc8eb984ce47a7c90a049f45229f7b0d59ba781 (patch)
tree	cebcaf9d854b843a5381cae18cffa6fd836a20ab
parent	cb46984504048db946cd551c261df4e70d59a8ea (diff)
sched: RT-balance, only adjust overload state when changing
The overload set/clears were originally idempotent when this logic was first implemented. But that is no longer true due to the addition of the atomic counter, and this logic was never updated to work properly with that change. So only adjust the overload state if it is actually changing to avoid getting out of sync.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched_rt.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 57fa3d96847..a386758ffeb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -34,9 +34,11 @@ static inline void rt_clear_overload(struct rq *rq)
 static void update_rt_migration(struct rq *rq)
 {
 	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		rt_set_overload(rq);
-		rq->rt.overloaded = 1;
-	} else {
+		if (!rq->rt.overloaded) {
+			rt_set_overload(rq);
+			rq->rt.overloaded = 1;
+		}
+	} else if (rq->rt.overloaded) {
 		rt_clear_overload(rq);
 		rq->rt.overloaded = 0;
 	}
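
For illustration only, here is a minimal userspace sketch of the failure mode the commit message describes. The set/clear helpers are modeled as bumping a shared atomic counter (a stand-in for the atomic counter the message refers to; the counter name, the flattened struct rq fields, and the C11 atomics are assumptions of this sketch, not the scheduler's real code). Without the overloaded-state guard, two consecutive "overloaded" updates would push the counter to 2 and a single clear would leave it at 1, so the global "is any runqueue overloaded?" view would stay stale forever; with the guard, the counter tracks the number of overloaded runqueues exactly.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's atomic overload counter (assumed name). */
static atomic_int rto_count;

/* Flattened stand-in for rq->rt.* fields, for brevity. */
struct rq {
	int rt_nr_migratory;
	int rt_nr_running;
	int overloaded;
};

/* Non-idempotent: every call changes the global counter. */
static void rt_set_overload(struct rq *rq)
{
	(void)rq;
	atomic_fetch_add(&rto_count, 1);
}

static void rt_clear_overload(struct rq *rq)
{
	(void)rq;
	atomic_fetch_sub(&rto_count, 1);
}

/* Patched logic: only touch the counter when the state actually flips. */
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt_nr_migratory && (rq->rt_nr_running > 1)) {
		if (!rq->overloaded) {
			rt_set_overload(rq);
			rq->overloaded = 1;
		}
	} else if (rq->overloaded) {
		rt_clear_overload(rq);
		rq->overloaded = 0;
	}
}

int main(void)
{
	struct rq rq = { .rt_nr_migratory = 1, .rt_nr_running = 2, .overloaded = 0 };

	update_rt_migration(&rq);
	update_rt_migration(&rq);   /* second call is a no-op thanks to the guard */
	printf("rto_count after two overloaded updates: %d\n", atomic_load(&rto_count));

	rq.rt_nr_running = 1;       /* load drops below the overload threshold */
	update_rt_migration(&rq);
	printf("rto_count after the overload clears:    %d\n", atomic_load(&rto_count));
	return 0;
}

With the guard in place the two prints show 1 and then 0; drop the state checks and they show 2 and then 1, which is the out-of-sync condition the patch avoids.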