author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-10-17 19:27:03 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-10-20 14:05:03 +0200
commit	a4c2f00f5cb848af7a8c816426b413c8e41834df (patch)
tree	f99d376ae43fe7e19db0f5f268c6925d8cda2107 /kernel/sched_fair.c
parent	ffda12a17a324103e9900fa1035309811eecbfe5 (diff)
sched: fair scheduler should not resched rt tasks
Using ftrace, Steven noticed that some RT tasks got rescheduled due
to sched_fair interaction.

What happens is that we reprogram the hrtick from
enqueue/dequeue_fair_task() because that can change nr_running, and
thus a current task's ideal runtime. However, it's possible the
current task isn't a fair_sched_class task, and thus doesn't have an
hrtick set to change.

Fix this by wrapping those hrtick_start_fair() calls in a
hrtick_update() function, which will check for the right conditions.

Reported-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
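[Editorial illustration] A minimal user-space sketch of the guard this
patch introduces. Every type, helper, and constant below is a
simplified stand-in for demonstration, not the kernel's actual
definition; the real change is the hrtick_update() wrapper shown in
the diff that follows.

/*
 * Minimal user-space model of the bug and its fix. All types and
 * helpers here are simplified stand-ins, not kernel definitions.
 */
#include <stdio.h>

struct sched_class { const char *name; };

static const struct sched_class fair_sched_class = { "fair" };
static const struct sched_class rt_sched_class = { "rt" };

struct task_struct { const struct sched_class *sched_class; };
struct rq { struct task_struct *curr; unsigned int nr_running; };

static const unsigned int sched_nr_latency = 5;	/* stand-in value */

/* Stand-in: the kernel would reprogram the high-resolution tick here. */
static void hrtick_start_fair(struct rq *rq, struct task_struct *curr)
{
	printf("hrtick reprogrammed for a %s task (nr_running=%u)\n",
	       curr->sched_class->name, rq->nr_running);
}

/* The fix: do nothing unless the running task belongs to the fair class. */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;	/* e.g. an RT task: leave its tick alone */

	if (rq->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}

int main(void)
{
	struct task_struct rt_task = { &rt_sched_class };
	struct rq rq = { &rt_task, 2 };

	/*
	 * Before the fix, enqueue_task_fair() called
	 * hrtick_start_fair(rq, rq->curr) unconditionally, even while an
	 * RT task was current; with the guard this call prints nothing.
	 */
	hrtick_update(&rq);
	return 0;
}

The design point: rq->curr may belong to any scheduling class when the
fair class enqueues or dequeues a task, so an hrtick decision made on
curr's behalf must first verify that curr is governed by
fair_sched_class.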
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 67084936b60..0c4bcac5476 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -73,6 +73,8 @@ unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+static const struct sched_class fair_sched_class;
+
 /**************************************************************
  * CFS operations on generic schedulable entities:
  */
@@ -848,11 +850,31 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		hrtick_start(rq, delta);
 	}
 }
+
+/*
+ * called from enqueue/dequeue and updates the hrtick when the
+ * current task is from our class and nr_running is low enough
+ * to matter.
+ */
+static void hrtick_update(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+
+	if (curr->sched_class != &fair_sched_class)
+		return;
+
+	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
+		hrtick_start_fair(rq, curr);
+}
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
 hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 }
+
+static inline void hrtick_update(struct rq *rq)
+{
+}
 #endif
 
 /*
@@ -873,7 +895,7 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 		wakeup = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -895,7 +917,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 		sleep = 1;
 	}
 
-	hrtick_start_fair(rq, rq->curr);
+	hrtick_update(rq);
 }
 
 /*
@@ -1001,8 +1023,6 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 
-static const struct sched_class fair_sched_class;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * effective_load() calculates the load change as seen from the root_task_group