author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-02-13 15:45:39 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-02-13 15:45:39 +0100
commit	23b0fdfc9299b137bd126e9dc22f62a59dae546d (patch)
tree	22019172c555109b69a73da76561d99d7776c4f7 /kernel/sched.c
parent	4cf5d77a6eefaa7a464bc34e8cb767356f10fd74 (diff)
sched: rt-group: deal with PI
Steven mentioned the fun case where a lock-holding task will be throttled.

Simple fix: allow groups that have boosted tasks to run anyway.

If a runnable task in a throttled group gets boosted, the dequeue/enqueue
done by rt_mutex_setprio() is enough to unthrottle the group.

This is of course not quite correct. Two possible ways forward are:
  - a second prio array for boosted tasks
  - boost to a prio ceiling (this would also work for deadline scheduling)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
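For context, a minimal sketch of how such a per-group boosted count could gate the throttle test. The companion kernel/sched_rt.c changes fall outside this diffstat, so the helper below illustrates the mechanism the commit message describes; it is an assumption, not the patch itself:

	/*
	 * Sketch (assumed helper, not part of the kernel/sched.c hunks):
	 * a throttled group keeps running as long as it still holds at
	 * least one PI-boosted task.
	 */
	static inline int rt_rq_throttled(struct rt_rq *rt_rq)
	{
		return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
	}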
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	3
1 file changed, 3 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 88a17c7128c..cecaea67ae9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -362,6 +362,8 @@ struct rt_rq {
 	u64 rt_time;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long rt_nr_boosted;
+
 	struct rq *rq;
 	struct list_head leaf_rt_rq_list;
 	struct task_group *tg;
@@ -7112,6 +7114,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_throttled = 0;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	rt_rq->rt_nr_boosted = 0;
 	rt_rq->rq = rq;
 #endif
 }