author	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-12-09 09:17:02 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-12-09 09:19:14 +0100
commit	f7698ba75fa283435f5077b9dfb4319d28b9de9a (patch)
tree	4bc16a615a35baaf2b482de81cd256a69067ff72 /kernel/sched/rt.c
parent	798183c54799fbe1e5a5bfabb3a8c0505ffd2149 (diff)
parent	374b105797c3d4f29c685f3be535c35f5689b30e (diff)
Merge tag 'v3.13-rc3' into drm-intel-next-queued
Linux 3.13-rc3

I need a backmerge for two reasons:
- For merging the ppgtt patches from Ben I need to pull in the bdw
  support.
- We now have duplicated calls to intel_uncore_forcewake_reset in the
  setup code due to 2 different patches merged into -next and 3.13. The
  conflict is silent, so I need the merge to be able to apply Deepak's
  fixup patch.

Conflicts:
	drivers/gpu/drm/i915/intel_display.c

Trivial conflict, it doesn't even show up in the merge diff.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 01970c8e64d..7d57275fc39 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -246,8 +246,10 @@ static inline void rt_set_overload(struct rq *rq)
 	 * if we should look at the mask. It would be a shame
 	 * if we looked at the mask, but the mask was not
 	 * updated yet.
+	 *
+	 * Matched by the barrier in pull_rt_task().
 	 */
-	wmb();
+	smp_wmb();
 	atomic_inc(&rq->rd->rto_count);
 }
 
@@ -1169,13 +1171,10 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
-	int cpu;
-
-	cpu = task_cpu(p);
 
 	if (p->nr_cpus_allowed == 1)
 		goto out;
@@ -1213,8 +1212,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->nr_cpus_allowed < 2 ||
-	     curr->prio <= p->prio) &&
-	    (p->nr_cpus_allowed > 1)) {
+	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1630,6 +1628,12 @@ static int pull_rt_task(struct rq *this_rq)
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
+	/*
+	 * Match the barrier from rt_set_overload(); this guarantees that if
+	 * we see overloaded we must also see the rto_mask bit.
+	 */
+	smp_rmb();
+
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
@@ -1931,8 +1935,8 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
-	 * Requeue to the end of queue if we (and all of our ancestors) are the
-	 * only element on the queue
+	 * Requeue to the end of queue if we (and all of our ancestors) are not
+	 * the only element on the queue
 	 */
 	for_each_sched_rt_entity(rt_se) {
 		if (rt_se->run_list.prev != rt_se->run_list.next) {
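
The barrier pairing this merge pulls in (smp_wmb() between setting the
rto_mask bit and incrementing rto_count in rt_set_overload(), matched by
smp_rmb() in pull_rt_task() before it walks the mask) can be sketched
outside the kernel. Below is a minimal user-space analog, using C11 fences
as stand-ins for smp_wmb()/smp_rmb() and hypothetical variables in place of
the root-domain fields; the release fence is stronger than a pure
store-store barrier, but the publish/observe guarantee is the same.

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong rto_mask;	/* stand-in for rd->rto_mask  */
static atomic_int rto_count;	/* stand-in for rd->rto_count */

static void set_overload_sketch(int cpu)
{
	/* publish the mask bit first ... */
	atomic_fetch_or_explicit(&rto_mask, 1UL << cpu, memory_order_relaxed);
	/* ... then order it before the count bump, like smp_wmb() */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&rto_count, 1, memory_order_relaxed);
}

static int pull_sketch(void)
{
	if (atomic_load_explicit(&rto_count, memory_order_relaxed) == 0)
		return 0;
	/* like smp_rmb(): having seen the count, we must see the bit */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&rto_mask, memory_order_relaxed) != 0;
}

int main(void)
{
	set_overload_sketch(2);
	printf("mask bit visible after count: %d\n", pull_sketch());
	return 0;
}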
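
The task_tick_rt() hunk only corrects the comment; the test itself,
rt_se->run_list.prev != rt_se->run_list.next, is true exactly when the
entity is not the only element on its circular queue. A self-contained
sketch with a hypothetical list_head-style node (not the kernel's types)
showing why prev == next means "alone on the queue":

#include <stdbool.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static bool has_siblings(const struct node *n)
{
	/* mirrors rt_se->run_list.prev != rt_se->run_list.next:
	 * when n is the only entry, both point at the list head */
	return n->prev != n->next;
}

int main(void)
{
	struct node queue, a, b;

	list_init(&queue);
	list_add_tail(&a, &queue);
	printf("alone:  %d\n", has_siblings(&a));	/* prints 0 */
	list_add_tail(&b, &queue);
	printf("with b: %d\n", has_siblings(&a));	/* prints 1 */
	return 0;
}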