Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--  kernel/sched/deadline.c | 53
 1 file changed, 14 insertions(+), 39 deletions(-)
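This change drops the open-coded latest_cpu_find() scan and routes DEADLINE push decisions through the cpudl heap (kernel/sched/cpudeadline.c). For reference, a sketch of the cpudl interface the diff calls into; these declarations are an assumption based on this kernel generation's kernel/sched/cpudeadline.h, not part of the diff itself:

/*
 * Assumed declarations, roughly as in kernel/sched/cpudeadline.h
 * of this era:
 */

/*
 * Record @cpu's earliest deadline @dl in the root domain's heap;
 * @is_valid == 0 instead removes the CPU (no -deadline tasks left).
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);

/*
 * Pick a CPU where @p could run "later" (idle, or with a later
 * earliest deadline than @p's); optionally fills @later_mask.
 * Returns a suitable CPU, or -1 if none exists.
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask);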
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 802188fb633..0c6b1d089cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,8 @@
*/
#include "sched.h"
+#include <linux/slab.h>
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
@@ -640,6 +642,7 @@ static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
*/
dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
dl_rq->earliest_dl.curr = deadline;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
} else if (dl_rq->earliest_dl.next == 0 ||
dl_time_before(deadline, dl_rq->earliest_dl.next)) {
/*
@@ -663,6 +666,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
if (!dl_rq->dl_nr_running) {
dl_rq->earliest_dl.curr = 0;
dl_rq->earliest_dl.next = 0;
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
} else {
struct rb_node *leftmost = dl_rq->rb_leftmost;
struct sched_dl_entity *entry;
@@ -670,6 +674,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
dl_rq->earliest_dl.curr = entry->deadline;
dl_rq->earliest_dl.next = next_deadline(rq);
+ cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
}
}
@@ -855,9 +860,6 @@ static void yield_task_dl(struct rq *rq)
#ifdef CONFIG_SMP
static int find_later_rq(struct task_struct *task);
-static int latest_cpu_find(struct cpumask *span,
- struct task_struct *task,
- struct cpumask *later_mask);
static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
@@ -904,7 +906,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
- latest_cpu_find(rq->rd->span, rq->curr, NULL) == -1)
+ cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
return;
/*
@@ -912,7 +914,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
- latest_cpu_find(rq->rd->span, p, NULL) != -1)
+ cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
return;
resched_task(rq->curr);
@@ -1085,39 +1087,6 @@ next_node:
return NULL;
}
-static int latest_cpu_find(struct cpumask *span,
- struct task_struct *task,
- struct cpumask *later_mask)
-{
- const struct sched_dl_entity *dl_se = &task->dl;
- int cpu, found = -1, best = 0;
- u64 max_dl = 0;
-
- for_each_cpu(cpu, span) {
- struct rq *rq = cpu_rq(cpu);
- struct dl_rq *dl_rq = &rq->dl;
-
- if (cpumask_test_cpu(cpu, &task->cpus_allowed) &&
- (!dl_rq->dl_nr_running || dl_time_before(dl_se->deadline,
- dl_rq->earliest_dl.curr))) {
- if (later_mask)
- cpumask_set_cpu(cpu, later_mask);
- if (!best && !dl_rq->dl_nr_running) {
- best = 1;
- found = cpu;
- } else if (!best &&
- dl_time_before(max_dl,
- dl_rq->earliest_dl.curr)) {
- max_dl = dl_rq->earliest_dl.curr;
- found = cpu;
- }
- } else if (later_mask)
- cpumask_clear_cpu(cpu, later_mask);
- }
-
- return found;
-}
-
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
@@ -1134,7 +1103,8 @@ static int find_later_rq(struct task_struct *task)
if (task->nr_cpus_allowed == 1)
return -1;
- best_cpu = latest_cpu_find(task_rq(task)->rd->span, task, later_mask);
+ best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
+ task, later_mask);
if (best_cpu == -1)
return -1;
@@ -1510,6 +1480,9 @@ static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
+
+ if (rq->dl.dl_nr_running > 0)
+ cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}
/* Assumes rq->lock is held */
@@ -1517,6 +1490,8 @@ static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
+
+ cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}
void init_sched_dl_class(void)
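The deleted latest_cpu_find() walked every CPU in the root domain's span on each push decision, an O(nr_cpus) scan. cpudl instead keeps the per-CPU earliest deadlines in a max-heap, so the latest-deadline CPU (the best push target) sits at the root. Below is a minimal standalone sketch of that idea, not the kernel's implementation: no locking, no cpumask filtering, and every *_sketch name is invented for this example.

/*
 * Userspace sketch of the push-heap idea behind cpudl: a max-heap of
 * per-CPU earliest deadlines. The CPU holding the LATEST deadline is
 * read in O(1) and updates cost O(log n), versus the O(n) scan that
 * latest_cpu_find() performed above.
 */
#include <stdint.h>
#include <stdio.h>

#define NCPUS 8

struct item { uint64_t dl; int cpu; };

static struct item heap[NCPUS];
static int pos[NCPUS];		/* cpu -> index in heap[], -1 if absent */
static int size;

static void swap_items(int a, int b)
{
	struct item t = heap[a];

	heap[a] = heap[b];
	heap[b] = t;
	pos[heap[a].cpu] = a;
	pos[heap[b].cpu] = b;
}

static void sift_up(int i)
{
	while (i > 0 && heap[(i - 1) / 2].dl < heap[i].dl) {
		swap_items(i, (i - 1) / 2);
		i = (i - 1) / 2;
	}
}

static void sift_down(int i)
{
	for (;;) {
		int l = 2 * i + 1, r = 2 * i + 2, big = i;

		if (l < size && heap[l].dl > heap[big].dl)
			big = l;
		if (r < size && heap[r].dl > heap[big].dl)
			big = r;
		if (big == i)
			return;
		swap_items(i, big);
		i = big;
	}
}

/* Analogue of cpudl_set(): insert, update, or (@is_valid == 0) remove. */
static void cpudl_set_sketch(int cpu, uint64_t dl, int is_valid)
{
	int i = pos[cpu];

	if (!is_valid) {		/* CPU has no -deadline tasks left */
		if (i < 0)
			return;
		pos[cpu] = -1;
		size--;
		if (i < size) {		/* move the last item into the hole */
			heap[i] = heap[size];
			pos[heap[i].cpu] = i;
			sift_down(i);
			sift_up(i);
		}
	} else if (i < 0) {		/* first -deadline task on this CPU */
		heap[size] = (struct item){ .dl = dl, .cpu = cpu };
		pos[cpu] = size;
		size++;
		sift_up(size - 1);
	} else {			/* earliest deadline changed in place */
		heap[i].dl = dl;
		sift_down(i);
		sift_up(i);
	}
}

/* The O(1) read cpudl_find() builds on: the latest-deadline CPU. */
static int cpudl_maximum_sketch(void)
{
	return size ? heap[0].cpu : -1;
}

int main(void)
{
	for (int c = 0; c < NCPUS; c++)
		pos[c] = -1;

	cpudl_set_sketch(0, 100, 1);
	cpudl_set_sketch(1, 300, 1);
	cpudl_set_sketch(2, 200, 1);
	printf("push target: CPU %d\n", cpudl_maximum_sketch());  /* CPU 1 */

	cpudl_set_sketch(1, 0, 0);	/* CPU 1 drops its last dl task */
	printf("push target: CPU %d\n", cpudl_maximum_sketch());  /* CPU 2 */
	return 0;
}

This mirrors how the diff drives the heap: inc_dl_deadline()/dec_dl_deadline() call cpudl_set() whenever a runqueue's earliest deadline changes or goes empty, and rq_online_dl()/rq_offline_dl() add or drop the CPU as it joins or leaves the root domain.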