author      Rusty Russell <rusty@rustcorp.com.au>      2009-03-25 15:01:22 +1030
committer   Ingo Molnar <mingo@elte.hu>                2009-04-01 13:24:51 +0200
commit      13b8bd0a5713bdf05659019badd7c0407984ece1 (patch)
tree        378950741dd647b672a2b3e61988725b52557f2d /kernel
parent      a18b83b7ef3c98cd8b4bb885e4a649a8f30fb7b0 (diff)
sched_rt: don't allocate cpumask in fastpath
Impact: cleanup
As pointed out by Steven Rostedt. Since the arg in question is
unused, we simply change cpupri_find() to accept NULL.
Reported-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <200903251501.22664.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
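For readers skimming the patch below: the change makes the lowest_mask output parameter of cpupri_find() optional, so a caller that only needs the yes/no answer can pass NULL instead of allocating a temporary cpumask with GFP_ATOMIC on the wakeup path. The standalone sketch below is not kernel code; mask_t, find_eligible, caller names and the example bit values are invented purely to illustrate the same "optional output parameter" pattern.

```c
#include <stddef.h>
#include <stdio.h>

typedef unsigned long mask_t;

/*
 * Returns 1 if the two masks share any bit; fills *out only when the
 * caller actually supplied a place to put the result (out != NULL).
 */
static int find_eligible(mask_t allowed, mask_t available, mask_t *out)
{
	mask_t common = allowed & available;

	if (!common)
		return 0;
	if (out)                /* the guard the patch adds to cpupri_find() */
		*out = common;
	return 1;
}

int main(void)
{
	mask_t result;

	/* Slow path: the caller wants the resulting mask. */
	if (find_eligible(0x0f, 0x0c, &result))
		printf("eligible mask: %#lx\n", result);

	/* Fast path: only the boolean matters, so no mask is needed. */
	if (find_eligible(0x0f, 0x03, NULL))
		printf("an eligible CPU exists\n");

	return 0;
}
```

The payoff in the kernel case is visible in the removed lines of check_preempt_equal_prio(): it previously allocated a scratch cpumask with GFP_ATOMIC on every invocation and, if that allocation failed, silently abandoned the check. Passing NULL removes both the allocation cost and that failure mode.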
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched_cpupri.c |  5 +++--
-rw-r--r--   kernel/sched_rt.c     | 15 ++++-----------
2 files changed, 7 insertions, 13 deletions
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 1e00bfacf9b..cdd3c89574c 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invokation. By the time the call returns, the CPUs may have in
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+		if (lowest_mask)
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2..fbec5a58ff1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -805,20 +805,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_var_t mask;
-
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
-		return;
-
 	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, mask))
-		goto free;
+	    && cpupri_find(&rq->rd->cpupri, p, NULL))
+		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
-		goto free;
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
+		return;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -827,8 +822,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
-free:
-	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */