author    Rusty Russell <rusty@rustcorp.com.au>  2009-11-03 14:53:15 +1030
committer Ingo Molnar <mingo@elte.hu>            2009-11-04 13:16:38 +0100
commit    e2c880630438f80b474378d5487b511b07665051 (patch)
tree      0f35b58cd657a3b50e03a93a12367ab82bd569ef /kernel/sched_rt.c
parent    45a5c8bad827ebb9c9798becc15bce2e804d49e0 (diff)
cpumask: Simplify sched_rt.c
find_lowest_rq() wants to call pick_optimal_cpu() on the intersection of
sched_domain_span(sd) and lowest_mask. Rather than doing a cpus_and into a
temporary, we can open-code it. This actually makes the code slightly
clearer, IMHO.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Gregory Haskins <ghaskins@novell.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <200911031453.15350.rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
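[Annotation] In cpumask terms, the removed helper forced each loop iteration to cpumask_and() into a heap-allocated temporary before searching it; the replacement fuses the AND with the search via cpumask_first_and(). The pattern can be sketched in plain user-space C as a rough analogy only: 64-bit masks stand in for struct cpumask, and every name below is invented for illustration, not kernel API.

/*
 * User-space analogy of the change (not kernel code): 64-bit masks
 * stand in for struct cpumask; all names here are invented.
 */
#include <stdio.h>

typedef unsigned long long mask_t;

/* Find the first set bit, like cpumask_first(); -1 if the mask is empty. */
static int mask_first(mask_t m)
{
	for (int i = 0; i < 64; i++)
		if (m & (1ULL << i))
			return i;
	return -1;
}

/* Before: AND into a temporary, then search the temporary. */
static int first_cpu_with_temporary(mask_t span, mask_t lowest)
{
	mask_t tmp = span & lowest;	/* stands in for cpumask_and() */

	return mask_first(tmp);
}

/* After: open-code the intersection, as cpumask_first_and() does. */
static int first_cpu_opencoded(mask_t span, mask_t lowest)
{
	return mask_first(span & lowest);	/* no temporary needed */
}

int main(void)
{
	mask_t span   = 0xf0;	/* CPUs 4-7 are in this sched domain */
	mask_t lowest = 0x24;	/* CPUs 2 and 5 run the lowest-priority tasks */

	/* Both print 5: the first CPU in the intersection. */
	printf("with temporary: %d\n", first_cpu_with_temporary(span, lowest));
	printf("open-coded:     %d\n", first_cpu_opencoded(span, lowest));
	return 0;
}

Besides reading more directly, dropping the temporary also eliminates the alloc_cpumask_var(GFP_ATOMIC)/free_cpumask_var() pair the old code needed on every call, as the diff below shows.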
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  61
1 file changed, 24 insertions(+), 37 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790cddb1..5c5fef37841 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1153,29 +1153,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu,
-				   const struct cpumask *mask)
-{
-	int first;
-
-	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
-		return this_cpu;
-
-	first = cpumask_first(mask);
-	if (first < nr_cpu_ids)
-		return first;
-
-	return -1;
-}
-
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
-	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1198,28 +1181,26 @@ static int find_lowest_rq(struct task_struct *task)
 	 * Otherwise, we consult the sched_domains span maps to figure
 	 * out which cpu is logically closest to our hot cache data.
 	 */
-	if (this_cpu == cpu)
-		this_cpu = -1; /* Skip this_cpu opt if the same */
-
-	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
-		for_each_domain(cpu, sd) {
-			if (sd->flags & SD_WAKE_AFFINE) {
-				int best_cpu;
+	if (!cpumask_test_cpu(this_cpu, lowest_mask))
+		this_cpu = -1; /* Skip this_cpu opt if not among lowest */
 
-				cpumask_and(domain_mask,
-					    sched_domain_span(sd),
-					    lowest_mask);
+	for_each_domain(cpu, sd) {
+		if (sd->flags & SD_WAKE_AFFINE) {
+			int best_cpu;
 
-				best_cpu = pick_optimal_cpu(this_cpu,
-							    domain_mask);
-
-				if (best_cpu != -1) {
-					free_cpumask_var(domain_mask);
-					return best_cpu;
-				}
-			}
+			/*
+			 * "this_cpu" is cheaper to preempt than a
+			 * remote processor.
+			 */
+			if (this_cpu != -1 &&
+			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
+				return this_cpu;
+
+			best_cpu = cpumask_first_and(lowest_mask,
+						     sched_domain_span(sd));
+			if (best_cpu < nr_cpu_ids)
+				return best_cpu;
 		}
-		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1227,7 +1208,13 @@ static int find_lowest_rq(struct task_struct *task)
 	 * just give the caller *something* to work with from the compatible
 	 * locations.
 	 */
-	return pick_optimal_cpu(this_cpu, lowest_mask);
+	if (this_cpu != -1)
+		return this_cpu;
+
+	cpu = cpumask_any(lowest_mask);
+	if (cpu < nr_cpu_ids)
+		return cpu;
+	return -1;
 }
 
 /* Will lock the rq it finds */