author    Rusty Russell <rusty@rustcorp.com.au>    2008-11-25 02:35:04 +1030
committer Ingo Molnar <mingo@elte.hu>              2008-11-24 17:50:45 +0100
commit    758b2cdc6f6a22c702bd8f2344382fb1270b2161
tree      270aec3d0f6235c1519c16e8dc8148f195e133db /kernel/sched_fair.c
parent    1e5ce4f4a755ee498bd9217dae26143afa0d8f31
sched: wrap sched_group and sched_domain cpumask accesses.
Impact: trivial wrap of member accesses
This eases the transition in the next patch.
We also get rid of a temporary cpumask in find_idlest_cpu() thanks to
for_each_cpu_and, and in sched_balance_self() by reading the weight
before setting sd to NULL.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
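
Since this diffstat is limited to kernel/sched_fair.c, the wrappers themselves do not appear below; they land in include/linux/sched.h. As a sketch of what the new accessors look like at this stage (before the follow-up patch changes the underlying representation), the bodies are essentially:

/*
 * Sketch of the accessors this commit introduces; the real definitions
 * live in include/linux/sched.h, outside this diffstat.  At this point
 * sd->span and sg->cpumask are still plain cpumask_t members, so the
 * wrappers only centralize the accesses -- the next patch can then
 * change the underlying representation without touching every caller.
 */
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return &sd->span;
}

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return &sg->cpumask;
}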
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 98345e45b05..bba00402ed9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1024,7 +1024,6 @@ static void yield_task_fair(struct rq *rq)
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
 
@@ -1044,10 +1043,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 							      se.nr_wakeups_idle);
@@ -1240,7 +1238,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
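
The wake_idle() hunk also shows why the temporary cpumask becomes unnecessary: instead of materializing sd->span & p->cpus_allowed & cpu_active_map into tmp and then scanning it, the new code scans the AND of the first two masks directly via for_each_cpu_and() and folds the activity test into the loop body with cpu_active(i). Below is a minimal userspace analogue of that transformation, using a toy 64-bit mask in place of struct cpumask; the names and mask values are illustrative, not kernel API.

/* Userspace analogue of the pattern change in wake_idle(); the mask
 * values and helper names below are illustrative, not kernel API. */
#include <stdint.h>
#include <stdio.h>

/* toy stand-in for for_each_cpu_and(): iterate set bits of a 64-bit mask */
#define for_each_set_bit64(i, mask) \
	for ((i) = 0; (i) < 64; (i)++) \
		if ((mask) & (UINT64_C(1) << (i)))

int main(void)
{
	uint64_t span    = 0xff; /* CPUs in the sched domain   */
	uint64_t allowed = 0x3c; /* p->cpus_allowed equivalent */
	uint64_t active  = 0xfb; /* cpu_active_map equivalent  */
	int i;

	/*
	 * Old shape: uint64_t tmp = span & allowed; tmp &= active;
	 * then scan tmp.  New shape: no temporary at all -- AND the
	 * two masks inline and test "active" per candidate, which is
	 * what for_each_cpu_and() + cpu_active(i) do in the patch.
	 */
	for_each_set_bit64(i, span & allowed) {
		if (active & (UINT64_C(1) << i))
			printf("candidate cpu %d\n", i);
	}
	return 0;
}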