author | Tejun Heo <tj@kernel.org> | 2013-03-12 11:30:03 -0700
committer | Tejun Heo <tj@kernel.org> | 2013-03-12 11:30:03 -0700
commit | f02ae73aaa4f285199683862ac59972877a11c5d (patch)
tree | ca517312e1ed5bfaff9b18ebefcd8faf064ad040 /kernel/workqueue.c
parent | 7a62c2c87e3bc174fe4b9e9720e148427510fcfb (diff)
workqueue: drop "std" from cpu_std_worker_pools and for_each_std_worker_pool()
All per-cpu pools are standard, so there's no need to use both "cpu"
and "std", and for_each_std_worker_pool() is confusing in that it can
be used only for per-cpu pools.
* s/cpu_std_worker_pools/cpu_worker_pools/
* s/for_each_std_worker_pool()/for_each_cpu_worker_pool()/
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7642bb7b70e..2c507321477 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 			   lockdep_is_held(&workqueue_lock),		\
 			   "sched RCU or workqueue lock should be held")
 
-#define for_each_std_worker_pool(pool, cpu)				\
-	for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0];		\
-	     (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+#define for_each_cpu_worker_pool(pool, cpu)				\
+	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
+	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 	     (pool)++)
 
 #define for_each_busy_worker(worker, i, pool)				\
@@ -420,7 +420,7 @@ static bool workqueue_freezing;	/* W: have wqs started freezing? */
  * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
  */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
-				     cpu_std_worker_pools);
+				     cpu_worker_pools);
 
 /*
  * idr of all pools.  Modifications are protected by workqueue_lock.  Read
@@ -3342,7 +3342,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			struct pool_workqueue *pwq =
 				per_cpu_ptr(wq->cpu_pwqs, cpu);
 			struct worker_pool *cpu_pools =
-				per_cpu(cpu_std_worker_pools, cpu);
+				per_cpu(cpu_worker_pools, cpu);
 
 			pwq->pool = &cpu_pools[highpri];
 			list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
@@ -3694,7 +3694,7 @@ static void wq_unbind_fn(struct work_struct *work)
 	struct worker *worker;
 	int i;
 
-	for_each_std_worker_pool(pool, cpu) {
+	for_each_cpu_worker_pool(pool, cpu) {
 		WARN_ON_ONCE(cpu != smp_processor_id());
 
 		mutex_lock(&pool->assoc_mutex);
@@ -3737,7 +3737,7 @@ static void wq_unbind_fn(struct work_struct *work)
 	 * unbound chain execution of pending work items if other workers
 	 * didn't already.
 	 */
-	for_each_std_worker_pool(pool, cpu)
+	for_each_cpu_worker_pool(pool, cpu)
 		atomic_set(&pool->nr_running, 0);
 }
 
@@ -3754,7 +3754,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			struct worker *worker;
 
 			if (pool->nr_workers)
@@ -3772,7 +3772,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			mutex_lock(&pool->assoc_mutex);
 			spin_lock_irq(&pool->lock);
 
@@ -4012,7 +4012,7 @@ static int __init init_workqueues(void)
 		struct worker_pool *pool;
 
 		i = 0;
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			BUG_ON(init_worker_pool(pool));
 			pool->cpu = cpu;
 			cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
@@ -4027,7 +4027,7 @@ static int __init init_workqueues(void)
 	for_each_online_cpu(cpu) {
 		struct worker_pool *pool;
 
-		for_each_std_worker_pool(pool, cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
 			struct worker *worker;
 
 			pool->flags &= ~POOL_DISASSOCIATED;
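For readers following the rename outside the kernel tree, below is a minimal
userspace sketch of what the renamed iterator does: walk the NR_STD_WORKER_POOLS
standard pools (normal and highpri) attached to one CPU. The NR_CPUS value, the
2-D array, and the per_cpu() stand-in here are simplifications for illustration,
not the kernel's real per-CPU machinery (DEFINE_PER_CPU_SHARED_ALIGNED and
friends); only the macro's shape matches the patched code.

#include <stdio.h>

#define NR_CPUS			4	/* illustrative, not the kernel config value */
#define NR_STD_WORKER_POOLS	2	/* normal and highpri, as in workqueue.c */

struct worker_pool {
	int cpu;
	int nice;		/* 0 for the normal pool, negative for highpri */
};

/* Stand-in for DEFINE_PER_CPU_SHARED_ALIGNED(..., cpu_worker_pools):
 * a plain 2-D array indexed by CPU id instead of real per-CPU storage. */
static struct worker_pool cpu_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

/* Stand-in for the kernel's per_cpu(): plain array indexing. */
#define per_cpu(var, cpu)	(var[(cpu)])

/* Same shape as the renamed kernel macro: iterate every standard pool
 * belonging to one CPU, from pool[0] up to pool[NR_STD_WORKER_POOLS - 1]. */
#define for_each_cpu_worker_pool(pool, cpu)				\
	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
	     (pool)++)

int main(void)
{
	struct worker_pool *pool;
	int cpu;

	/* Mimic the init_workqueues() loop: set up both pools of each CPU. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int highpri = 0;

		for_each_cpu_worker_pool(pool, cpu) {
			pool->cpu = cpu;
			pool->nice = highpri ? -20 : 0;
			printf("cpu%d pool%d: nice=%d\n",
			       pool->cpu, highpri, pool->nice);
			highpri++;
		}
	}
	return 0;
}

As in the patch itself, nothing about the iteration changes; only the names
drop "std" so that the per-cpu-only scope of the macro is visible at the
call sites.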