author    | Lai Jiangshan <laijs@cn.fujitsu.com> | 2013-02-19 12:17:02 -0800
committer | Tejun Heo <tj@kernel.org>            | 2013-03-04 09:44:58 -0800
commit    | f5faa0774e07eada85b0c55ec789b3f337d01412
tree      | 439566bd5b8d0187d380b950ef386c349448aca9 /kernel/workqueue.c
parent    | 45d9550a0e7e9230606ca3c4c6f4dc6297848b2f
workqueue: use %current instead of worker->task in worker_maybe_bind_and_lock()
worker_maybe_bind_and_lock() uses both @worker->task and @current
interchangeably. Since worker_maybe_bind_and_lock() can only be called
from the worker's own task, the two always refer to the same task.
Update worker_maybe_bind_and_lock() to use %current consistently.
This doesn't introduce any functional change.
tj: Massaged the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
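The reasoning above hinges on worker->task always pointing at the task that is
executing the worker's code. Below is a minimal, self-contained sketch of that
invariant, assuming the usual create-then-wake kthread pattern; the sketch_*
names are hypothetical illustrations, not the actual create_worker() /
worker_thread() implementation.

/*
 * Sketch of the invariant the patch relies on: the worker's backing
 * kthread is recorded in ->task at creation time, and worker functions
 * such as worker_maybe_bind_and_lock() only ever run in that kthread's
 * own context, so worker->task == current there.  The sketch_* names
 * are stand-ins, not the real workqueue code.
 */
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct sketch_worker {
	struct task_struct *task;	/* kthread backing this worker */
};

static int sketch_worker_thread(void *arg)
{
	struct sketch_worker *worker = arg;

	/*
	 * ->task was pointed at this very kthread before it was woken,
	 * so worker->task and current name the same task for the whole
	 * lifetime of the thread; using %current is simply the more
	 * direct spelling.
	 */
	WARN_ON_ONCE(worker->task != current);
	return 0;
}

static struct sketch_worker *sketch_create_worker(void)
{
	struct sketch_worker *worker = kzalloc(sizeof(*worker), GFP_KERNEL);

	if (!worker)
		return NULL;

	/* record the backing kthread before it starts running */
	worker->task = kthread_create(sketch_worker_thread, worker, "sketch");
	if (IS_ERR(worker->task)) {
		kfree(worker);
		return NULL;
	}
	wake_up_process(worker->task);
	return worker;
}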
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811e..f456433cf53 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1512,7 +1512,7 @@ static void worker_leave_idle(struct worker *worker)
  * flushed from cpu callbacks while cpu is going down, they are
  * guaranteed to execute on the cpu.
  *
- * This function is to be used by rogue workers and rescuers to bind
+ * This function is to be used by unbound workers and rescuers to bind
  * themselves to the target cpu and may race with cpu going down or
  * coming online. kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
@@ -1537,7 +1537,6 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
 __acquires(&pool->lock)
 {
 	struct worker_pool *pool = worker->pool;
-	struct task_struct *task = worker->task;
 
 	while (true) {
 		/*
@@ -1547,12 +1546,12 @@ __acquires(&pool->lock)
 		 * against POOL_DISASSOCIATED.
 		 */
 		if (!(pool->flags & POOL_DISASSOCIATED))
-			set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
+			set_cpus_allowed_ptr(current, get_cpu_mask(pool->cpu));
 
 		spin_lock_irq(&pool->lock);
 		if (pool->flags & POOL_DISASSOCIATED)
 			return false;
-		if (task_cpu(task) == pool->cpu &&
+		if (task_cpu(current) == pool->cpu &&
 		    cpumask_equal(&current->cpus_allowed,
 				  get_cpu_mask(pool->cpu)))
 			return true;