Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  87
1 file changed, 80 insertions, 7 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 90db1bd1a97..11869faa681 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -661,7 +661,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
+	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(get_gcwq_nr_running(cpu));
 }
 
@@ -687,7 +687,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	atomic_t *nr_running = get_gcwq_nr_running(cpu);
 
-	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
+	if (worker->flags & WORKER_NOT_RUNNING)
 		return NULL;
 
 	/* this can only happen on the local cpu */
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -932,6 +936,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 		wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	for_each_gcwq_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker *worker;
+		struct hlist_node *pos;
+		int i;
+
+		spin_lock_irqsave(&gcwq->lock, flags);
+		for_each_busy_worker(worker, i, pos, gcwq) {
+			if (worker->task != current)
+				continue;
+			spin_unlock_irqrestore(&gcwq->lock, flags);
+			/*
+			 * I'm @worker, no locking necessary.  See if @work
+			 * is headed to the same workqueue.
+			 */
+			return worker->current_cwq->wq == wq;
+		}
+		spin_unlock_irqrestore(&gcwq->lock, flags);
+	}
+	return false;
+}
+
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -943,7 +979,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
 	/* determine gcwq to use */
@@ -1806,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2350,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
@@ -2936,11 +2984,35 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
+	/*
+	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
+	 * set, only chain queueing is allowed.  IOW, only currently
+	 * pending or running work items on @wq can queue further work
+	 * items on it.  @wq is flushed repeatedly until it becomes empty.
+	 * The number of flushes is determined by the depth of chaining and
+	 * should be relatively short.  Whine if it takes too long.
+	 */
 	wq->flags |= WQ_DYING;
+reflush:
 	flush_workqueue(wq);
 
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			printk(KERN_WARNING "workqueue %s: flush on "
+			       "destruction isn't complete after %u tries\n",
+			       wq->name, flush_cnt);
+		goto reflush;
+	}
+
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
@@ -3692,7 +3764,8 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
-	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
+	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
+	       !system_unbound_wq);
 	return 0;
 }
 early_initcall(init_workqueues);
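
For context, the sketch below (not part of the diff) shows the kind of chained queueing the WQ_DYING changes are meant to tolerate: a work item that re-queues itself on its own workqueue keeps being accepted after destroy_workqueue() sets WQ_DYING, and the repeated flush loop drains it. The module and the names chain_wq, chain_work and chain_fn are hypothetical; only queue_work(), alloc_workqueue() and destroy_workqueue() are real workqueue API.

	#include <linux/module.h>
	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *chain_wq;
	static int chain_left = 8;	/* bounded chain depth */

	static void chain_fn(struct work_struct *work);
	static DECLARE_WORK(chain_work, chain_fn);

	static void chain_fn(struct work_struct *work)
	{
		/*
		 * Re-queue ourselves a bounded number of times.  Only one
		 * instance of chain_work is ever pending, so chain_left
		 * needs no locking.  After destroy_workqueue() sets
		 * WQ_DYING, is_chained_work() lets this re-queue through.
		 */
		if (--chain_left > 0)
			queue_work(chain_wq, &chain_work);
	}

	static int __init chain_init(void)
	{
		chain_wq = alloc_workqueue("chain_wq", 0, 0);
		if (!chain_wq)
			return -ENOMEM;
		queue_work(chain_wq, &chain_work);
		return 0;
	}

	static void __exit chain_exit(void)
	{
		/*
		 * Before the patch, the re-queue from chain_fn() would trip
		 * WARN_ON_ONCE() and be dropped once WQ_DYING was set; with
		 * it, the workqueue is flushed repeatedly until the chain
		 * terminates.
		 */
		destroy_workqueue(chain_wq);
	}

	module_init(chain_init);
	module_exit(chain_exit);
	MODULE_LICENSE("GPL");

An unbounded chain would never drain; the flush_cnt warning added to destroy_workqueue() above exists to flag exactly that situation.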