Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	| 47 ++++++++++++++++++++++++++++++++---------------
1 files changed, 32 insertions, 15 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d7f1b7e2bba..4b3663b1c67 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1004,15 +1004,33 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
 		complete(&cwq->wq->first_flusher->done);
 }
 
-/*
- * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
- * so this work can't be re-armed in any way.
+/**
+ * try_to_grab_pending - steal work item from worklist
+ * @work: work item to steal
+ * @is_dwork: @work is a delayed_work
+ *
+ * Try to grab PENDING bit of @work. This function can handle @work in any
+ * stable state - idle, on timer or on worklist. Return values are
+ *
+ *  1		if @work was pending and we successfully stole PENDING
+ *  0		if @work was idle and we claimed PENDING
+ *  -EAGAIN	if PENDING couldn't be grabbed at the moment, safe to busy-retry
+ *
+ * On >= 0 return, the caller owns @work's PENDING bit.
  */
-static int try_to_grab_pending(struct work_struct *work)
+static int try_to_grab_pending(struct work_struct *work, bool is_dwork)
 {
 	struct global_cwq *gcwq;
-	int ret = -1;
+
+	/* try to steal the timer if it exists */
+	if (is_dwork) {
+		struct delayed_work *dwork = to_delayed_work(work);
+
+		if (likely(del_timer(&dwork->timer)))
+			return 1;
+	}
 
+	/* try to claim PENDING the normal way */
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
 		return 0;
 
@@ -1022,7 +1040,7 @@ static int try_to_grab_pending(struct work_struct *work)
 	 */
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return ret;
+		return -EAGAIN;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -1038,12 +1056,14 @@ static int try_to_grab_pending(struct work_struct *work)
 			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work),
					     *work_data_bits(work) & WORK_STRUCT_DELAYED);
-			ret = 1;
+
+			spin_unlock_irq(&gcwq->lock);
+			return 1;
 		}
 	}
 	spin_unlock_irq(&gcwq->lock);
 
-	return ret;
+	return -EAGAIN;
 }
 
 /**
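Not part of the patch: a minimal caller-side sketch of how the tri-state return value documented above is meant to be consumed. The function name example_grab_sync() is invented for illustration; the busy-retry loop mirrors what __cancel_work_timer() does in the hunks below.

static void example_grab_sync(struct work_struct *work, bool is_dwork)
{
	int ret;

	do {
		/*
		 *  1: stole PENDING from the timer or the worklist
		 *  0: @work was idle, PENDING claimed
		 * <0: -EAGAIN, transient state, safe to busy-retry
		 */
		ret = try_to_grab_pending(work, is_dwork);
	} while (unlikely(ret < 0));

	/* caller now owns WORK_STRUCT_PENDING; @work cannot be re-armed */
}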
@@ -2817,15 +2837,12 @@ bool flush_work_sync(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work_sync);
 
-static bool __cancel_work_timer(struct work_struct *work,
-				struct timer_list* timer)
+static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
 	int ret;
 
 	do {
-		ret = (timer && likely(del_timer(timer)));
-		if (!ret)
-			ret = try_to_grab_pending(work);
+		ret = try_to_grab_pending(work, is_dwork);
 		wait_on_work(work);
 	} while (unlikely(ret < 0));
 
@@ -2853,7 +2870,7 @@ static bool __cancel_work_timer(struct work_struct *work,
  */
 bool cancel_work_sync(struct work_struct *work)
 {
-	return __cancel_work_timer(work, NULL);
+	return __cancel_work_timer(work, false);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
@@ -2914,7 +2931,7 @@ EXPORT_SYMBOL(flush_delayed_work_sync);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-	return __cancel_work_timer(&dwork->work, &dwork->timer);
+	return __cancel_work_timer(&dwork->work, true);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
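Usage sketch, not taken from this patch: a hypothetical driver (struct my_dev and the my_dev_* helpers are invented names) showing the pattern cancel_delayed_work_sync() exists for, stopping a self-rearming delayed work item whether it is idle, waiting on its timer, queued, or currently running.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work poll_work;
};

/* self-rearming poll handler */
static void my_dev_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *dev = container_of(dwork, struct my_dev, poll_work);

	/* ... talk to the hardware ... */

	schedule_delayed_work(&dev->poll_work, HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);
	schedule_delayed_work(&dev->poll_work, HZ);
}

static void my_dev_stop(struct my_dev *dev)
{
	/*
	 * Kills a pending timer, steals a queued instance and waits for a
	 * running callback; on return the work item is guaranteed idle.
	 */
	cancel_delayed_work_sync(&dev->poll_work);
}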