path: root/include/linux/workqueue.h
author    Tejun Heo <tj@kernel.org>    2010-06-29 10:07:14 +0200
committer Tejun Heo <tj@kernel.org>    2010-06-29 10:07:14 +0200
commit    dcd989cb73ab0f7b722d64ab6516f101d9f43f88 (patch)
tree      8c2f14e708367cb67dd9d29f2da0f7e5f454cf31 /include/linux/workqueue.h
parent    d320c03830b17af64e4547075003b1eeb274bc6c (diff)
workqueue: implement several utility APIs
Implement the following utility APIs.

 workqueue_set_max_active() : adjust max_active of a wq
 workqueue_congested()      : test whether a wq is congested
 work_cpu()                 : determine the last / current cpu of a work
 work_busy()                : query whether a work is busy

* Anton Blanchard fixed missing ret initialization in work_busy().

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Anton Blanchard <anton@samba.org>
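As a rough caller-side sketch of how these declarations are meant to be used: only the workqueue_set_max_active(), workqueue_congested(), work_cpu() and work_busy() calls and the WORK_BUSY_* bits come from this patch (alloc_workqueue() comes from the parent commit); my_wq, my_work, my_work_fn, my_setup, my_report_status and the max_active value of 4 are hypothetical names and values invented for illustration.

#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical driver-owned objects, not part of this patch. */
static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* deferred processing would go here */
}

static int my_setup(void)
{
	my_wq = alloc_workqueue("my_wq", 0, 0);	/* from the parent commit */
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);

	/* Cap the number of in-flight work items on my_wq. */
	workqueue_set_max_active(my_wq, 4);
	return 0;
}

static void my_report_status(unsigned int cpu)
{
	unsigned int busy;

	/* Test whether my_wq's queue for @cpu is congested. */
	if (workqueue_congested(cpu, my_wq))
		pr_info("my_wq congested on cpu %u\n", cpu);

	/* work_busy() returns a WORK_BUSY_PENDING / WORK_BUSY_RUNNING bit mask. */
	busy = work_busy(&my_work);
	if (busy & WORK_BUSY_PENDING)
		pr_info("my_work is still queued\n");
	if (busy & WORK_BUSY_RUNNING)
		pr_info("my_work is running, last/current cpu %u\n",
			work_cpu(&my_work));
}

The diffstat and hunks below show the declarations these calls rely on.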
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--   include/linux/workqueue.h   11
1 file changed, 10 insertions, 1 deletion
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 48b7422f25a..0a7f7972938 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -61,6 +61,10 @@ enum {
 	WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
 	WORK_STRUCT_NO_CPU = NR_CPUS << WORK_STRUCT_FLAG_BITS,
+
+	/* bit mask for work_busy() return values */
+	WORK_BUSY_PENDING = 1 << 0,
+	WORK_BUSY_RUNNING = 1 << 1,
 };
 
 struct work_struct {
@@ -307,9 +311,14 @@ extern void init_workqueues(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern int flush_work(struct work_struct *work);
-
 extern int cancel_work_sync(struct work_struct *work);
 
+extern void workqueue_set_max_active(struct workqueue_struct *wq,
+				     int max_active);
+extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern unsigned int work_cpu(struct work_struct *work);
+extern unsigned int work_busy(struct work_struct *work);
+
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
  * function may still be running on return from cancel_delayed_work(), unless