path: root/include/linux/workqueue.h
author		Tejun Heo <tj@kernel.org>	2010-07-02 10:03:51 +0200
committer	Tejun Heo <tj@kernel.org>	2010-07-02 11:00:08 +0200
commit		c7fc77f78f16d138ca997ce096a62f46e2e9420a (patch)
tree		0478e5dde66f6ff86d4baa0fe541748e1a6f1ed2 /include/linux/workqueue.h
parent		f34217977d717385a3e9fd7018ac39fade3964c0 (diff)
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full ordering among works queued to a workqueue. The same can be achieved using WQ_UNBOUND, as unbound workqueues always use the gcwq for WORK_CPU_UNBOUND. Since @max_active is always one and the benefit of cpu locality isn't accessible anyway, serving these workqueues with unbound workqueues should be fine.

Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most single thread workqueue users will be converted to use multithread or non-reentrant workqueues instead, and only the ones which require strict ordering will keep using WQ_UNBOUND + @max_active of 1.

Signed-off-by: Tejun Heo <tj@kernel.org>
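For illustration only (not part of this patch), here is a minimal sketch of how a caller needing strict ordering would allocate such a workqueue after this change; the names my_ordered_wq, my_work and my_work_fn are hypothetical:

#include <linux/init.h>
#include <linux/workqueue.h>

/* Hypothetical driver snippet: WQ_UNBOUND + max_active == 1 gives strict,
 * one-at-a-time execution in queueing order, replacing WQ_SINGLE_CPU. */
static struct workqueue_struct *my_ordered_wq;

static void my_work_fn(struct work_struct *work)
{
	/* runs only after all previously queued works on my_ordered_wq finish */
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_module_init(void)
{
	/* WQ_RESCUER keeps the rescuer thread that the create_*_workqueue()
	 * wrappers below also request. */
	my_ordered_wq = alloc_workqueue("my_ordered_wq", WQ_UNBOUND | WQ_RESCUER, 1);
	if (!my_ordered_wq)
		return -ENOMEM;

	queue_work(my_ordered_wq, &my_work);
	return 0;
}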
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	| 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 67ce734747f..d74a529ed13 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -233,12 +233,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
- WQ_SINGLE_CPU = 1 << 1, /* only single cpu at a time */
+ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */
WQ_RESCUER = 1 << 3, /* has an rescue worker */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
- WQ_UNBOUND = 1 << 6, /* not bound to any cpu */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -300,9 +299,9 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
#define create_workqueue(name) \
alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name) \
- alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
+ alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name) \
- alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
+ alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
extern void destroy_workqueue(struct workqueue_struct *wq);
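
Existing callers are unaffected at the source level: with this patch applied, a hypothetical call such as

	struct workqueue_struct *wq = create_singlethread_workqueue("my_st_wq");

now expands to

	struct workqueue_struct *wq = alloc_workqueue("my_st_wq", WQ_UNBOUND | WQ_RESCUER, 1);

so single-thread users keep their full ordering guarantee via the unbound gcwq rather than via WQ_SINGLE_CPU.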