author:    Tejun Heo <tj@kernel.org>  2010-06-29 10:07:11 +0200
committer: Tejun Heo <tj@kernel.org>  2010-06-29 10:07:11 +0200
commit:    64166699752006f1a23a9cf7c96ae36654ccfc2c (patch)
tree:      ef4d825be9526409102bff79ca8bb1dd60aa2443 /kernel/workqueue.c
parent:    a62428c0ae54a39e411251e836c3fe3dc11a5f5f (diff)
workqueue: temporarily remove workqueue tracing
Strip tracing code from workqueue and remove workqueue tracing. This
is a temporary measure until the concurrency managed workqueue is complete.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 14
1 file changed, 3 insertions, 11 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8e3082b76c7..f7ab703285a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,8 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
 
 /*
  * Structure fields follow one of the following exclusion rules.
@@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work)
 	atomic_long_set(&work->data, work_static(work));
 }
 
-static inline
-struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
+static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
-	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *)(atomic_long_read(&work->data) &
+			WORK_STRUCT_WQ_DATA_MASK);
 }
 
 /**
@@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head,
 			unsigned int extra_flags)
 {
-	trace_workqueue_insertion(cwq->thread, work);
-
 	/* we own @work, set data and link */
 	set_wq_data(work, cwq, extra_flags);
 
@@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
 	struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
 	/* claim and process */
-	trace_workqueue_execution(cwq->thread, work);
 	debug_work_deactivate(work);
 	cwq->current_work = work;
 	list_del_init(&work->entry);
@@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 		return PTR_ERR(p);
 	cwq->thread = p;
 
-	trace_workqueue_creation(cwq->thread, cpu);
-
 	return 0;
 }
 
@@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	trace_workqueue_destruction(cwq->thread);
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }
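
For context, the lines stripped above follow the standard kernel tracepoint pattern: exactly one translation unit defines CREATE_TRACE_POINTS before including the event header, which makes that file emit the tracepoint bodies, while every other includer only sees the inline trace_*() hooks. The sketch below is a minimal illustration of that pattern with a hypothetical event layout; it is not the literal contents of the old include/trace/events/workqueue.h, and the field names are assumptions.

/*
 * Sketch of the tracepoint machinery this commit removes.
 * --- include/trace/events/workqueue.h (illustrative only) ---
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/workqueue.h>

TRACE_EVENT(workqueue_insertion,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work),

	TP_STRUCT__entry(
		__array(char,		thread_comm,	TASK_COMM_LEN)
		__field(pid_t,		thread_pid)
		__field(work_func_t,	func)
	),

	TP_fast_assign(
		memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
		__entry->thread_pid = wq_thread->pid;
		__entry->func = work->func;
	),

	TP_printk("thread=%s:%d func=%pf",
		  __entry->thread_comm, __entry->thread_pid, __entry->func)
);

#endif /* _TRACE_WORKQUEUE_H */

/* This must stay outside the include guard. */
#include <trace/define_trace.h>

/* --- kernel/workqueue.c (sketch of the removed usage) --- */
#define CREATE_TRACE_POINTS		/* emit tracepoint bodies in this file */
#include <trace/events/workqueue.h>

/* Hypothetical caller, standing in for insert_work() above. */
static void insert_work_sketch(struct task_struct *wq_thread,
			       struct work_struct *work)
{
	/* Compiles to a near no-op unless a tracer attaches at runtime. */
	trace_workqueue_insertion(wq_thread, work);
}

Because only kernel/workqueue.c defined CREATE_TRACE_POINTS, deleting that define, the header include, and the four trace_workqueue_*() call sites is enough to strip the tracing entirely, which keeps this temporary removal small and easy to reintroduce once the concurrency managed workqueue work lands.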