From e1d8aa9f1dd655a3534b22fcfbecb70cdb125766 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Mon, 12 Jan 2009 23:15:46 +0100
Subject: tracing: add a new workqueue tracer

Impact: new tracer

The workqueue tracer provides statistical information about each cpu
workqueue thread, such as the number of works inserted and executed
since its creation. It can help to evaluate the amount of work each of
them has to perform. For example, it can help a developer decide
whether to choose a per-cpu workqueue instead of a singlethreaded one.

It only traces statistical information for now, but it will probably
provide event tracing later too. Such a tracer could also help, once
improved, with the development of rt-priority-sorted workqueues.

To get a snapshot of the workqueue state at any time, just do

cat /debugfs/tracing/trace_stat/workqueues

E.g.:

  1    125        125       reiserfs/1
  1      0          0       scsi_tgtd/1
  1      0          0       aio/1
  1      0          0       ata/1
  1    114        114       kblockd/1
  1      0          0       kintegrityd/1
  1   2147       2147       events/1
  0      0          0       kpsmoused
  0    105        105       reiserfs/0
  0      0          0       scsi_tgtd/0
  0      0          0       aio/0
  0      0          0       ata_aux
  0      0          0       ata/0
  0      0          0       cqueue
  0      0          0       kacpi_notify
  0      0          0       kacpid
  0    149        149       kblockd/0
  0      0          0       kintegrityd/0
  0   1000       1000       khelper
  0   2270       2270       events/0

Changes in V2:

_ Drop the static array based on NR_CPU and dynamically allocate the
  stat array with num_possible_cpus() and the other cpumask facilities.
_ Trace workqueue insertion at a slightly lower level (insert_work
  instead of queue_work) to handle even the workqueue barriers.

Signed-off-by: Frederic Weisbecker
Signed-off-by: Steven Rostedt
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 287 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 287 insertions(+)
 create mode 100644 kernel/trace/trace_workqueue.c

(limited to 'kernel/trace/trace_workqueue.c')

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
new file mode 100644
index 00000000000..f8118d39ca9
--- /dev/null
+++ b/kernel/trace/trace_workqueue.c
@@ -0,0 +1,287 @@
+/*
+ * Workqueue statistical tracer.
+ *
+ * Copyright (C) 2008 Frederic Weisbecker
+ *
+ */
+
+
+#include <trace/workqueue.h>
+#include <linux/list.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+
+/* A cpu workqueue thread */
+struct cpu_workqueue_stats {
+        struct list_head        list;
+/* Useful to know if we print the cpu headers */
+        bool                    first_entry;
+        int                     cpu;
+        pid_t                   pid;
+/* Can be inserted from interrupt or user context, need to be atomic */
+        atomic_t                inserted;
+/*
+ *  Don't need to be atomic, works are serialized in a single workqueue thread
+ *  on a single CPU.
+ */
+        unsigned int            executed;
+};
+
+/* List of workqueue threads on one cpu */
+struct workqueue_global_stats {
+        struct list_head        list;
+        spinlock_t              lock;
+};
+
+/* Don't need a global lock because allocated before the workqueues, and
+ * never freed.
+ */ +static struct workqueue_global_stats *all_workqueue_stat; + +/* Insertion of a work */ +static void +probe_workqueue_insertion(struct task_struct *wq_thread, + struct work_struct *work) +{ + int cpu = cpumask_first(&wq_thread->cpus_allowed); + struct cpu_workqueue_stats *node, *next; + unsigned long flags; + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list, + list) { + if (node->pid == wq_thread->pid) { + atomic_inc(&node->inserted); + goto found; + } + } + pr_debug("trace_workqueue: entry not found\n"); +found: + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); +} + +/* Execution of a work */ +static void +probe_workqueue_execution(struct task_struct *wq_thread, + struct work_struct *work) +{ + int cpu = cpumask_first(&wq_thread->cpus_allowed); + struct cpu_workqueue_stats *node, *next; + unsigned long flags; + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list, + list) { + if (node->pid == wq_thread->pid) { + node->executed++; + goto found; + } + } + pr_debug("trace_workqueue: entry not found\n"); +found: + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); +} + +/* Creation of a cpu workqueue thread */ +static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) +{ + struct cpu_workqueue_stats *cws; + unsigned long flags; + + WARN_ON(cpu < 0 || cpu >= num_possible_cpus()); + + /* Workqueues are sometimes created in atomic context */ + cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); + if (!cws) { + pr_warning("trace_workqueue: not enough memory\n"); + return; + } + tracing_record_cmdline(wq_thread); + + INIT_LIST_HEAD(&cws->list); + cws->cpu = cpu; + + cws->pid = wq_thread->pid; + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + if (list_empty(&all_workqueue_stat[cpu].list)) + cws->first_entry = true; + list_add_tail(&cws->list, &all_workqueue_stat[cpu].list); + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); +} + +/* Destruction of a cpu workqueue thread */ +static void probe_workqueue_destruction(struct task_struct *wq_thread) +{ + /* Workqueue only execute on one cpu */ + int cpu = cpumask_first(&wq_thread->cpus_allowed); + struct cpu_workqueue_stats *node, *next; + unsigned long flags; + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list, + list) { + if (node->pid == wq_thread->pid) { + list_del(&node->list); + kfree(node); + goto found; + } + } + + pr_debug("trace_workqueue: don't find workqueue to destroy\n"); +found: + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); + +} + +static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) +{ + unsigned long flags; + struct cpu_workqueue_stats *ret = NULL; + + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + + if (!list_empty(&all_workqueue_stat[cpu].list)) + ret = list_entry(all_workqueue_stat[cpu].list.next, + struct cpu_workqueue_stats, list); + + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); + + return ret; +} + +static void *workqueue_stat_start(void) +{ + int cpu; + void *ret = NULL; + + for_each_possible_cpu(cpu) { + ret = workqueue_stat_start_cpu(cpu); + if (ret) + return ret; + } + return NULL; +} + +static void *workqueue_stat_next(void *prev, int idx) +{ + struct cpu_workqueue_stats *prev_cws = prev; + int cpu = prev_cws->cpu; + unsigned long flags; + void *ret 
= NULL; + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) { + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); + for (++cpu ; cpu < num_possible_cpus(); cpu++) { + ret = workqueue_stat_start_cpu(cpu); + if (ret) + return ret; + } + return NULL; + } + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); + + return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, + list); +} + +static int workqueue_stat_show(struct seq_file *s, void *p) +{ + struct cpu_workqueue_stats *cws = p; + unsigned long flags; + int cpu = cws->cpu; + + seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, + atomic_read(&cws->inserted), + cws->executed, + trace_find_cmdline(cws->pid)); + + spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags); + if (&cws->list == all_workqueue_stat[cpu].list.next) + seq_printf(s, "\n"); + spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags); + + return 0; +} + +static int workqueue_stat_headers(struct seq_file *s) +{ + seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); + seq_printf(s, "# | | | |\n\n"); + return 0; +} + +struct tracer_stat workqueue_stats __read_mostly = { + .name = "workqueues", + .stat_start = workqueue_stat_start, + .stat_next = workqueue_stat_next, + .stat_show = workqueue_stat_show, + .stat_headers = workqueue_stat_headers +}; + + +int __init stat_workqueue_init(void) +{ + if (register_stat_tracer(&workqueue_stats)) { + pr_warning("Unable to register workqueue stat tracer\n"); + return 1; + } + + return 0; +} +fs_initcall(stat_workqueue_init); + +/* + * Workqueues are created very early, just after pre-smp initcalls. + * So we must register our tracepoints at this stage. + */ +int __init trace_workqueue_early_init(void) +{ + int ret, cpu; + + ret = register_trace_workqueue_insertion(probe_workqueue_insertion); + if (ret) + goto out; + + ret = register_trace_workqueue_execution(probe_workqueue_execution); + if (ret) + goto no_insertion; + + ret = register_trace_workqueue_creation(probe_workqueue_creation); + if (ret) + goto no_execution; + + ret = register_trace_workqueue_destruction(probe_workqueue_destruction); + if (ret) + goto no_creation; + + all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats) + * num_possible_cpus(), GFP_KERNEL); + + if (!all_workqueue_stat) { + pr_warning("trace_workqueue: not enough memory\n"); + goto no_creation; + } + + for_each_possible_cpu(cpu) { + spin_lock_init(&all_workqueue_stat[cpu].lock); + INIT_LIST_HEAD(&all_workqueue_stat[cpu].list); + } + + return 0; + +no_creation: + unregister_trace_workqueue_creation(probe_workqueue_creation); +no_execution: + unregister_trace_workqueue_execution(probe_workqueue_execution); +no_insertion: + unregister_trace_workqueue_insertion(probe_workqueue_insertion); +out: + pr_warning("trace_workqueue: unable to trace workqueues\n"); + + return 1; +} +early_initcall(trace_workqueue_early_init); -- cgit v1.2.3-70-g09d2 From 3690b5e6fd9daa030039ae9bda69044228bd476d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 16 Jan 2009 16:32:25 +0800 Subject: trace_workqueue: use percpu data for workqueue stat Impact: use percpu data instead of a global structure Use: static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); instead of allocating a global structure. percpu data also works well on NUMA. 
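
[Editor's note: for readers unfamiliar with the per-cpu API this patch
adopts, here is a minimal kernel-style sketch of the pattern. The
demo_stats type and the demo_dump() consumer are illustrative stand-ins,
not code from the patch.]

#include <linux/percpu.h>
#include <linux/kernel.h>

/* One copy per possible CPU, allocated statically at boot: no kmalloc(),
 * so no allocation-failure path, and each copy lives in its CPU's local
 * memory on NUMA machines. */
struct demo_stats {
        unsigned long events;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_dump(void)
{
        int cpu;

        /* per_cpu() addresses any possible CPU's copy, online or not. */
        for_each_possible_cpu(cpu)
                pr_info("cpu%d: %lu events\n", cpu,
                        per_cpu(demo_stats, cpu).events);
}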
Signed-off-by: Lai Jiangshan
Signed-off-by: Steven Rostedt
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 64 +++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 35 deletions(-)

(limited to 'kernel/trace/trace_workqueue.c')

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index f8118d39ca9..4664990fe9c 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -8,6 +8,7 @@
 
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
 
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
 
 /* Insertion of a work */
 static void
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         atomic_inc(&node->inserted);
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Execution of a work */
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         node->executed++;
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
         }
         pr_debug("trace_workqueue: entry not found\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Creation of a cpu workqueue thread */
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 
         cws->pid = wq_thread->pid;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_empty(&all_workqueue_stat[cpu].list))
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_empty(&workqueue_cpu_stat(cpu)->list))
                 cws->first_entry = true;
-        list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Destruction of a cpu workqueue thread */
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
         struct cpu_workqueue_stats *node, *next;
         unsigned long flags;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
                                                         list) {
                 if (node->pid == wq_thread->pid) {
                         list_del(&node->list);
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 
         pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 }
 
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
         struct cpu_workqueue_stats *ret = NULL;
 
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 
-        if (!list_empty(&all_workqueue_stat[cpu].list))
-                ret = list_entry(all_workqueue_stat[cpu].list.next,
+        if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+                ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
                                  struct cpu_workqueue_stats, list);
 
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return ret;
 }
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
         unsigned long flags;
         void *ret = NULL;
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-                spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+                spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
                 for (++cpu ; cpu < num_possible_cpus(); cpu++) {
                         ret = workqueue_stat_start_cpu(cpu);
                         if (ret)
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
                 }
                 return NULL;
         }
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
                           list);
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
                    cws->executed,
                    trace_find_cmdline(cws->pid));
 
-        spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-        if (&cws->list == all_workqueue_stat[cpu].list.next)
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
                 seq_printf(s, "\n");
-        spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
         return 0;
 }
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
         if (ret)
                 goto no_creation;
 
-        all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
-                                     * num_possible_cpus(), GFP_KERNEL);
-
-        if (!all_workqueue_stat) {
-                pr_warning("trace_workqueue: not enough memory\n");
-                goto no_creation;
-        }
-
         for_each_possible_cpu(cpu) {
-                spin_lock_init(&all_workqueue_stat[cpu].lock);
-                INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+                spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+                INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
         }
 
         return 0;
-- 
cgit v1.2.3-70-g09d2


From c3ffc7a40b7e94b094efe1c8ab4e24370a782b65 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Mon, 9 Mar 2009 18:15:34 +0900
Subject: tracing: Don't use tracing_record_cmdline() in workqueue tracer

Impact: improve workqueue tracer output

Currently, /sys/kernel/debug/tracing/trace_stat/workqueues can display
wrong and strange thread names.

Why? ftrace has the tracing_record_cmdline()/trace_find_cmdline()
convenience functions, which implement a task->comm string cache. This
avoids unnecessary memcpy overhead, and the workqueue tracer uses them.

However, in general, no trace statistics feature should use
tracing_record_cmdline(), because trace statistics can display very old
processes; the comm cache can then return the wrong string, since more
recent processes override the cached entries.

Fortunately, the workqueue tracer guarantees that the displayed
processes are alive. Thus we can look up the comm string from the PID
at display time.

% cat workqueues
 # CPU  INSERTED  EXECUTED   NAME
 # |      |         |          |

   7    431913    431913      kondemand/7
   7         0         0      tail
   7        21        21      git
   7         0         0      ls
   7         9         9      cat
   7    832632    832632      unix_chkpwd
   7    236292    236292      ls

Note: tail, git, ls, cat and unix_chkpwd are obviously not workqueue
threads.

% cat workqueues
 # CPU  INSERTED  EXECUTED   NAME
 # |      |         |          |

   7       510       510      kondemand/7
   7         0         0      kmpathd/7
   7        15        15      ata/7
   7         0         0      aio/7
   7        11        11      kblockd/7
   7      1063      1063      work_on_cpu/7
   7       167       167      events/7

Signed-off-by: KOSAKI Motohiro
Cc: Lai Jiangshan
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'kernel/trace/trace_workqueue.c')

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 4664990fe9c..46c8dc896bd 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -99,8 +99,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
                 pr_warning("trace_workqueue: not enough memory\n");
                 return;
         }
-        tracing_record_cmdline(wq_thread);
-
         INIT_LIST_HEAD(&cws->list);
         cws->cpu = cpu;
 
@@ -195,11 +193,12 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct cpu_workqueue_stats *cws = p;
         unsigned long flags;
         int cpu = cws->cpu;
+        struct task_struct *tsk = find_task_by_vpid(cws->pid);
 
         seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
                    atomic_read(&cws->inserted),
                    cws->executed,
-                   trace_find_cmdline(cws->pid));
+                   tsk ? tsk->comm : "<...>");
 
         spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
         if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-- 
cgit v1.2.3-70-g09d2


From bbcd3063597a3824357cd83c501c2a2aa21ef37b Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Tue, 10 Mar 2009 10:49:53 +0900
Subject: tracing: Don't assume possible cpu list have continuous numbers

The "for (++cpu ; cpu < num_possible_cpus(); cpu++)" statement assumes
that possible cpus have continuous numbers - but that's a wrong
assumption. Instead, cpumask_next() should be used.
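
[Editor's note: a sketch of why the old loop is wrong, under the
assumption of a machine whose possible mask is {0, 2, 3} - CPU 1 is
absent, so num_possible_cpus() == 3. The helper below is hypothetical,
not part of the patch.]

/*
 * Wrong: starting from cpu 0, "for (++cpu; cpu < num_possible_cpus(); cpu++)"
 * iterates over 1 and 2 only: it visits the impossible cpu 1 and never
 * reaches the possible cpu 3.
 *
 * Right: walk the set bits of the mask itself.
 */
static int demo_next_possible_cpu(int cpu)
{
        cpu = cpumask_next(cpu, cpu_possible_mask);     /* next set bit after cpu */
        return cpu < nr_cpu_ids ? cpu : -1;             /* -1: no more possible cpus */
}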
Signed-off-by: KOSAKI Motohiro Cc: Lai Jiangshan Cc: Steven Rostedt Cc: Frederic Weisbecker LKML-Reference: <20090310104437.A480.A69D9226@jp.fujitsu.com> Signed-off-by: Ingo Molnar --- kernel/trace/trace_workqueue.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'kernel/trace/trace_workqueue.c') diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index 46c8dc896bd..739fdacf873 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c @@ -91,7 +91,7 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) struct cpu_workqueue_stats *cws; unsigned long flags; - WARN_ON(cpu < 0 || cpu >= num_possible_cpus()); + WARN_ON(cpu < 0); /* Workqueues are sometimes created in atomic context */ cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); @@ -175,12 +175,12 @@ static void *workqueue_stat_next(void *prev, int idx) spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); - for (++cpu ; cpu < num_possible_cpus(); cpu++) { - ret = workqueue_stat_start_cpu(cpu); - if (ret) - return ret; - } - return NULL; + do { + cpu = cpumask_next(cpu, cpu_possible_mask); + if (cpu >= nr_cpu_ids) + return NULL; + } while (!(ret = workqueue_stat_start_cpu(cpu))); + return ret; } spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); -- cgit v1.2.3-70-g09d2 From ef18012b248b47ec9a12c3a83ca5e99782d39c5d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 10 Mar 2009 14:10:56 -0400 Subject: tracing: remove funky whitespace in the trace code Impact: clean up There existed a lot of 's in the tracing code. This patch removes them. Signed-off-by: Steven Rostedt --- include/linux/tracepoint.h | 16 +++--- kernel/trace/blktrace.c | 10 ++-- kernel/trace/trace.c | 2 +- kernel/trace/trace_branch.c | 2 +- kernel/trace/trace_events_stage_3.h | 98 ++++++++++++++++++------------------ kernel/trace/trace_export.c | 2 +- kernel/trace/trace_functions_graph.c | 6 +-- kernel/trace/trace_output.c | 14 +++--- kernel/trace/trace_workqueue.c | 6 +-- 9 files changed, 78 insertions(+), 78 deletions(-) (limited to 'kernel/trace/trace_workqueue.c') diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 119ece224c2..d35a7ee7611 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -178,8 +178,8 @@ static inline void tracepoint_synchronize_unregister(void) * * prototype, declare it via TP_PROTO(): * * * - * TP_PROTO(struct rq *rq, struct task_struct *prev, - * struct task_struct *next), + * TP_PROTO(struct rq *rq, struct task_struct *prev, + * struct task_struct *next), * * * * * Define the call signature of the 'function'. @@ -187,7 +187,7 @@ static inline void tracepoint_synchronize_unregister(void) * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) * * * - * TP_ARGS(rq, prev, next), + * TP_ARGS(rq, prev, next), * * * * * Fast binary tracing: define the trace record via @@ -229,13 +229,13 @@ static inline void tracepoint_synchronize_unregister(void) * * happens, on an active tracepoint. 
* * * - * TP_fast_assign( - * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); - * __entry->prev_pid = prev->pid; - * __entry->prev_prio = prev->prio; + * TP_fast_assign( + * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + * __entry->prev_pid = prev->pid; + * __entry->prev_prio = prev->prio; * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); * __entry->next_pid = next->pid; - * __entry->next_prio = next->prio; + * __entry->next_prio = next->prio; * ) * * * diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e39679a72a3..bec69d3678c 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -33,7 +33,7 @@ static struct trace_array *blk_tr; static int __read_mostly blk_tracer_enabled; /* Select an alternative, minimalistic output than the original one */ -#define TRACE_BLK_OPT_CLASSIC 0x1 +#define TRACE_BLK_OPT_CLASSIC 0x1 static struct tracer_opt blk_tracer_opts[] = { /* Default disable the minimalistic output */ @@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop); /** * blk_trace_ioctl: - handle the ioctls associated with tracing * @bdev: the block device - * @cmd: the ioctl cmd + * @cmd: the ioctl cmd * @arg: the argument data, if any * **/ @@ -1128,9 +1128,9 @@ static void blk_tracer_reset(struct trace_array *tr) static struct { const char *act[2]; - int (*print)(struct trace_seq *s, const struct trace_entry *ent); + int (*print)(struct trace_seq *s, const struct trace_entry *ent); } what2act[] __read_mostly = { - [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, + [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, @@ -1229,7 +1229,7 @@ static struct tracer blk_tracer __read_mostly = { }; static struct trace_event trace_blk_event = { - .type = TRACE_BLK, + .type = TRACE_BLK, .trace = blk_trace_event_print, .binary = blk_trace_event_print_binary, }; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index cc94f864248..8c6a902db40 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -799,7 +799,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, entry->preempt_count = pc & 0xff; entry->pid = (tsk) ? tsk->pid : 0; - entry->tgid = (tsk) ? tsk->tgid : 0; + entry->tgid = (tsk) ? tsk->tgid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT (irqs_disabled_flags(flags) ? 
TRACE_FLAG_IRQS_OFF : 0) | diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index aaa0755268b..ad8c22efff4 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -157,7 +157,7 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, static struct trace_event trace_branch_event = { - .type = TRACE_BRANCH, + .type = TRACE_BRANCH, .trace = trace_branch_print, }; diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h index 6ee1de59f19..ae2e323df0c 100644 --- a/kernel/trace/trace_events_stage_3.h +++ b/kernel/trace/trace_events_stage_3.h @@ -5,23 +5,23 @@ * * static void ftrace_event_(proto) * { - * event_trace_printk(_RET_IP_, ": " ); + * event_trace_printk(_RET_IP_, ": " ); * } * * static int ftrace_reg_event_(void) * { - * int ret; + * int ret; * - * ret = register_trace_(ftrace_event_); - * if (!ret) - * pr_info("event trace: Could not activate trace point " - * "probe to "); - * return ret; + * ret = register_trace_(ftrace_event_); + * if (!ret) + * pr_info("event trace: Could not activate trace point " + * "probe to "); + * return ret; * } * * static void ftrace_unreg_event_(void) * { - * unregister_trace_(ftrace_event_); + * unregister_trace_(ftrace_event_); * } * * For those macros defined with TRACE_FORMAT: @@ -29,9 +29,9 @@ * static struct ftrace_event_call __used * __attribute__((__aligned__(4))) * __attribute__((section("_ftrace_events"))) event_ = { - * .name = "", - * .regfunc = ftrace_reg_event_, - * .unregfunc = ftrace_unreg_event_, + * .name = "", + * .regfunc = ftrace_reg_event_, + * .unregfunc = ftrace_unreg_event_, * } * * @@ -41,66 +41,66 @@ * * static void ftrace_raw_event_(proto) * { - * struct ring_buffer_event *event; - * struct ftrace_raw_ *entry; <-- defined in stage 1 - * unsigned long irq_flags; - * int pc; - * - * local_save_flags(irq_flags); - * pc = preempt_count(); - * - * event = trace_current_buffer_lock_reserve(event_.id, - * sizeof(struct ftrace_raw_), - * irq_flags, pc); - * if (!event) - * return; - * entry = ring_buffer_event_data(event); - * - * ; <-- Here we assign the entries by the __field and + * struct ring_buffer_event *event; + * struct ftrace_raw_ *entry; <-- defined in stage 1 + * unsigned long irq_flags; + * int pc; + * + * local_save_flags(irq_flags); + * pc = preempt_count(); + * + * event = trace_current_buffer_lock_reserve(event_.id, + * sizeof(struct ftrace_raw_), + * irq_flags, pc); + * if (!event) + * return; + * entry = ring_buffer_event_data(event); + * + * ; <-- Here we assign the entries by the __field and * __array macros. 
* - * trace_current_buffer_unlock_commit(event, irq_flags, pc); + * trace_current_buffer_unlock_commit(event, irq_flags, pc); * } * * static int ftrace_raw_reg_event_(void) * { - * int ret; + * int ret; * - * ret = register_trace_(ftrace_raw_event_); - * if (!ret) - * pr_info("event trace: Could not activate trace point " - * "probe to "); - * return ret; + * ret = register_trace_(ftrace_raw_event_); + * if (!ret) + * pr_info("event trace: Could not activate trace point " + * "probe to "); + * return ret; * } * * static void ftrace_unreg_event_(void) * { - * unregister_trace_(ftrace_raw_event_); + * unregister_trace_(ftrace_raw_event_); * } * * static struct trace_event ftrace_event_type_ = { - * .trace = ftrace_raw_output_, <-- stage 2 + * .trace = ftrace_raw_output_, <-- stage 2 * }; * * static int ftrace_raw_init_event_(void) * { - * int id; + * int id; * - * id = register_ftrace_event(&ftrace_event_type_); - * if (!id) - * return -ENODEV; - * event_.id = id; - * return 0; + * id = register_ftrace_event(&ftrace_event_type_); + * if (!id) + * return -ENODEV; + * event_.id = id; + * return 0; * } * * static struct ftrace_event_call __used * __attribute__((__aligned__(4))) * __attribute__((section("_ftrace_events"))) event_ = { - * .name = "", + * .name = "", * .system = "", - * .raw_init = ftrace_raw_init_event_, - * .regfunc = ftrace_reg_event_, - * .unregfunc = ftrace_unreg_event_, + * .raw_init = ftrace_raw_init_event_, + * .regfunc = ftrace_reg_event_, + * .unregfunc = ftrace_unreg_event_, * .show_format = ftrace_format_, * } * @@ -138,7 +138,7 @@ _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ - .name = #call, \ + .name = #call, \ .system = __stringify(TRACE_SYSTEM), \ .regfunc = ftrace_reg_event_##call, \ .unregfunc = ftrace_unreg_event_##call, \ @@ -163,7 +163,7 @@ static void ftrace_raw_event_##call(proto) \ pc = preempt_count(); \ \ event = trace_current_buffer_lock_reserve(event_##call.id, \ - sizeof(struct ftrace_raw_##call), \ + sizeof(struct ftrace_raw_##call), \ irq_flags, pc); \ if (!event) \ return; \ @@ -208,7 +208,7 @@ static int ftrace_raw_init_event_##call(void) \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ - .name = #call, \ + .name = #call, \ .system = __stringify(TRACE_SYSTEM), \ .raw_init = ftrace_raw_init_event_##call, \ .regfunc = ftrace_raw_reg_event_##call, \ diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 23ae78430d5..4d9952d3df5 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -94,7 +94,7 @@ ftrace_format_##call(struct trace_seq *s) \ static struct ftrace_event_call __used \ __attribute__((__aligned__(4))) \ __attribute__((section("_ftrace_events"))) event_##call = { \ - .name = #call, \ + .name = #call, \ .id = proto, \ .system = __stringify(TRACE_SYSTEM), \ .show_format = ftrace_format_##call, \ diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 453ebd3b636..d1493b853e4 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -841,12 +841,12 @@ static void graph_trace_close(struct trace_iterator *iter) } static struct tracer graph_trace __read_mostly = { - .name = "function_graph", + .name = "function_graph", .open = graph_trace_open, .close = graph_trace_close, .wait_pipe = 
poll_wait_pipe, - .init = graph_trace_init, - .reset = graph_trace_reset, + .init = graph_trace_init, + .reset = graph_trace_reset, .print_line = print_graph_function, .print_header = print_graph_headers, .flags = &tracer_flags, diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ef8fd661b21..491832af9ba 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -565,7 +565,7 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags) } static struct trace_event trace_fn_event = { - .type = TRACE_FN, + .type = TRACE_FN, .trace = trace_fn_trace, .raw = trace_fn_raw, .hex = trace_fn_hex, @@ -696,7 +696,7 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, } static struct trace_event trace_ctx_event = { - .type = TRACE_CTX, + .type = TRACE_CTX, .trace = trace_ctx_print, .raw = trace_ctx_raw, .hex = trace_ctx_hex, @@ -704,7 +704,7 @@ static struct trace_event trace_ctx_event = { }; static struct trace_event trace_wake_event = { - .type = TRACE_WAKE, + .type = TRACE_WAKE, .trace = trace_wake_print, .raw = trace_wake_raw, .hex = trace_wake_hex, @@ -759,7 +759,7 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter, } static struct trace_event trace_special_event = { - .type = TRACE_SPECIAL, + .type = TRACE_SPECIAL, .trace = trace_special_print, .raw = trace_special_print, .hex = trace_special_hex, @@ -796,7 +796,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, } static struct trace_event trace_stack_event = { - .type = TRACE_STACK, + .type = TRACE_STACK, .trace = trace_stack_print, .raw = trace_special_print, .hex = trace_special_hex, @@ -825,7 +825,7 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, } static struct trace_event trace_user_stack_event = { - .type = TRACE_USER_STACK, + .type = TRACE_USER_STACK, .trace = trace_user_stack_print, .raw = trace_special_print, .hex = trace_special_hex, @@ -879,7 +879,7 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) static struct trace_event trace_print_event = { - .type = TRACE_PRINT, + .type = TRACE_PRINT, .trace = trace_print_print, .raw = trace_print_raw, }; diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index 4664990fe9c..e542483df62 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c @@ -19,14 +19,14 @@ struct cpu_workqueue_stats { /* Useful to know if we print the cpu headers */ bool first_entry; int cpu; - pid_t pid; + pid_t pid; /* Can be inserted from interrupt or user context, need to be atomic */ - atomic_t inserted; + atomic_t inserted; /* * Don't need to be atomic, works are serialized in a single workqueue thread * on a single CPU. */ - unsigned int executed; + unsigned int executed; }; /* List of workqueue threads on one cpu */ -- cgit v1.2.3-70-g09d2 From 889a6c367283709a80dad9413488472596a1a1d2 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Fri, 13 Mar 2009 09:03:04 +0900 Subject: tracing: Don't use tracing_record_cmdline() in workqueue tracer fix commit c3ffc7a40b7e94b094efe1c8ab4e24370a782b65 "Don't use tracing_record_cmdline() in workqueue tracer" has a race window. find_task_by_vpid() requires task_list_lock(). 
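
[Editor's note: a minimal sketch of the reference-counted lookup pattern
the fix below adopts; the helper name is hypothetical. find_get_pid()
pins the struct pid and get_pid_task() pins the task, so ->comm can be
read safely without taking the tasklist lock.]

static void demo_print_comm(struct seq_file *s, pid_t nr)
{
        struct pid *pid = find_get_pid(nr);     /* takes a reference on struct pid */
        struct task_struct *tsk;

        if (!pid)
                return;
        tsk = get_pid_task(pid, PIDTYPE_PID);   /* reference on the task, or NULL */
        if (tsk) {
                seq_printf(s, "%s\n", tsk->comm);
                put_task_struct(tsk);           /* drop the task reference */
        }
        put_pid(pid);                           /* drop the pid reference */
}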
LKML-Reference: <20090313090042.43CD.A69D9226@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_workqueue.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

(limited to 'kernel/trace/trace_workqueue.c')

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index fb5ccac8bbc..9ab035b58cf 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -193,12 +193,20 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct cpu_workqueue_stats *cws = p;
         unsigned long flags;
         int cpu = cws->cpu;
-        struct task_struct *tsk = find_task_by_vpid(cws->pid);
-
-        seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
-                   atomic_read(&cws->inserted),
-                   cws->executed,
-                   tsk ? tsk->comm : "<...>");
+        struct pid *pid;
+        struct task_struct *tsk;
+
+        pid = find_get_pid(cws->pid);
+        if (pid) {
+                tsk = get_pid_task(pid, PIDTYPE_PID);
+                if (tsk) {
+                        seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
+                                   atomic_read(&cws->inserted), cws->executed,
+                                   tsk->comm);
+                        put_task_struct(tsk);
+                }
+                put_pid(pid);
+        }
 
         spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
         if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-- 
cgit v1.2.3-70-g09d2


From 2f63b840bc8a816ac879ee773b035cf3e433fae4 Mon Sep 17 00:00:00 2001
From: Lai Jiangshan
Date: Wed, 25 Mar 2009 16:59:18 +0800
Subject: trace_workqueues: fix empty line's output

Empty lines separate each cpu's stats. After the previous fix
(trace_stat: keep original order) was applied, the empty lines were
displayed at the wrong position.

Signed-off-by: Lai Jiangshan
Acked-by: Steven Rostedt
Acked-by: Frederic Weisbecker
LKML-Reference: <49C9F266.2060706@cn.fujitsu.com>
Signed-off-by: Ingo Molnar
---
 kernel/trace/trace_workqueue.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'kernel/trace/trace_workqueue.c')

diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 9ab035b58cf..797201e4a13 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -196,6 +196,11 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
         struct pid *pid;
         struct task_struct *tsk;
 
+        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+                seq_printf(s, "\n");
+        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
         pid = find_get_pid(cws->pid);
         if (pid) {
                 tsk = get_pid_task(pid, PIDTYPE_PID);
@@ -208,18 +213,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
                 put_pid(pid);
         }
 
-        spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-        if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-                seq_printf(s, "\n");
-        spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
         return 0;
 }
 
 static int workqueue_stat_headers(struct seq_file *s)
 {
         seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
-        seq_printf(s, "# |      |         |          |\n\n");
+        seq_printf(s, "# |      |         |          |\n");
         return 0;
 }
-- 
cgit v1.2.3-70-g09d2
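
[Editor's note: to try the tracer, read the stat file from user space.
Below is a minimal, self-contained reader; the debugfs mount point is an
assumption - the first commit message uses /debugfs, while most systems
mount debugfs at /sys/kernel/debug.]

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/tracing/trace_stat/workqueues", "r");

        if (!f) {
                perror("trace_stat/workqueues");
                return 1;
        }
        /* Prints the header, then one block per cpu separated by blank lines. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}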