From fcf65bf149afa91b875ffde4455967cb63ee0be9 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 7 Aug 2012 09:58:03 -0300 Subject: perf evsel: Cache associated event_format We already lookup the associated event_format when reading the perf.data header, so that we can cache the tracepoint name in evsel->name, so do it a little further and save the event_format itself, so that we can avoid relookups in tools that need to access it. Change the tools to take the most obvious advantage, when they were using pevent_find_event directly. More work is needed for further removing the need of a pointer to pevent, such as when asking for event field values ("common_pid" and the other common fields and per event_format fields). This is something that was planned but only got actually done when Andrey Wagin needed to do this lookup at perf_tool->sample() time, when we don't have access to pevent (session->pevent) to use with pevent_find_event(). Cc: Andrey Wagin Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Signed-off-by: Arnaldo Carvalho de Melo Link: http://lkml.kernel.org/n/tip-txkvew2ckko0b594ae8fbnyk@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 37 ++++++++++--------------------------- 1 file changed, 10 insertions(+), 27 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 7a9ad2b1ee7..30ef82aca88 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -43,11 +43,6 @@ static u64 sleep_measurement_overhead; static unsigned long nr_tasks; -struct perf_sched { - struct perf_tool tool; - struct perf_session *session; -}; - struct sched_atom; struct task_desc { @@ -1596,14 +1591,12 @@ typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format * struct machine *machine, struct thread *thread); -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, +static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, union perf_event *event __used, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { - struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - struct pevent *pevent = sched->session->pevent; struct thread *thread = machine__findnew_thread(machine, sample->pid); if (thread == NULL) { @@ -1617,25 +1610,18 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, if (evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - - if (evsel->handler.data == NULL) - evsel->handler.data = pevent_find_event(pevent, - evsel->attr.config); - - f(tool, evsel->handler.data, sample, machine, thread); + f(tool, evsel->tp_format, sample, machine, thread); } return 0; } -static struct perf_sched perf_sched = { - .tool = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_event__process_comm, - .lost = perf_event__process_lost, - .fork = perf_event__process_task, - .ordered_samples = true, - }, +static struct perf_tool perf_sched = { + .sample = perf_sched__process_tracepoint_sample, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_task, + .ordered_samples = true, }; static void read_events(bool destroy, struct perf_session **psession) @@ -1652,18 +1638,15 @@ static void read_events(bool destroy, struct perf_session **psession) }; struct perf_session *session; - session = 
perf_session__new(input_name, O_RDONLY, 0, false, - &perf_sched.tool); + session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched); if (session == NULL) die("No Memory"); - perf_sched.session = session; - err = perf_session__set_tracepoints_handlers(session, handlers); assert(err == 0); if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched.tool); + err = perf_session__process_events(session, &perf_sched); if (err) die("Failed to process events, error %d", err); -- cgit v1.2.3-70-g09d2 From 7f7f8d0bea5d6bb985f4ae84ca3daff34802fd32 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 7 Aug 2012 11:33:42 -0300 Subject: perf sched: Use perf_sample To reduce the number of parameters passed to the various event handling functions. Cc: Andrey Wagin Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-fc537qykjjqzvyol5fecx6ug@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 113 +++++++++++++++------------------------------ 1 file changed, 37 insertions(+), 76 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 30ef82aca88..a25a023965b 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -729,46 +729,30 @@ struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, struct machine *, struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*runtime_event)(struct trace_runtime_event *, struct machine *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*wakeup_event)(struct trace_wakeup_event *, struct machine *, struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*fork_event)(struct trace_fork_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct event_format *event); void (*migrate_task_event)(struct trace_migrate_task_event *, - struct machine *machine, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct machine *machine, + struct perf_sample *sample); }; static void replay_wakeup_event(struct trace_wakeup_event *wakeup_event, struct machine *machine __used, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event, struct perf_sample *sample) { struct task_desc *waker, *wakee; @@ -784,7 +768,7 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event, waker = register_pid(wakeup_event->common_pid, ""); wakee = register_pid(wakeup_event->pid, wakeup_event->comm); - add_sched_event_wakeup(waker, timestamp, wakee); + add_sched_event_wakeup(waker, sample->time, wakee); } static u64 cpu_last_switched[MAX_CPUS]; @@ -793,12 +777,11 @@ static void replay_switch_event(struct trace_switch_event *switch_event, struct machine *machine __used, struct event_format *event, - int cpu, - u64 timestamp, - struct thread *thread __used) + struct perf_sample *sample) { struct task_desc *prev, __used *next; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; + int cpu = sample->cpu; s64 delta; if (verbose) @@ -835,10 +818,7 @@ replay_switch_event(struct trace_switch_event *switch_event, static void 
replay_fork_event(struct trace_fork_event *fork_event, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event) { if (verbose) { printf("sched_fork event %p\n", event); @@ -944,10 +924,7 @@ static void thread_atoms_insert(struct thread *thread) static void latency_fork_event(struct trace_fork_event *fork_event __used, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event __used) { /* should insert the newcomer */ } @@ -1027,13 +1004,12 @@ static void latency_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *thread __used) + struct perf_sample *sample) { struct work_atoms *out_events, *in_events; struct thread *sched_out, *sched_in; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; + int cpu = sample->cpu; s64 delta; BUG_ON(cpu >= MAX_CPUS || cpu < 0); @@ -1078,14 +1054,12 @@ latency_switch_event(struct trace_switch_event *switch_event, static void latency_runtime_event(struct trace_runtime_event *runtime_event, - struct machine *machine, - struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *this_thread __used) + struct machine *machine, struct perf_sample *sample) { struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); + u64 timestamp = sample->time; + int cpu = sample->cpu; BUG_ON(cpu >= MAX_CPUS || cpu < 0); if (!atoms) { @@ -1101,15 +1075,13 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, static void latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) + struct machine *machine, struct event_format *event __used, + struct perf_sample *sample) { struct work_atoms *atoms; struct work_atom *atom; struct thread *wakee; + u64 timestamp = sample->time; /* Note for later, it may be interesting to observe the failing cases */ if (!wakeup_event->success) @@ -1149,12 +1121,9 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, static void latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) + struct machine *machine, struct perf_sample *sample) { + u64 timestamp = sample->time; struct work_atoms *atoms; struct work_atom *atom; struct thread *migrant; @@ -1364,7 +1333,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; @@ -1378,8 +1347,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, FILL_FIELD(wakeup_event, cpu, event, data); if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->wakeup_event(&wakeup_event, machine, event, sample); } /* @@ -1399,15 +1367,13 @@ static void map_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __used, - int this_cpu, - u64 timestamp, - struct thread 
*thread __used) + struct perf_sample *sample) { struct thread *sched_out __used, *sched_in; int new_shortname; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; s64 delta; - int cpu; + int cpu, this_cpu = sample->cpu; BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); @@ -1479,7 +1445,7 @@ process_sched_switch_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { int this_cpu = sample->cpu; void *data = sample->raw_data; @@ -1504,8 +1470,7 @@ process_sched_switch_event(struct perf_tool *tool __used, nr_context_switch_bugs++; } if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, machine, event, - this_cpu, sample->time, thread); + trace_handler->switch_event(&switch_event, machine, event, sample); curr_pid[this_cpu] = switch_event.next_pid; } @@ -1515,7 +1480,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_runtime_event runtime_event; @@ -1526,8 +1491,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, FILL_FIELD(runtime_event, vruntime, event, data); if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->runtime_event(&runtime_event, machine, sample); } static void @@ -1535,7 +1499,7 @@ process_sched_fork_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine __used, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_fork_event fork_event; @@ -1548,8 +1512,7 @@ process_sched_fork_event(struct perf_tool *tool __used, FILL_FIELD(fork_event, child_pid, event, data); if (trace_handler->fork_event) - trace_handler->fork_event(&fork_event, event, - sample->cpu, sample->time, thread); + trace_handler->fork_event(&fork_event, event); } static void @@ -1568,7 +1531,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; @@ -1581,9 +1544,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, FILL_FIELD(migrate_task_event, cpu, event, data); if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, machine, - event, sample->cpu, - sample->time, thread); + trace_handler->migrate_task_event(&migrate_task_event, machine, sample); } typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, -- cgit v1.2.3-70-g09d2 From a116e05dcf61c8d758e0f0aed40325534aee2c13 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Sat, 8 Sep 2012 22:53:06 -0300 Subject: perf sched: Remove die() calls Just use pr_err() + return -1 and perf_session__process_events to abort when some event would call die(), then let the perf's main() exit doing whatever it needs. 
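To make the conversion pattern concrete, below is a minimal, self-contained sketch of the same idea. It is illustrative only: handle_event(), process_events() and the event strings are placeholder names, and fprintf(stderr, ...) stands in for pr_err(); this is not code from the patch. A handler reports a failure and returns -1, the caller propagates the error instead of aborting, and main() turns it into the exit status.

    /* Illustrative sketch, not perf code: error propagation instead of die(). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int handle_event(const char *name)
    {
            char *copy = malloc(strlen(name) + 1);

            if (copy == NULL) {
                    /* previously this kind of failure would call die("No memory") */
                    fprintf(stderr, "No memory at %s\n", __func__);
                    return -1;
            }
            strcpy(copy, name);
            printf("handled %s\n", copy);
            free(copy);
            return 0;
    }

    static int process_events(void)
    {
            const char *events[] = { "sched:sched_switch", "sched:sched_wakeup" };
            size_t i;

            for (i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
                    if (handle_event(events[i]))
                            return -1;      /* propagate; let main() decide how to exit */
            }
            return 0;
    }

    int main(void)
    {
            return process_events() ? EXIT_FAILURE : EXIT_SUCCESS;
    }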
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-88cwdogxqomsy9tfr8r0as58@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 281 +++++++++++++++++++++++++++++---------------- 1 file changed, 179 insertions(+), 102 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index a25a023965b..782f66d3610 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -423,8 +423,8 @@ static int self_open_counters(void) fd = sys_perf_event_open(&attr, 0, -1, -1, 0); if (fd < 0) - die("Error: sys_perf_event_open() syscall returned" - "with %d (%s)\n", fd, strerror(errno)); + pr_debug("Error: sys_perf_event_open() syscall returned" + "with %d (%s)\n", fd, strerror(errno)); return fd; } @@ -450,7 +450,8 @@ static void *thread_func(void *ctx) sprintf(comm2, ":%s", this_task->comm); prctl(PR_SET_NAME, comm2); fd = self_open_counters(); - + if (fd < 0) + return NULL; again: ret = sem_post(&this_task->ready_for_work); BUG_ON(ret); @@ -726,30 +727,30 @@ struct trace_migrate_task_event { }; struct trace_sched_handler { - void (*switch_event)(struct trace_switch_event *, - struct machine *, - struct event_format *, - struct perf_sample *sample); - - void (*runtime_event)(struct trace_runtime_event *, - struct machine *, - struct perf_sample *sample); + int (*switch_event)(struct trace_switch_event *event, + struct machine *machine, + struct event_format *tp_format, + struct perf_sample *sample); - void (*wakeup_event)(struct trace_wakeup_event *, - struct machine *, - struct event_format *, + int (*runtime_event)(struct trace_runtime_event *event, + struct machine *machine, struct perf_sample *sample); - void (*fork_event)(struct trace_fork_event *, - struct event_format *event); + int (*wakeup_event)(struct trace_wakeup_event *event, + struct machine *machine, + struct event_format *tp_format, + struct perf_sample *sample); - void (*migrate_task_event)(struct trace_migrate_task_event *, - struct machine *machine, - struct perf_sample *sample); + int (*fork_event)(struct trace_fork_event *event, + struct event_format *tp_format); + + int (*migrate_task_event)(struct trace_migrate_task_event *event, + struct machine *machine, + struct perf_sample *sample); }; -static void +static int replay_wakeup_event(struct trace_wakeup_event *wakeup_event, struct machine *machine __used, struct event_format *event, struct perf_sample *sample) @@ -769,11 +770,12 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event, wakee = register_pid(wakeup_event->pid, wakeup_event->comm); add_sched_event_wakeup(waker, sample->time, wakee); + return 0; } static u64 cpu_last_switched[MAX_CPUS]; -static void +static int replay_switch_event(struct trace_switch_event *switch_event, struct machine *machine __used, struct event_format *event, @@ -788,7 +790,7 @@ replay_switch_event(struct trace_switch_event *switch_event, printf("sched_switch event %p\n", event); if (cpu >= MAX_CPUS || cpu < 0) - return; + return 0; timestamp0 = cpu_last_switched[cpu]; if (timestamp0) @@ -796,8 +798,10 @@ replay_switch_event(struct trace_switch_event *switch_event, else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); + if (delta < 0) { + pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; + } if (verbose) { printf(" ... 
switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", @@ -813,10 +817,12 @@ replay_switch_event(struct trace_switch_event *switch_event, add_sched_event_run(prev, timestamp, delta); add_sched_event_sleep(prev, timestamp, switch_event->prev_state); + + return 0; } -static void +static int replay_fork_event(struct trace_fork_event *fork_event, struct event_format *event) { @@ -827,6 +833,7 @@ replay_fork_event(struct trace_fork_event *fork_event, } register_pid(fork_event->parent_pid, fork_event->parent_comm); register_pid(fork_event->child_pid, fork_event->child_comm); + return 0; } static struct trace_sched_handler replay_ops = { @@ -911,22 +918,26 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, rb_insert_color(&data->node, root); } -static void thread_atoms_insert(struct thread *thread) +static int thread_atoms_insert(struct thread *thread) { struct work_atoms *atoms = zalloc(sizeof(*atoms)); - if (!atoms) - die("No memory"); + if (!atoms) { + pr_err("No memory at %s\n", __func__); + return -1; + } atoms->thread = thread; INIT_LIST_HEAD(&atoms->work_list); __thread_latency_insert(&atom_root, atoms, &cmp_pid); + return 0; } -static void +static int latency_fork_event(struct trace_fork_event *fork_event __used, struct event_format *event __used) { /* should insert the newcomer */ + return 0; } __used @@ -937,14 +948,16 @@ static char sched_out_state(struct trace_switch_event *switch_event) return str[switch_event->prev_state]; } -static void +static int add_sched_out_event(struct work_atoms *atoms, char run_state, u64 timestamp) { struct work_atom *atom = zalloc(sizeof(*atom)); - if (!atom) - die("Non memory"); + if (!atom) { + pr_err("Non memory at %s", __func__); + return -1; + } atom->sched_out_time = timestamp; @@ -954,6 +967,7 @@ add_sched_out_event(struct work_atoms *atoms, } list_add_tail(&atom->list, &atoms->work_list); + return 0; } static void @@ -1000,7 +1014,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } -static void +static int latency_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __used, @@ -1021,38 +1035,49 @@ latency_switch_event(struct trace_switch_event *switch_event, else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - + if (delta < 0) { + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; + } sched_out = machine__findnew_thread(machine, switch_event->prev_pid); sched_in = machine__findnew_thread(machine, switch_event->next_pid); out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); if (!out_events) { - thread_atoms_insert(sched_out); + if (thread_atoms_insert(sched_out)) + return -1; out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); - if (!out_events) - die("out-event: Internal tree error"); + if (!out_events) { + pr_err("out-event: Internal tree error"); + return -1; + } } - add_sched_out_event(out_events, sched_out_state(switch_event), timestamp); + if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp)) + return -1; in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); if (!in_events) { - thread_atoms_insert(sched_in); + if (thread_atoms_insert(sched_in)) + return -1; in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); - if (!in_events) - die("in-event: Internal tree error"); + if (!in_events) { + pr_err("in-event: Internal tree error"); + return -1; + } /* * Take came in we have not heard about yet, * add in an initial atom in 
runnable state: */ - add_sched_out_event(in_events, 'R', timestamp); + if (add_sched_out_event(in_events, 'R', timestamp)) + return -1; } add_sched_in_event(in_events, timestamp); + + return 0; } -static void +static int latency_runtime_event(struct trace_runtime_event *runtime_event, struct machine *machine, struct perf_sample *sample) { @@ -1063,17 +1088,22 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, BUG_ON(cpu >= MAX_CPUS || cpu < 0); if (!atoms) { - thread_atoms_insert(thread); + if (thread_atoms_insert(thread)) + return -1; atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); - if (!atoms) - die("in-event: Internal tree error"); - add_sched_out_event(atoms, 'R', timestamp); + if (!atoms) { + pr_debug("in-event: Internal tree error"); + return -1; + } + if (add_sched_out_event(atoms, 'R', timestamp)) + return -1; } add_runtime_event(atoms, runtime_event->runtime, timestamp); + return 0; } -static void +static int latency_wakeup_event(struct trace_wakeup_event *wakeup_event, struct machine *machine, struct event_format *event __used, struct perf_sample *sample) @@ -1085,16 +1115,20 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, /* Note for later, it may be interesting to observe the failing cases */ if (!wakeup_event->success) - return; + return 0; wakee = machine__findnew_thread(machine, wakeup_event->pid); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); if (!atoms) { - thread_atoms_insert(wakee); + if (thread_atoms_insert(wakee)) + return -1; atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); - if (!atoms) - die("wakeup-event: Internal tree error"); - add_sched_out_event(atoms, 'S', timestamp); + if (!atoms) { + pr_debug("wakeup-event: Internal tree error"); + return -1; + } + if (add_sched_out_event(atoms, 'S', timestamp)) + return -1; } BUG_ON(list_empty(&atoms->work_list)); @@ -1112,14 +1146,15 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, nr_timestamps++; if (atom->sched_out_time > timestamp) { nr_unordered_timestamps++; - return; + return 0; } atom->state = THREAD_WAIT_CPU; atom->wake_up_time = timestamp; + return 0; } -static void +static int latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, struct machine *machine, struct perf_sample *sample) { @@ -1132,17 +1167,21 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, * Only need to worry about migration when profiling one CPU. 
*/ if (profile_cpu == -1) - return; + return 0; migrant = machine__findnew_thread(machine, migrate_task_event->pid); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); if (!atoms) { - thread_atoms_insert(migrant); + if (thread_atoms_insert(migrant)) + return -1; register_pid(migrant->pid, migrant->comm); atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); - if (!atoms) - die("migration-event: Internal tree error"); - add_sched_out_event(atoms, 'R', timestamp); + if (!atoms) { + pr_debug("migration-event: Internal tree error"); + return -1; + } + if (add_sched_out_event(atoms, 'R', timestamp)) + return -1; } BUG_ON(list_empty(&atoms->work_list)); @@ -1154,6 +1193,8 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, if (atom->sched_out_time > timestamp) nr_unordered_timestamps++; + + return 0; } static struct trace_sched_handler lat_ops = { @@ -1328,7 +1369,7 @@ static void sort_lat(void) static struct trace_sched_handler *trace_handler; -static void +static int process_sched_wakeup_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, @@ -1337,6 +1378,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, { void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; + int err = 0; FILL_COMMON_FIELDS(wakeup_event, event, data); @@ -1347,7 +1389,9 @@ process_sched_wakeup_event(struct perf_tool *tool __used, FILL_FIELD(wakeup_event, cpu, event, data); if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, machine, event, sample); + err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample); + + return err; } /* @@ -1363,7 +1407,7 @@ static struct thread *curr_thread[MAX_CPUS]; static char next_shortname1 = 'A'; static char next_shortname2 = '0'; -static void +static int map_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __used, @@ -1387,9 +1431,10 @@ map_switch_event(struct trace_switch_event *switch_event, else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - + if (delta < 0) { + pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; + } sched_out = machine__findnew_thread(machine, switch_event->prev_pid); sched_in = machine__findnew_thread(machine, switch_event->next_pid); @@ -1438,16 +1483,18 @@ map_switch_event(struct trace_switch_event *switch_event, } else { printf("\n"); } + + return 0; } -static void +static int process_sched_switch_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, struct thread *thread __used) { - int this_cpu = sample->cpu; + int this_cpu = sample->cpu, err = 0; void *data = sample->raw_data; struct trace_switch_event switch_event; @@ -1470,12 +1517,13 @@ process_sched_switch_event(struct perf_tool *tool __used, nr_context_switch_bugs++; } if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, machine, event, sample); + err = trace_handler->switch_event(&switch_event, machine, event, sample); curr_pid[this_cpu] = switch_event.next_pid; + return err; } -static void +static int process_sched_runtime_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, @@ -1484,6 +1532,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, { void *data = sample->raw_data; struct trace_runtime_event runtime_event; + int err = 0; FILL_ARRAY(runtime_event, comm, event, data); FILL_FIELD(runtime_event, pid, event, 
data); @@ -1491,10 +1540,12 @@ process_sched_runtime_event(struct perf_tool *tool __used, FILL_FIELD(runtime_event, vruntime, event, data); if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, machine, sample); + err = trace_handler->runtime_event(&runtime_event, machine, sample); + + return err; } -static void +static int process_sched_fork_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, @@ -1503,6 +1554,7 @@ process_sched_fork_event(struct perf_tool *tool __used, { void *data = sample->raw_data; struct trace_fork_event fork_event; + int err = 0; FILL_COMMON_FIELDS(fork_event, event, data); @@ -1512,10 +1564,12 @@ process_sched_fork_event(struct perf_tool *tool __used, FILL_FIELD(fork_event, child_pid, event, data); if (trace_handler->fork_event) - trace_handler->fork_event(&fork_event, event); + err = trace_handler->fork_event(&fork_event, event); + + return err; } -static void +static int process_sched_exit_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample __used, @@ -1524,9 +1578,11 @@ process_sched_exit_event(struct perf_tool *tool __used, { if (verbose) printf("sched_exit event %p\n", event); + + return 0; } -static void +static int process_sched_migrate_task_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, @@ -1535,6 +1591,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, { void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; + int err = 0; FILL_COMMON_FIELDS(migrate_task_event, event, data); @@ -1544,13 +1601,16 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, FILL_FIELD(migrate_task_event, cpu, event, data); if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, machine, sample); + err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample); + + return err; } -typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread); +typedef int (*tracepoint_handler)(struct perf_tool *tool, + struct event_format *tp_format, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread); static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, union perf_event *event __used, @@ -1559,6 +1619,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, sample->pid); + int err = 0; if (thread == NULL) { pr_debug("problem processing %s event, skipping it.\n", @@ -1571,10 +1632,10 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, if (evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - f(tool, evsel->tp_format, sample, machine, thread); + err = f(tool, evsel->tp_format, sample, machine, thread); } - return 0; + return err; } static struct perf_tool perf_sched = { @@ -1585,9 +1646,8 @@ static struct perf_tool perf_sched = { .ordered_samples = true, }; -static void read_events(bool destroy, struct perf_session **psession) +static int read_events(bool destroy, struct perf_session **psession) { - int err = -EINVAL; const struct perf_evsel_str_handler handlers[] = { { "sched:sched_switch", process_sched_switch_event, }, { "sched:sched_stat_runtime", process_sched_runtime_event, }, @@ -1600,16 +1660,20 @@ static 
void read_events(bool destroy, struct perf_session **psession) struct perf_session *session; session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched); - if (session == NULL) - die("No Memory"); + if (session == NULL) { + pr_debug("No Memory for session\n"); + return -1; + } - err = perf_session__set_tracepoints_handlers(session, handlers); - assert(err == 0); + if (perf_session__set_tracepoints_handlers(session, handlers)) + goto out_delete; if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched); - if (err) - die("Failed to process events, error %d", err); + int err = perf_session__process_events(session, &perf_sched); + if (err) { + pr_err("Failed to process events, error %d", err); + goto out_delete; + } nr_events = session->hists.stats.nr_events[0]; nr_lost_events = session->hists.stats.total_lost; @@ -1621,6 +1685,12 @@ static void read_events(bool destroy, struct perf_session **psession) if (psession) *psession = session; + + return 0; + +out_delete: + perf_session__delete(session); + return -1; } static void print_bad_events(void) @@ -1653,13 +1723,14 @@ static void print_bad_events(void) } } -static void __cmd_lat(void) +static int __cmd_lat(void) { struct rb_node *next; struct perf_session *session; setup_pager(); - read_events(false, &session); + if (read_events(false, &session)) + return -1; sort_lat(); printf("\n ---------------------------------------------------------------------------------------------------------------\n"); @@ -1686,6 +1757,7 @@ static void __cmd_lat(void) printf("\n"); perf_session__delete(session); + return 0; } static struct trace_sched_handler map_ops = { @@ -1695,16 +1767,18 @@ static struct trace_sched_handler map_ops = { .fork_event = NULL, }; -static void __cmd_map(void) +static int __cmd_map(void) { max_cpu = sysconf(_SC_NPROCESSORS_CONF); setup_pager(); - read_events(true, NULL); + if (read_events(true, NULL)) + return -1; print_bad_events(); + return 0; } -static void __cmd_replay(void) +static int __cmd_replay(void) { unsigned long i; @@ -1713,7 +1787,8 @@ static void __cmd_replay(void) test_calibrations(); - read_events(true, NULL); + if (read_events(true, NULL)) + return -1; printf("nr_run_events: %ld\n", nr_run_events); printf("nr_sleep_events: %ld\n", nr_sleep_events); @@ -1734,6 +1809,8 @@ static void __cmd_replay(void) printf("------------------------------------------------------------\n"); for (i = 0; i < replay_repeat; i++) run_one_test(); + + return 0; } @@ -1865,11 +1942,11 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used) usage_with_options(latency_usage, latency_options); } setup_sorting(); - __cmd_lat(); + return __cmd_lat(); } else if (!strcmp(argv[0], "map")) { trace_handler = &map_ops; setup_sorting(); - __cmd_map(); + return __cmd_map(); } else if (!strncmp(argv[0], "rep", 3)) { trace_handler = &replay_ops; if (argc) { @@ -1877,7 +1954,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used) if (argc) usage_with_options(replay_usage, replay_options); } - __cmd_replay(); + return __cmd_replay(); } else { usage_with_options(sched_usage, sched_options); } -- cgit v1.2.3-70-g09d2 From 1d037ca1648b775277fc96401ec2aa233724906c Mon Sep 17 00:00:00 2001 From: Irina Tirdea Date: Tue, 11 Sep 2012 01:15:03 +0300 Subject: perf tools: Use __maybe_used for unused variables perf defines both __used and __unused variables to use for marking unused variables. 
The variable __used is defined to __attribute__((__unused__)), which contradicts the kernel definition to __attribute__((__used__)) for new gcc versions. On Android, __used is also defined in system headers and this leads to warnings like: warning: '__used__' attribute ignored __unused is not defined in the kernel and is not a standard definition. If __unused is included everywhere instead of __used, this leads to conflicts with glibc headers, since glibc has a variables with this name in its headers. The best approach is to use __maybe_unused, the definition used in the kernel for __attribute__((unused)). In this way there is only one definition in perf sources (instead of 2 definitions that point to the same thing: __used and __unused) and it works on both Linux and Android. This patch simply replaces all instances of __used and __unused with __maybe_unused. Signed-off-by: Irina Tirdea Acked-by: Pekka Enberg Cc: David Ahern Cc: Ingo Molnar Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1347315303-29906-7-git-send-email-irina.tirdea@intel.com [ committer note: fixed up conflict with a116e05 in builtin-sched.c ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/traceevent/event-parse.c | 8 +- tools/lib/traceevent/event-parse.h | 4 +- tools/perf/bench/bench.h | 3 +- tools/perf/bench/mem-memcpy.c | 2 +- tools/perf/bench/mem-memset.c | 2 +- tools/perf/bench/sched-messaging.c | 2 +- tools/perf/bench/sched-pipe.c | 6 +- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-bench.c | 2 +- tools/perf/builtin-buildid-cache.c | 7 +- tools/perf/builtin-buildid-list.c | 3 +- tools/perf/builtin-diff.c | 4 +- tools/perf/builtin-evlist.c | 2 +- tools/perf/builtin-help.c | 2 +- tools/perf/builtin-inject.c | 24 ++--- tools/perf/builtin-kmem.c | 22 +++-- tools/perf/builtin-kvm.c | 2 +- tools/perf/builtin-list.c | 2 +- tools/perf/builtin-lock.c | 4 +- tools/perf/builtin-probe.c | 24 ++--- tools/perf/builtin-record.c | 10 +- tools/perf/builtin-report.c | 11 ++- tools/perf/builtin-sched.c | 102 ++++++++++----------- tools/perf/builtin-script.c | 29 +++--- tools/perf/builtin-stat.c | 40 +++++--- tools/perf/builtin-test.c | 9 +- tools/perf/builtin-timechart.c | 30 +++--- tools/perf/builtin-top.c | 7 +- tools/perf/ui/browser.c | 7 +- tools/perf/ui/browsers/annotate.c | 6 +- tools/perf/ui/gtk/browser.c | 5 +- tools/perf/ui/gtk/setup.c | 2 +- tools/perf/ui/gtk/util.c | 4 +- tools/perf/ui/helpline.c | 2 +- tools/perf/ui/helpline.h | 8 +- tools/perf/ui/hist.c | 21 +++-- tools/perf/ui/tui/setup.c | 4 +- tools/perf/util/alias.c | 3 +- tools/perf/util/annotate.c | 6 +- tools/perf/util/annotate.h | 13 +-- tools/perf/util/build-id.c | 11 ++- tools/perf/util/cache.h | 6 +- tools/perf/util/callchain.c | 6 +- tools/perf/util/cgroup.c | 4 +- tools/perf/util/config.c | 6 +- tools/perf/util/debug.h | 9 +- tools/perf/util/event.c | 19 ++-- tools/perf/util/header.c | 93 ++++++++++--------- tools/perf/util/help.c | 3 +- tools/perf/util/hist.c | 2 +- tools/perf/util/hist.h | 32 ++++--- tools/perf/util/include/linux/compiler.h | 4 +- tools/perf/util/intlist.c | 4 +- tools/perf/util/map.h | 2 +- tools/perf/util/parse-events-test.c | 6 +- tools/perf/util/parse-events.c | 7 +- tools/perf/util/parse-events.l | 2 +- tools/perf/util/parse-events.y | 4 +- tools/perf/util/parse-options.c | 3 +- tools/perf/util/perf_regs.h | 2 +- tools/perf/util/pmu.y | 6 +- tools/perf/util/probe-event.c | 21 +++-- tools/perf/util/probe-finder.c | 4 +- tools/perf/util/python.c | 8 +- 
.../perf/util/scripting-engines/trace-event-perl.c | 8 +- .../util/scripting-engines/trace-event-python.c | 10 +- tools/perf/util/session.c | 50 +++++----- tools/perf/util/sort.c | 14 ++- tools/perf/util/symbol-minimal.c | 28 +++--- tools/perf/util/symbol.c | 2 +- tools/perf/util/symbol.h | 7 +- tools/perf/util/trace-event-parse.c | 4 +- tools/perf/util/trace-event-scripting.c | 33 ++++--- tools/perf/util/unwind.c | 36 ++++---- tools/perf/util/unwind.h | 11 ++- tools/perf/util/wrapper.c | 3 +- 76 files changed, 498 insertions(+), 418 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index f4190b5764d..2c54cdd8ae1 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -1824,7 +1824,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) } static enum event_type -process_entry(struct event_format *event __unused, struct print_arg *arg, +process_entry(struct event_format *event __maybe_unused, struct print_arg *arg, char **tok) { enum event_type type; @@ -2458,7 +2458,8 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) static enum event_type -process_str(struct event_format *event __unused, struct print_arg *arg, char **tok) +process_str(struct event_format *event __maybe_unused, struct print_arg *arg, + char **tok) { enum event_type type; char *token; @@ -3653,7 +3654,8 @@ static void free_args(struct print_arg *args) } static char * -get_bprint_format(void *data, int size __unused, struct event_format *event) +get_bprint_format(void *data, int size __maybe_unused, + struct event_format *event) { struct pevent *pevent = event->pevent; unsigned long long addr; diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h index 3318963f1c9..a4bbe243792 100644 --- a/tools/lib/traceevent/event-parse.h +++ b/tools/lib/traceevent/event-parse.h @@ -24,8 +24,8 @@ #include #include -#ifndef __unused -#define __unused __attribute__ ((unused)) +#ifndef __maybe_unused +#define __maybe_unused __attribute__((unused)) #endif /* ----------------------- trace_seq ----------------------- */ diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index a09bece6dad..8f89998eeaf 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h @@ -3,7 +3,8 @@ extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); -extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); +extern int bench_mem_memcpy(int argc, const char **argv, + const char *prefix __maybe_unused); extern int bench_mem_memset(int argc, const char **argv, const char *prefix); #define BENCH_FORMAT_DEFAULT_STR "default" diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 02dad5d3359..93c83e3cb4a 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) } while (0) int bench_mem_memcpy(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int i; size_t len; diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c index 350cc955726..c6e4bc52349 100644 --- a/tools/perf/bench/mem-memset.c +++ b/tools/perf/bench/mem-memset.c @@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, 
bool prefault) } while (0) int bench_mem_memset(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int i; size_t len; diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index d1d1b30f99c..cc1190a0849 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c @@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = { }; int bench_sched_messaging(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { unsigned int i, total_children; struct timeval start, stop, diff; diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 15911e9c587..69cfba8d4c6 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = { }; int bench_sched_pipe(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int pipe_1[2], pipe_2[2]; int m = 0, i; @@ -55,8 +55,8 @@ int bench_sched_pipe(int argc, const char **argv, * discarding returned value of read(), write() * causes error in building environment for perf */ - int __used ret, wait_stat; - pid_t pid, retpid __used; + int __maybe_unused ret, wait_stat; + pid_t pid, retpid __maybe_unused; argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0); diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 2f3f0029c0f..9ea38540b87 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -239,7 +239,7 @@ static const char * const annotate_usage[] = { NULL }; -int cmd_annotate(int argc, const char **argv, const char *prefix __used) +int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_annotate annotate = { .tool = { diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index 1f310021644..cae9a5fd2ec 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c @@ -173,7 +173,7 @@ static void all_subsystem(void) all_suite(&subsystems[i]); } -int cmd_bench(int argc, const char **argv, const char *prefix __used) +int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) { int i, j, status = 0; diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c index 995368e84e4..83654557e10 100644 --- a/tools/perf/builtin-buildid-cache.c +++ b/tools/perf/builtin-buildid-cache.c @@ -51,8 +51,8 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir) return err; } -static int build_id_cache__remove_file(const char *filename __used, - const char *debugdir __used) +static int build_id_cache__remove_file(const char *filename __maybe_unused, + const char *debugdir __maybe_unused) { u8 build_id[BUILD_ID_SIZE]; char sbuild_id[BUILD_ID_SIZE * 2 + 1]; @@ -120,7 +120,8 @@ static int __cmd_buildid_cache(void) return 0; } -int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used) +int cmd_buildid_cache(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, buildid_cache_options, buildid_cache_usage, 0); diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 7d6842826a0..1159feeebb1 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c @@ -103,7 +103,8 @@ static int __cmd_buildid_list(void) return perf_session__list_build_ids(); } -int 
cmd_buildid_list(int argc, const char **argv, const char *prefix __used) +int cmd_buildid_list(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, buildid_list_usage, 0); setup_pager(); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index c4c6d76b70e..761f4197a9e 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -33,7 +33,7 @@ static int hists__add_entry(struct hists *self, return -ENOMEM; } -static int diff__process_sample_event(struct perf_tool *tool __used, +static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, @@ -242,7 +242,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_diff(int argc, const char **argv, const char *prefix __used) +int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { sort_order = diff__default_sort_order; argc = parse_options(argc, argv, options, diff_usage, 0); diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c index 0dd5a058f76..1fb164164fd 100644 --- a/tools/perf/builtin-evlist.c +++ b/tools/perf/builtin-evlist.c @@ -113,7 +113,7 @@ static const char * const evlist_usage[] = { NULL }; -int cmd_evlist(int argc, const char **argv, const char *prefix __used) +int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_attr_details details = { .verbose = false, }; const char *input_name = NULL; diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index f9daae5ac47..25c8b942ff8 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -426,7 +426,7 @@ static int show_html_page(const char *perf_cmd) return 0; } -int cmd_help(int argc, const char **argv, const char *prefix __used) +int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused) { const char *alias; int rc = 0; diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 64d8ba2fb7b..1eaa6617c81 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -17,9 +17,9 @@ static char const *input_name = "-"; static bool inject_build_ids; -static int perf_event__repipe_synth(struct perf_tool *tool __used, +static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct machine *machine __used) + struct machine *machine __maybe_unused) { uint32_t size; void *buf = event; @@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used, static int perf_event__repipe_op2_synth(struct perf_tool *tool, union perf_event *event, - struct perf_session *session __used) + struct perf_session *session + __maybe_unused) { return perf_event__repipe_synth(tool, event, NULL); } @@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool, } static int perf_event__repipe_tracing_data_synth(union perf_event *event, - struct perf_session *session __used) + struct perf_session *session + __maybe_unused) { return perf_event__repipe_synth(NULL, event, NULL); } static int perf_event__repipe_attr(union perf_event *event, - struct perf_evlist **pevlist __used) + struct perf_evlist **pevlist __maybe_unused) { int ret; ret = perf_event__process_attr(event, pevlist); @@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event, static int perf_event__repipe(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct 
perf_sample *sample __maybe_unused, struct machine *machine) { return perf_event__repipe_synth(tool, event, machine); @@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool, static int perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { return perf_event__repipe_synth(tool, event, machine); @@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, static int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __used, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; @@ -224,7 +226,7 @@ struct perf_tool perf_inject = { extern volatile int session_done; -static void sig_handler(int sig __attribute__((__unused__))) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -267,7 +269,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_inject(int argc, const char **argv, const char *prefix __used) +int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, report_usage, 0); diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index ad9f5209738..f5f8a6b745a 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c @@ -320,7 +320,7 @@ static int perf_evsel__process_kmem_event(struct perf_evsel *evsel, return 0; } -static int process_sample_event(struct perf_tool *tool __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, @@ -672,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg) return 0; } -static int parse_sort_opt(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_sort_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { if (!arg) return -1; @@ -686,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used, return 0; } -static int parse_caller_opt(const struct option *opt __used, - const char *arg __used, int unset __used) +static int parse_caller_opt(const struct option *opt __maybe_unused, + const char *arg __maybe_unused, + int unset __maybe_unused) { caller_flag = (alloc_flag + 1); return 0; } -static int parse_alloc_opt(const struct option *opt __used, - const char *arg __used, int unset __used) +static int parse_alloc_opt(const struct option *opt __maybe_unused, + const char *arg __maybe_unused, + int unset __maybe_unused) { alloc_flag = (caller_flag + 1); return 0; } -static int parse_line_opt(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_line_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { int lines; @@ -771,7 +773,7 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_kmem(int argc, const char **argv, const char *prefix __used) +int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) { argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 9fc6e0fa3dc..4d2aa2cbeca 100644 --- a/tools/perf/builtin-kvm.c +++ 
b/tools/perf/builtin-kvm.c @@ -102,7 +102,7 @@ static int __cmd_buildid_list(int argc, const char **argv) return cmd_buildid_list(i, rec_argv, NULL); } -int cmd_kvm(int argc, const char **argv, const char *prefix __used) +int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) { perf_host = 0; perf_guest = 1; diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index bdcff81b532..1948eceb517 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c @@ -14,7 +14,7 @@ #include "util/parse-events.h" #include "util/cache.h" -int cmd_list(int argc, const char **argv, const char *prefix __used) +int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { setup_pager(); diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 75153c87e65..a8035207a3d 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -870,7 +870,7 @@ static int dump_info(void) return rc; } -static int process_sample_event(struct perf_tool *tool __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, @@ -1020,7 +1020,7 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_lock(int argc, const char **argv, const char *prefix __used) +int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) { unsigned int i; int rc = 0; diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index e215ae61b2a..118aa894657 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv) return ret; } -static int opt_add_probe_event(const struct option *opt __used, - const char *str, int unset __used) +static int opt_add_probe_event(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { if (str) { params.mod_events = true; @@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used, return 0; } -static int opt_del_probe_event(const struct option *opt __used, - const char *str, int unset __used) +static int opt_del_probe_event(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { if (str) { params.mod_events = true; @@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used, } static int opt_set_target(const struct option *opt, const char *str, - int unset __used) + int unset __maybe_unused) { int ret = -ENOENT; @@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str, } #ifdef DWARF_SUPPORT -static int opt_show_lines(const struct option *opt __used, - const char *str, int unset __used) +static int opt_show_lines(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { int ret = 0; @@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used, return ret; } -static int opt_show_vars(const struct option *opt __used, - const char *str, int unset __used) +static int opt_show_vars(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { struct perf_probe_event *pev = ¶ms.events[params.nevents]; int ret; @@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used, } #endif -static int opt_set_filter(const struct option *opt __used, - const char *str, int unset __used) +static int opt_set_filter(const struct option *opt __maybe_unused, + const 
char *str, int unset __maybe_unused) { const char *err; @@ -327,7 +327,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_probe(int argc, const char **argv, const char *prefix __used) +int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) { int ret; diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 7b8b891d4d5..c643ed669ef 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -92,8 +92,8 @@ static int write_output(struct perf_record *rec, void *buf, size_t size) static int process_synthesized_event(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { struct perf_record *rec = container_of(tool, struct perf_record, tool); if (write_output(rec, event, event->header.size) < 0) @@ -159,7 +159,7 @@ static void sig_handler(int sig) signr = sig; } -static void perf_record__sig_exit(int exit_status __used, void *arg) +static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg) { struct perf_record *rec = arg; int status; @@ -827,7 +827,7 @@ static int get_stack_size(char *str, unsigned long *_size) #endif /* !NO_LIBUNWIND_SUPPORT */ static int -parse_callchain_opt(const struct option *opt __used, const char *arg, +parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, int unset) { struct perf_record *rec = (struct perf_record *)opt->value; @@ -1003,7 +1003,7 @@ const struct option record_options[] = { OPT_END() }; -int cmd_record(int argc, const char **argv, const char *prefix __used) +int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) { int err = -ENOMEM; struct perf_evsel *pos; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 1f8d11b4f7f..97b2e6300f4 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool, static int process_read_event(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct perf_evsel *evsel, - struct machine *machine __used) + struct machine *machine __maybe_unused) { struct perf_report *rep = container_of(tool, struct perf_report, tool); @@ -287,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) extern volatile int session_done; -static void sig_handler(int sig __used) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -533,13 +533,14 @@ setup: } static int -parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) +parse_branch_mode(const struct option *opt __maybe_unused, + const char *str __maybe_unused, int unset) { sort__branch_mode = !unset; return 0; } -int cmd_report(int argc, const char **argv, const char *prefix __used) +int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_session *session; struct stat st; diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 782f66d3610..82e8ec2c43b 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -299,7 +299,7 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, static void add_sched_event_sleep(struct task_desc *task, u64 timestamp, - u64 task_state __used) + u64 task_state __maybe_unused) { struct sched_atom *event = get_new_event(task, 
timestamp); @@ -369,8 +369,8 @@ static void add_cross_task_wakeups(void) } } -static void -process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) +static void process_sched_event(struct task_desc *this_task __maybe_unused, + struct sched_atom *atom) { int ret = 0; @@ -752,7 +752,7 @@ struct trace_sched_handler { static int replay_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine __used, + struct machine *machine __maybe_unused, struct event_format *event, struct perf_sample *sample) { struct task_desc *waker, *wakee; @@ -777,11 +777,11 @@ static u64 cpu_last_switched[MAX_CPUS]; static int replay_switch_event(struct trace_switch_event *switch_event, - struct machine *machine __used, + struct machine *machine __maybe_unused, struct event_format *event, struct perf_sample *sample) { - struct task_desc *prev, __used *next; + struct task_desc *prev, __maybe_unused *next; u64 timestamp0, timestamp = sample->time; int cpu = sample->cpu; s64 delta; @@ -932,15 +932,13 @@ static int thread_atoms_insert(struct thread *thread) return 0; } -static int -latency_fork_event(struct trace_fork_event *fork_event __used, - struct event_format *event __used) +static int latency_fork_event(struct trace_fork_event *fork_event __maybe_unused, + struct event_format *event __maybe_unused) { /* should insert the newcomer */ return 0; } -__used static char sched_out_state(struct trace_switch_event *switch_event) { const char *str = TASK_STATE_TO_CHAR_STR; @@ -971,7 +969,8 @@ add_sched_out_event(struct work_atoms *atoms, } static void -add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used) +add_runtime_event(struct work_atoms *atoms, u64 delta, + u64 timestamp __maybe_unused) { struct work_atom *atom; @@ -1017,7 +1016,7 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) static int latency_switch_event(struct trace_switch_event *switch_event, struct machine *machine, - struct event_format *event __used, + struct event_format *event __maybe_unused, struct perf_sample *sample) { struct work_atoms *out_events, *in_events; @@ -1105,7 +1104,8 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, static int latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine, struct event_format *event __used, + struct machine *machine, + struct event_format *event __maybe_unused, struct perf_sample *sample) { struct work_atoms *atoms; @@ -1369,12 +1369,11 @@ static void sort_lat(void) static struct trace_sched_handler *trace_handler; -static int -process_sched_wakeup_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread __used) +static int process_sched_wakeup_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread __maybe_unused) { void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; @@ -1410,10 +1409,10 @@ static char next_shortname2 = '0'; static int map_switch_event(struct trace_switch_event *switch_event, struct machine *machine, - struct event_format *event __used, + struct event_format *event __maybe_unused, struct perf_sample *sample) { - struct thread *sched_out __used, *sched_in; + struct thread *sched_out __maybe_unused, *sched_in; int new_shortname; u64 timestamp0, timestamp = sample->time; s64 delta; @@ -1487,12 +1486,11 @@ map_switch_event(struct trace_switch_event *switch_event, 
return 0; } -static int -process_sched_switch_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread __used) +static int process_sched_switch_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread __maybe_unused) { int this_cpu = sample->cpu, err = 0; void *data = sample->raw_data; @@ -1523,12 +1521,11 @@ process_sched_switch_event(struct perf_tool *tool __used, return err; } -static int -process_sched_runtime_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread __used) +static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread __maybe_unused) { void *data = sample->raw_data; struct trace_runtime_event runtime_event; @@ -1545,12 +1542,11 @@ process_sched_runtime_event(struct perf_tool *tool __used, return err; } -static int -process_sched_fork_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine __used, - struct thread *thread __used) +static int process_sched_fork_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample, + struct machine *machine __maybe_unused, + struct thread *thread __maybe_unused) { void *data = sample->raw_data; struct trace_fork_event fork_event; @@ -1569,12 +1565,11 @@ process_sched_fork_event(struct perf_tool *tool __used, return err; } -static int -process_sched_exit_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample __used, - struct machine *machine __used, - struct thread *thread __used) +static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused, + struct thread *thread __maybe_unused) { if (verbose) printf("sched_exit event %p\n", event); @@ -1582,12 +1577,11 @@ process_sched_exit_event(struct perf_tool *tool __used, return 0; } -static int -process_sched_migrate_task_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread __used) +static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unused, + struct event_format *event, + struct perf_sample *sample, + struct machine *machine, + struct thread *thread __maybe_unused) { void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; @@ -1612,8 +1606,8 @@ typedef int (*tracepoint_handler)(struct perf_tool *tool, struct machine *machine, struct thread *thread); -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, - union perf_event *event __used, +static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) @@ -1918,7 +1912,7 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_sched(int argc, const char **argv, const char *prefix __used) +int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) { argc = parse_options(argc, argv, 
sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index c350cfee315..6d98a83d5a6 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -430,9 +430,9 @@ static void process_event(union perf_event *event, struct perf_sample *sample, printf("\n"); } -static int default_start_script(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int default_start_script(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { return 0; } @@ -442,8 +442,8 @@ static int default_stop_script(void) return 0; } -static int default_generate_script(struct pevent *pevent __unused, - const char *outfile __unused) +static int default_generate_script(struct pevent *pevent __maybe_unused, + const char *outfile __maybe_unused) { return 0; } @@ -474,7 +474,7 @@ static int cleanup_scripting(void) static const char *input_name; -static int process_sample_event(struct perf_tool *tool __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, @@ -534,7 +534,7 @@ static struct perf_tool perf_script = { extern volatile int session_done; -static void sig_handler(int sig __unused) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -644,8 +644,8 @@ static void list_available_languages(void) fprintf(stderr, "\n"); } -static int parse_scriptname(const struct option *opt __used, - const char *str, int unset __used) +static int parse_scriptname(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { char spec[PATH_MAX]; const char *script, *ext; @@ -690,8 +690,8 @@ static int parse_scriptname(const struct option *opt __used, return 0; } -static int parse_output_fields(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_output_fields(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { char *tok; int i, imax = sizeof(all_output_options) / sizeof(struct output_option); @@ -982,8 +982,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix) return script_root; } -static int list_available_scripts(const struct option *opt __used, - const char *s __used, int unset __used) +static int list_available_scripts(const struct option *opt __maybe_unused, + const char *s __maybe_unused, + int unset __maybe_unused) { struct dirent *script_next, *lang_next, script_dirent, lang_dirent; char scripts_path[MAXPATHLEN]; @@ -1172,7 +1173,7 @@ static int have_cmd(int argc, const char **argv) return 0; } -int cmd_script(int argc, const char **argv, const char *prefix __used) +int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) { char *rec_script_path = NULL; char *rep_script_path = NULL; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 02f49eba677..dab347d7b01 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -417,7 +417,7 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static int run_perf_stat(int argc __used, const char **argv) +static int run_perf_stat(int argc __maybe_unused, const char **argv) { unsigned long long t0, t1; struct perf_evsel *counter, *first; @@ -634,7 +634,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio) return color; } -static void print_stalled_cycles_frontend(int cpu, struct perf_evsel 
*evsel __used, double avg) +static void print_stalled_cycles_frontend(int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) { double total, ratio = 0.0; const char *color; @@ -651,7 +653,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us fprintf(output, " frontend cycles idle "); } -static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_stalled_cycles_backend(int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) { double total, ratio = 0.0; const char *color; @@ -668,7 +672,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use fprintf(output, " backend cycles idle "); } -static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_branch_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -685,7 +691,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double fprintf(output, " of all branches "); } -static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_l1_dcache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -702,7 +710,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou fprintf(output, " of all L1-dcache hits "); } -static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_l1_icache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -719,7 +729,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou fprintf(output, " of all L1-icache hits "); } -static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_dtlb_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -736,7 +748,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do fprintf(output, " of all dTLB cache hits "); } -static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_itlb_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -753,7 +767,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do fprintf(output, " of all iTLB cache hits "); } -static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_ll_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -1059,8 +1075,8 @@ static const char * const stat_usage[] = { NULL }; -static int stat__set_big_num(const struct option *opt __used, - const char *s __used, int unset) +static int stat__set_big_num(const struct option *opt __maybe_unused, + const char *s __maybe_unused, int unset) { big_num_opt = unset ? 
0 : 1; return 0; @@ -1154,7 +1170,7 @@ static int add_default_attributes(void) return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); } -int cmd_stat(int argc, const char **argv, const char *prefix __used) +int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_evsel *pos; int status = -ENOMEM; diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 6ae102eba5f..d33143efefc 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -18,7 +18,8 @@ #include -static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) +static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, + struct symbol *sym) { bool *visited = symbol__priv(sym); *visited = true; @@ -996,7 +997,9 @@ static u64 mmap_read_self(void *addr) /* * If the RDPMC instruction faults then signal this back to the test parent task: */ -static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used) +static void segfault_handler(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) { exit(-1); } @@ -1315,7 +1318,7 @@ static int perf_test__list(int argc, const char **argv) return 0; } -int cmd_test(int argc, const char **argv, const char *prefix __used) +int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) { const char * const test_usage[] = { "perf test [] [{list |[|]}]", diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 3b75b2e21ea..55a3a6c6b9e 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c @@ -275,28 +275,28 @@ static int cpus_cstate_state[MAX_CPUS]; static u64 cpus_pstate_start_times[MAX_CPUS]; static u64 cpus_pstate_state[MAX_CPUS]; -static int process_comm_event(struct perf_tool *tool __used, +static int process_comm_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_set_comm(event->comm.tid, event->comm.comm); return 0; } -static int process_fork_event(struct perf_tool *tool __used, +static int process_fork_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); return 0; } -static int process_exit_event(struct perf_tool *tool __used, +static int process_exit_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_exit(event->fork.pid, event->fork.time); return 0; @@ -491,11 +491,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) } -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __used) + struct machine *machine __maybe_unused) { struct trace_entry *te; @@ -1081,7 +1081,8 @@ static int __cmd_record(int argc, const char **argv) } static int -parse_process(const struct option *opt __used, const char *arg, 
int __used unset) +parse_process(const struct option *opt __maybe_unused, const char *arg, + int __maybe_unused unset) { if (arg) add_process_filter(arg); @@ -1106,7 +1107,8 @@ static const struct option options[] = { }; -int cmd_timechart(int argc, const char **argv, const char *prefix __used) +int cmd_timechart(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 0513aaa659f..5550754c05f 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -95,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top) top->print_entries -= 9; } -static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) +static void perf_top__sig_winch(int sig __maybe_unused, + siginfo_t *info __maybe_unused, void *arg) { struct perf_top *top = arg; @@ -663,7 +664,7 @@ static const char *skip_symbols[] = { NULL }; -static int symbol_filter(struct map *map __used, struct symbol *sym) +static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) { const char *name = sym->name; int i; @@ -1163,7 +1164,7 @@ static const char * const top_usage[] = { NULL }; -int cmd_top(int argc, const char **argv, const char *prefix __used) +int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_evsel *pos; int status; diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c index 1818a531f1d..4aeb7d5df93 100644 --- a/tools/perf/ui/browser.c +++ b/tools/perf/ui/browser.c @@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title, return err ? 0 : -1; } -void ui_browser__hide(struct ui_browser *browser __used) +void ui_browser__hide(struct ui_browser *browser __maybe_unused) { pthread_mutex_lock(&ui__lock); ui_helpline__pop(); @@ -518,7 +518,7 @@ static struct ui_browser__colorset { static int ui_browser__color_config(const char *var, const char *value, - void *data __used) + void *data __maybe_unused) { char *fg = NULL, *bg; int i; @@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column, SLsmg_set_char_set(0); } -void ui_browser__write_graph(struct ui_browser *browser __used, int graph) +void ui_browser__write_graph(struct ui_browser *browser __maybe_unused, + int graph) { SLsmg_set_char_set(1); SLsmg_write_char(graph); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 67a2703e666..8f8cd2d73b3 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin return (struct browser_disasm_line *)(dl + 1); } -static bool disasm_line__filter(struct ui_browser *browser __used, void *entry) +static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, + void *entry) { if (annotate_browser__opts.hide_src_code) { struct disasm_line *dl = list_entry(entry, struct disasm_line, node); @@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp) return strcmp(name, cfg->name); } -static int annotate__config(const char *var, const char *value, void *data __used) +static int annotate__config(const char *var, const char *value, + void *data __maybe_unused) { struct annotate__config *cfg; const char *name; diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c index 3c16ab50e0f..55acba6e0df 
100644 --- a/tools/perf/ui/gtk/browser.c +++ b/tools/perf/ui/gtk/browser.c @@ -237,8 +237,9 @@ static GtkWidget *perf_gtk__setup_statusbar(void) int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help, - void (*timer) (void *arg)__used, - void *arg __used, int delay_secs __used) + void (*timer) (void *arg)__maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { struct perf_evsel *pos; GtkWidget *vbox; diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c index 26429437e19..3c4c6ef7828 100644 --- a/tools/perf/ui/gtk/setup.c +++ b/tools/perf/ui/gtk/setup.c @@ -12,7 +12,7 @@ int perf_gtk__init(void) return gtk_init_check(NULL, NULL) ? 0 : -1; } -void perf_gtk__exit(bool wait_for_ok __used) +void perf_gtk__exit(bool wait_for_ok __maybe_unused) { if (!perf_gtk__is_active_context(pgctx)) return; diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c index b8efb966f94..8aada5b3c04 100644 --- a/tools/perf/ui/gtk/util.c +++ b/tools/perf/ui/gtk/util.c @@ -117,8 +117,8 @@ struct perf_error_ops perf_gtk_eops = { * For now, just add stubs for NO_NEWT=1 build. */ #ifdef NO_NEWT_SUPPORT -void ui_progress__update(u64 curr __used, u64 total __used, - const char *title __used) +void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, + const char *title __maybe_unused) { } #endif diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c index 78ba28ac7a2..a49bcf3c190 100644 --- a/tools/perf/ui/helpline.c +++ b/tools/perf/ui/helpline.c @@ -12,7 +12,7 @@ static void nop_helpline__pop(void) { } -static void nop_helpline__push(const char *msg __used) +static void nop_helpline__push(const char *msg __maybe_unused) { } diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h index a2487f93aa4..2b667ee454c 100644 --- a/tools/perf/ui/helpline.h +++ b/tools/perf/ui/helpline.h @@ -24,8 +24,8 @@ void ui_helpline__puts(const char *msg); extern char ui_helpline__current[512]; #ifdef NO_NEWT_SUPPORT -static inline int ui_helpline__show_help(const char *format __used, - va_list ap __used) +static inline int ui_helpline__show_help(const char *format __maybe_unused, + va_list ap __maybe_unused) { return 0; } @@ -35,8 +35,8 @@ int ui_helpline__show_help(const char *format, va_list ap); #endif /* NO_NEWT_SUPPORT */ #ifdef NO_GTK2_SUPPORT -static inline int perf_gtk__show_helpline(const char *format __used, - va_list ap __used) +static inline int perf_gtk__show_helpline(const char *format __maybe_unused, + va_list ap __maybe_unused) { return 0; } diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 031b349a3f8..407e855cccb 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -13,7 +13,7 @@ static int hpp__header_overhead(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt); } -static int hpp__width_overhead(struct perf_hpp *hpp __used) +static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) { return 8; } @@ -62,7 +62,7 @@ static int hpp__header_overhead_sys(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt, "sys"); } -static int hpp__width_overhead_sys(struct perf_hpp *hpp __used) +static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) { return 6; } @@ -88,7 +88,7 @@ static int hpp__header_overhead_us(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt, "user"); } -static int hpp__width_overhead_us(struct perf_hpp *hpp __used) +static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) { return 6; } @@ -112,7 +112,7 @@ static 
int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, "guest sys"); } -static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __used) +static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) { return 9; } @@ -138,7 +138,7 @@ static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, "guest usr"); } -static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __used) +static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) { return 9; } @@ -166,7 +166,7 @@ static int hpp__header_samples(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); } -static int hpp__width_samples(struct perf_hpp *hpp __used) +static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) { return 11; } @@ -185,7 +185,7 @@ static int hpp__header_period(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt, "Period"); } -static int hpp__width_period(struct perf_hpp *hpp __used) +static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) { return 12; } @@ -204,7 +204,7 @@ static int hpp__header_delta(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); } -static int hpp__width_delta(struct perf_hpp *hpp __used) +static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) { return 7; } @@ -238,12 +238,13 @@ static int hpp__header_displ(struct perf_hpp *hpp) return scnprintf(hpp->buf, hpp->size, "Displ."); } -static int hpp__width_displ(struct perf_hpp *hpp __used) +static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) { return 6; } -static int hpp__entry_displ(struct perf_hpp *hpp, struct hist_entry *he __used) +static int hpp__entry_displ(struct perf_hpp *hpp, + struct hist_entry *he __maybe_unused) { const char *fmt = symbol_conf.field_sep ? 
"%s" : "%6.6s"; char buf[32] = " "; diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 4dc0887c04f..60debb81537 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -28,7 +28,7 @@ void ui__refresh_dimensions(bool force) } } -static void ui__sigwinch(int sig __used) +static void ui__sigwinch(int sig __maybe_unused) { ui__need_resize = 1; } @@ -88,7 +88,7 @@ int ui__getch(int delay_secs) return SLkp_getkey(); } -static void newt_suspend(void *d __used) +static void newt_suspend(void *d __maybe_unused) { newtSuspend(); raise(SIGTSTP); diff --git a/tools/perf/util/alias.c b/tools/perf/util/alias.c index b8144e80bb1..e6d134773d0 100644 --- a/tools/perf/util/alias.c +++ b/tools/perf/util/alias.c @@ -3,7 +3,8 @@ static const char *alias_key; static char *alias_val; -static int alias_lookup_cb(const char *k, const char *v, void *cb __used) +static int alias_lookup_cb(const char *k, const char *v, + void *cb __maybe_unused) { if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { if (!v) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 04eafd3939d..f0a91037137 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -313,8 +313,8 @@ static struct ins_ops dec_ops = { .scnprintf = dec__scnprintf, }; -static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size, - struct ins_operands *ops __used) +static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, + struct ins_operands *ops __maybe_unused) { return scnprintf(bf, size, "%-6.6s", "nop"); } @@ -416,7 +416,7 @@ static struct ins *ins__find(const char *name) return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); } -int symbol__annotate_init(struct map *map __used, struct symbol *sym) +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); pthread_mutex_init(&notes->lock, NULL); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 62a6e7a7365..9b5b21e7b03 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -126,7 +126,7 @@ int symbol__alloc_hist(struct symbol *sym); void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); -int symbol__annotate_init(struct map *map __used, struct symbol *sym); +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, bool full_paths, int min_pcnt, int max_lines, int context); @@ -139,11 +139,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, int max_lines); #ifdef NO_NEWT_SUPPORT -static inline int symbol__tui_annotate(struct symbol *sym __used, - struct map *map __used, - int evidx __used, - void(*timer)(void *arg) __used, - void *arg __used, int delay_secs __used) +static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, + struct map *map __maybe_unused, + int evidx __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { return 0; } diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index fd9a5944b62..8e3a740ddbd 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -16,10 +16,10 @@ #include "session.h" #include "tool.h" -static int build_id__mark_dso_hit(struct perf_tool *tool __used, +static int build_id__mark_dso_hit(struct perf_tool 
*tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; @@ -41,9 +41,10 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, return 0; } -static int perf_event__exit_del_thread(struct perf_tool *tool __used, +static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample + __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->fork.tid); diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index cff18c617d1..ab176942654 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -39,7 +39,7 @@ static inline void setup_browser(bool fallback_to_pager) if (fallback_to_pager) setup_pager(); } -static inline void exit_browser(bool wait_for_ok __used) {} +static inline void exit_browser(bool wait_for_ok __maybe_unused) {} #else void setup_browser(bool fallback_to_pager); void exit_browser(bool wait_for_ok); @@ -49,7 +49,7 @@ static inline int ui__init(void) { return -1; } -static inline void ui__exit(bool wait_for_ok __used) {} +static inline void ui__exit(bool wait_for_ok __maybe_unused) {} #else int ui__init(void); void ui__exit(bool wait_for_ok); @@ -60,7 +60,7 @@ static inline int perf_gtk__init(void) { return -1; } -static inline void perf_gtk__exit(bool wait_for_ok __used) {} +static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} #else int perf_gtk__init(void); void perf_gtk__exit(bool wait_for_ok); diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 3a6bff47614..d3b3f5d8213 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -93,7 +93,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, */ static void sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, - u64 min_hit, struct callchain_param *param __used) + u64 min_hit, struct callchain_param *param __maybe_unused) { __sort_chain_flat(rb_root, &root->node, min_hit); } @@ -115,7 +115,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, static void sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, - u64 min_hit, struct callchain_param *param __used) + u64 min_hit, struct callchain_param *param __maybe_unused) { __sort_chain_graph_abs(&chain_root->node, min_hit); rb_root->rb_node = chain_root->node.rb_root.rb_node; @@ -140,7 +140,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node, static void sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, - u64 min_hit __used, struct callchain_param *param) + u64 min_hit __maybe_unused, struct callchain_param *param) { __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); rb_root->rb_node = chain_root->node.rb_root.rb_node; diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c index dbe2f16b1a1..96bbda1ddb8 100644 --- a/tools/perf/util/cgroup.c +++ b/tools/perf/util/cgroup.c @@ -138,8 +138,8 @@ void close_cgroup(struct cgroup_sel *cgrp) } } -int parse_cgroups(const struct option *opt __used, const char *str, - int unset __used) +int parse_cgroups(const struct option *opt __maybe_unused, const char *str, + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist 
**)opt->value; const char *p, *e, *eos = str + strlen(str); diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 6faa3a18bfb..3e0fdd369cc 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -342,13 +342,15 @@ const char *perf_config_dirname(const char *name, const char *value) return value; } -static int perf_default_core_config(const char *var __used, const char *value __used) +static int perf_default_core_config(const char *var __maybe_unused, + const char *value __maybe_unused) { /* Add other config variables here. */ return 0; } -int perf_default_config(const char *var, const char *value, void *dummy __used) +int perf_default_config(const char *var, const char *value, + void *dummy __maybe_unused) { if (!prefixcmp(var, "core.")) return perf_default_core_config(var, value); diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index 05e660cbf7e..bb2e7d1007a 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -16,19 +16,20 @@ struct ui_progress; struct perf_error_ops; #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) -static inline void ui_progress__update(u64 curr __used, u64 total __used, - const char *title __used) {} +static inline void ui_progress__update(u64 curr __maybe_unused, + u64 total __maybe_unused, + const char *title __maybe_unused) {} #define ui__error(format, arg...) ui__warning(format, ##arg) static inline int -perf_error__register(struct perf_error_ops *eops __used) +perf_error__register(struct perf_error_ops *eops __maybe_unused) { return 0; } static inline int -perf_error__unregister(struct perf_error_ops *eops __used) +perf_error__unregister(struct perf_error_ops *eops __maybe_unused) { return 0; } diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index f7f480503af..8202f5ca048 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -514,9 +514,9 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); } -int perf_event__process_comm(struct perf_tool *tool __used, +int perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->comm.tid); @@ -532,10 +532,10 @@ int perf_event__process_comm(struct perf_tool *tool __used, return 0; } -int perf_event__process_lost(struct perf_tool *tool __used, +int perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", event->lost.id, event->lost.lost); @@ -555,7 +555,8 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, maps[MAP__FUNCTION]->end = ~0ULL; } -static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, +static int perf_event__process_kernel_mmap(struct perf_tool *tool + __maybe_unused, union perf_event *event, struct machine *machine) { @@ -657,7 +658,7 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) int perf_event__process_mmap(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread; @@ -701,9 +702,9 @@ size_t 
perf_event__fprintf_task(union perf_event *event, FILE *fp) event->fork.ppid, event->fork.ptid); } -int perf_event__process_task(struct perf_tool *tool __used, +int perf_event__process_task(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->fork.tid); diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 87996cab21d..acbf6336199 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -475,7 +475,7 @@ static bool perf_session__read_build_ids(struct perf_session *session, bool with return ret; } -static int write_tracing_data(int fd, struct perf_header *h __used, +static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist) { return read_tracing_data(fd, &evlist->entries); @@ -483,7 +483,7 @@ static int write_tracing_data(int fd, struct perf_header *h __used, static int write_build_id(int fd, struct perf_header *h, - struct perf_evlist *evlist __used) + struct perf_evlist *evlist __maybe_unused) { struct perf_session *session; int err; @@ -504,8 +504,8 @@ static int write_build_id(int fd, struct perf_header *h, return 0; } -static int write_hostname(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_hostname(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -517,8 +517,8 @@ static int write_hostname(int fd, struct perf_header *h __used, return do_write_string(fd, uts.nodename); } -static int write_osrelease(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_osrelease(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -530,8 +530,8 @@ static int write_osrelease(int fd, struct perf_header *h __used, return do_write_string(fd, uts.release); } -static int write_arch(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_arch(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -543,14 +543,14 @@ static int write_arch(int fd, struct perf_header *h __used, return do_write_string(fd, uts.machine); } -static int write_version(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_version(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return do_write_string(fd, perf_version_string); } -static int write_cpudesc(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { #ifndef CPUINFO_PROC #define CPUINFO_PROC NULL @@ -608,8 +608,8 @@ done: return ret; } -static int write_nrcpus(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { long nr; u32 nrc, nra; @@ -634,7 +634,7 @@ static int write_nrcpus(int fd, struct perf_header *h __used, return do_write(fd, &nra, sizeof(nra)); } -static int write_event_desc(int fd, struct perf_header *h __used, +static int write_event_desc(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist) { struct 
perf_evsel *evsel; @@ -691,8 +691,8 @@ static int write_event_desc(int fd, struct perf_header *h __used, return 0; } -static int write_cmdline(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cmdline(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char buf[MAXPATHLEN]; char proc[32]; @@ -860,8 +860,8 @@ static struct cpu_topo *build_cpu_topology(void) return tp; } -static int write_cpu_topology(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct cpu_topo *tp; u32 i; @@ -896,8 +896,8 @@ done: -static int write_total_mem(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_total_mem(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char *buf = NULL; FILE *fp; @@ -982,8 +982,8 @@ done: return ret; } -static int write_numa_topology(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char *buf = NULL; size_t len = 0; @@ -1043,8 +1043,8 @@ done: * }; */ -static int write_pmu_mappings(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct perf_pmu *pmu = NULL; off_t offset = lseek(fd, 0, SEEK_CUR); @@ -1074,13 +1074,14 @@ static int write_pmu_mappings(int fd, struct perf_header *h __used, * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(ARCH)/util/header.c */ -int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) +int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, + size_t sz __maybe_unused) { return -1; } -static int write_cpuid(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpuid(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char buffer[64]; int ret; @@ -1094,8 +1095,9 @@ write_it: return do_write_string(fd, buffer); } -static int write_branch_stack(int fd __used, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_branch_stack(int fd __maybe_unused, + struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return 0; } @@ -1372,7 +1374,8 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) free_event_desc(events); } -static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) +static void print_total_mem(struct perf_header *h __maybe_unused, int fd, + FILE *fp) { uint64_t mem; ssize_t ret; @@ -1390,7 +1393,8 @@ error: fprintf(fp, "# total memory : unknown\n"); } -static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) +static void print_numa_topology(struct perf_header *h __maybe_unused, int fd, + FILE *fp) { ssize_t ret; u32 nr, c, i; @@ -1450,7 +1454,8 @@ static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) free(str); } -static void print_branch_stack(struct perf_header *ph __used, int fd __used, +static void print_branch_stack(struct perf_header *ph __maybe_unused, + int fd __maybe_unused, FILE *fp) { fprintf(fp, "# contains samples with branch stack\n"); @@ -1649,9 +1654,10 @@ 
out: return err; } -static int process_tracing_data(struct perf_file_section *section __unused, - struct perf_header *ph __unused, - int feat __unused, int fd, void *data) +static int process_tracing_data(struct perf_file_section *section + __maybe_unused, + struct perf_header *ph __maybe_unused, + int feat __maybe_unused, int fd, void *data) { trace_report(fd, data, false); return 0; @@ -1659,7 +1665,8 @@ static int process_tracing_data(struct perf_file_section *section __unused, static int process_build_id(struct perf_file_section *section, struct perf_header *ph, - int feat __unused, int fd, void *data __used) + int feat __maybe_unused, int fd, + void *data __maybe_unused) { if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); @@ -1698,9 +1705,9 @@ perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event } static int -process_event_desc(struct perf_file_section *section __unused, - struct perf_header *header, int feat __unused, int fd, - void *data __used) +process_event_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *header, int feat __maybe_unused, int fd, + void *data __maybe_unused) { struct perf_session *session = container_of(header, struct perf_session, header); struct perf_evsel *evsel, *events = read_event_desc(header, fd); @@ -2596,7 +2603,7 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, return err; } -int perf_event__process_event_type(struct perf_tool *tool __unused, +int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, union perf_event *event) { if (perf_header__push_event(event->event_type.event_type.event_id, @@ -2613,7 +2620,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, union perf_event ev; struct tracing_data *tdata; ssize_t size = 0, aligned_size = 0, padding; - int err __used = 0; + int err __maybe_unused = 0; /* * We are going to store the size of the data followed @@ -2712,7 +2719,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, return err; } -int perf_event__process_build_id(struct perf_tool *tool __used, +int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_session *session) { diff --git a/tools/perf/util/help.c b/tools/perf/util/help.c index 4fa764d8f7d..8b1f6e891b8 100644 --- a/tools/perf/util/help.c +++ b/tools/perf/util/help.c @@ -332,7 +332,8 @@ const char *help_unknown_cmd(const char *cmd) exit(1); } -int cmd_version(int argc __used, const char **argv __used, const char *prefix __used) +int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused, + const char *prefix __maybe_unused) { printf("perf version %s\n", perf_version_string); return 0; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 0ba65ad07cd..6ec5398de89 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -394,7 +394,7 @@ void hist_entry__free(struct hist_entry *he) * collapse the histogram */ -static bool hists__collapse_insert_entry(struct hists *hists __used, +static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, struct rb_root *root, struct hist_entry *he) { diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 4146f51124f..f011ad4756e 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -156,20 +156,22 @@ struct perf_evlist; #ifdef NO_NEWT_SUPPORT static inline -int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, 
- const char *help __used, - void(*timer)(void *arg) __used, - void *arg __used, - int refresh __used) +int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, + const char *help __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int refresh __maybe_unused) { return 0; } -static inline int hist_entry__tui_annotate(struct hist_entry *self __used, - int evidx __used, - void(*timer)(void *arg) __used, - void *arg __used, - int delay_secs __used) +static inline int hist_entry__tui_annotate(struct hist_entry *self + __maybe_unused, + int evidx __maybe_unused, + void(*timer)(void *arg) + __maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { return 0; } @@ -187,11 +189,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, #ifdef NO_GTK2_SUPPORT static inline -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used, - const char *help __used, - void(*timer)(void *arg) __used, - void *arg __used, - int refresh __used) +int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, + const char *help __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int refresh __maybe_unused) { return 0; } diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h index ce2367b7b3f..96b919dae11 100644 --- a/tools/perf/util/include/linux/compiler.h +++ b/tools/perf/util/include/linux/compiler.h @@ -9,7 +9,9 @@ #define __attribute_const__ #endif -#define __used __attribute__((__unused__)) +#ifndef __maybe_unused +#define __maybe_unused __attribute__((unused)) +#endif #define __packed __attribute__((__packed__)) #ifndef __force diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c index 77c504ff008..9d0740024ba 100644 --- a/tools/perf/util/intlist.c +++ b/tools/perf/util/intlist.c @@ -11,7 +11,7 @@ #include "intlist.h" -static struct rb_node *intlist__node_new(struct rblist *rblist __used, +static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, const void *entry) { int i = (int)((long)entry); @@ -31,7 +31,7 @@ static void int_node__delete(struct int_node *ilist) free(ilist); } -static void intlist__node_delete(struct rblist *rblist __used, +static void intlist__node_delete(struct rblist *rblist __maybe_unused, struct rb_node *rb_node) { struct int_node *node = container_of(rb_node, struct int_node, rb_node); diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 25ab4cdbc44..d2250fc97e2 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -96,7 +96,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip) return ip + map->start - map->pgoff; } -static inline u64 identity__map_ip(struct map *map __used, u64 ip) +static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) { return ip; } diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c index bc8b65130ae..d7244e55367 100644 --- a/tools/perf/util/parse-events-test.c +++ b/tools/perf/util/parse-events-test.c @@ -569,7 +569,7 @@ static int test__group2(struct perf_evlist *evlist) return 0; } -static int test__group3(struct perf_evlist *evlist __used) +static int test__group3(struct perf_evlist *evlist __maybe_unused) { struct perf_evsel *evsel, *leader; @@ -648,7 +648,7 @@ static int test__group3(struct perf_evlist *evlist __used) return 0; } -static int test__group4(struct perf_evlist *evlist __used) +static int test__group4(struct perf_evlist 
*evlist __maybe_unused) { struct perf_evsel *evsel, *leader; @@ -684,7 +684,7 @@ static int test__group4(struct perf_evlist *evlist __used) return 0; } -static int test__group5(struct perf_evlist *evlist __used) +static int test__group5(struct perf_evlist *evlist __maybe_unused) { struct perf_evsel *evsel, *leader; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index a031ee1f54f..44afcf40f79 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -807,7 +807,8 @@ int parse_events_terms(struct list_head *terms, const char *str) return ret; } -int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) +int parse_events(struct perf_evlist *evlist, const char *str, + int unset __maybe_unused) { struct parse_events_data__events data = { .list = LIST_HEAD_INIT(data.list), @@ -833,14 +834,14 @@ int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) } int parse_events_option(const struct option *opt, const char *str, - int unset __used) + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; return parse_events(evlist, str, unset); } int parse_filter(const struct option *opt, const char *str, - int unset __used) + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; struct perf_evsel *last = NULL; diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index f5e28dc6827..c87efc12579 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -207,7 +207,7 @@ r{num_raw_hex} { return raw(yyscanner); } %% -int parse_events_wrap(void *scanner __used) +int parse_events_wrap(void *scanner __maybe_unused) { return 1; } diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index 42d9a17b83b..cd88209e3c5 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -391,7 +391,7 @@ sep_slash_dc: '/' | ':' | %% -void parse_events_error(void *data __used, void *scanner __used, - char const *msg __used) +void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, + char const *msg __maybe_unused) { } diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c index 594f8fad5ec..443fc116512 100644 --- a/tools/perf/util/parse-options.c +++ b/tools/perf/util/parse-options.c @@ -557,7 +557,8 @@ int parse_options_usage(const char * const *usagestr, } -int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used, +int parse_opt_verbosity_cb(const struct option *opt, + const char *arg __maybe_unused, int unset) { int *target = opt->value; diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 9bd6c4e069c..316dbe7f86e 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -6,7 +6,7 @@ #else #define PERF_REGS_MASK 0 -static inline const char *perf_reg_name(int id __used) +static inline const char *perf_reg_name(int id __maybe_unused) { return NULL; } diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y index 20ea77e9316..ec898047ebb 100644 --- a/tools/perf/util/pmu.y +++ b/tools/perf/util/pmu.y @@ -86,8 +86,8 @@ PP_VALUE %% -void perf_pmu_error(struct list_head *list __used, - char *name __used, - char const *msg __used) +void perf_pmu_error(struct list_head *list __maybe_unused, + char *name __maybe_unused, + char const *msg __maybe_unused) { } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 
e8c72de0f70..4ce04c2281d 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -41,7 +41,7 @@ #include "symbol.h" #include "thread.h" #include "debugfs.h" -#include "trace-event.h" /* For __unused */ +#include "trace-event.h" /* For __maybe_unused */ #include "probe-event.h" #include "probe-finder.h" #include "session.h" @@ -647,8 +647,8 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, } static int try_to_find_probe_trace_events(struct perf_probe_event *pev, - struct probe_trace_event **tevs __unused, - int max_tevs __unused, const char *target) + struct probe_trace_event **tevs __maybe_unused, + int max_tevs __maybe_unused, const char *target) { if (perf_probe_event_need_dwarf(pev)) { pr_warning("Debuginfo-analysis is not supported.\n"); @@ -661,17 +661,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, return 0; } -int show_line_range(struct line_range *lr __unused, const char *module __unused) +int show_line_range(struct line_range *lr __maybe_unused, + const char *module __maybe_unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; } -int show_available_vars(struct perf_probe_event *pevs __unused, - int npevs __unused, int max_vls __unused, - const char *module __unused, - struct strfilter *filter __unused, - bool externs __unused) +int show_available_vars(struct perf_probe_event *pevs __maybe_unused, + int npevs __maybe_unused, int max_vls __maybe_unused, + const char *module __maybe_unused, + struct strfilter *filter __maybe_unused, + bool externs __maybe_unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; @@ -2183,7 +2184,7 @@ static struct strfilter *available_func_filter; * If a symbol corresponds to a function with global binding and * matches filter return 0. For all others return 1. */ -static int filter_available_functions(struct map *map __unused, +static int filter_available_functions(struct map *map __maybe_unused, struct symbol *sym) { if (sym->binding == STB_GLOBAL && diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index d448984ed78..526ba56e720 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -207,7 +207,7 @@ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, #else /* With older elfutils, this just support kernel module... 
*/ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, - Dwarf_Addr addr __used) + Dwarf_Addr addr __maybe_unused) { const char *path = kernel_get_module_path("kernel"); @@ -1419,7 +1419,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, } static int line_range_walk_cb(const char *fname, int lineno, - Dwarf_Addr addr __used, + Dwarf_Addr addr __maybe_unused, void *data) { struct line_finder *lf = data; diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 27187f0b71f..ca85444bcfb 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -672,7 +672,7 @@ struct pyrf_evlist { }; static int pyrf_evlist__init(struct pyrf_evlist *pevlist, - PyObject *args, PyObject *kwargs __used) + PyObject *args, PyObject *kwargs __maybe_unused) { PyObject *pcpus = NULL, *pthreads = NULL; struct cpu_map *cpus; @@ -733,7 +733,8 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, } static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, - PyObject *args __used, PyObject *kwargs __used) + PyObject *args __maybe_unused, + PyObject *kwargs __maybe_unused) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *list = PyList_New(0); @@ -765,7 +766,8 @@ free_list: static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, - PyObject *args, PyObject *kwargs __used) + PyObject *args, + PyObject *kwargs __maybe_unused) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *pevsel; diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 94e673643bc..ffde3e4e34a 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -257,10 +257,10 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) return event; } -static void perl_process_tracepoint(union perf_event *perf_event __unused, +static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __unused, + struct machine *machine __maybe_unused, struct addr_location *al) { struct format_field *field; @@ -349,8 +349,8 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, static void perl_process_event_generic(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __unused, - struct addr_location *al __unused) + struct machine *machine __maybe_unused, + struct addr_location *al __maybe_unused) { dSP; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index afba0972918..730c6630cba 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -221,10 +221,11 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel) return event; } -static void python_process_tracepoint(union perf_event *perf_event __unused, +static void python_process_tracepoint(union perf_event *perf_event + __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __unused, + struct machine *machine __maybe_unused, struct addr_location *al) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; @@ -339,10 +340,11 @@ static void python_process_tracepoint(union perf_event *perf_event __unused, Py_DECREF(t); } -static void python_process_general_event(union 
perf_event *perf_event __unused, +static void python_process_general_event(union perf_event *perf_event + __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __unused, + struct machine *machine __maybe_unused, struct addr_location *al) { PyObject *handler, *retval, *t, *dict; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index e0fd6c71cc5..3049b0ae700 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -401,49 +401,53 @@ int machine__resolve_callchain(struct machine *machine, } -static int process_event_synth_tracing_data_stub(union perf_event *event __used, - struct perf_session *session __used) +static int process_event_synth_tracing_data_stub(union perf_event *event + __maybe_unused, + struct perf_session *session + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_synth_attr_stub(union perf_event *event __used, - struct perf_evlist **pevlist __used) +static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, + struct perf_evlist **pevlist + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_sample_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, - struct machine *machine __used) +static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_sample *sample __used, - struct machine *machine __used) +static int process_event_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_finished_round_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_session *perf_session __used) +static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *perf_session + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_type_stub(struct perf_tool *tool __used, - union perf_event *event __used) +static int process_event_type_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused) { dump_printf(": unhandled!\n"); return 0; @@ -520,7 +524,7 @@ static void swap_sample_id_all(union perf_event *event, void *data) } static void perf_event__all64_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { struct perf_event_header *hdr = &event->header; mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); @@ -631,7 +635,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr) } static void perf_event__hdr_attr_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { size_t size; @@ -643,14 +647,14 @@ static void perf_event__hdr_attr_swap(union perf_event *event, } static void perf_event__event_type_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { event->event_type.event_type.event_id = 
bswap_64(event->event_type.event_type.event_id); } static void perf_event__tracing_data_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { event->tracing_data.size = bswap_32(event->tracing_data.size); } @@ -791,7 +795,7 @@ static int flush_sample_queue(struct perf_session *s, * etc... */ static int process_finished_round(struct perf_tool *tool, - union perf_event *event __used, + union perf_event *event __maybe_unused, struct perf_session *session) { int ret = flush_sample_queue(session, tool); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 7a2fbd8855b..0981bc7a291 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -170,7 +170,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, u64 ip, char level, char *bf, size_t size, - unsigned int width __used) + unsigned int width __maybe_unused) { size_t ret = 0; @@ -205,7 +205,8 @@ struct sort_entry sort_dso = { }; static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, self->level, bf, size, width); @@ -248,7 +249,8 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { FILE *fp; char cmd[PATH_MAX + 2], *path = self->srcline, *nl; @@ -397,7 +399,8 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { struct addr_map_symbol *from = &self->branch_info->from; return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, @@ -406,7 +409,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, } static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { struct addr_map_symbol *to = &self->branch_info->to; return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index 6738ea128c9..259f8f2ea9c 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -69,8 +69,9 @@ static int read_build_id(void *note_data, size_t note_len, void *bf, return -1; } -int filename__read_debuglink(const char *filename __used, - char *debuglink __used, size_t size __used) +int filename__read_debuglink(const char *filename __maybe_unused, + char *debuglink __maybe_unused, + size_t size __maybe_unused) { return -1; } @@ -241,7 +242,8 @@ out: return ret; } -int symsrc__init(struct symsrc *ss, struct dso *dso __used, const char *name, +int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused, + const char *name, enum dso_binary_type type) { int fd = open(name, O_RDONLY); @@ -260,13 +262,13 @@ out_close: return -1; } -bool symsrc__possibly_runtime(struct symsrc *ss __used) +bool symsrc__possibly_runtime(struct symsrc *ss __maybe_unused) { /* Assume all sym sources could be a runtime image. 
*/ return true; } -bool symsrc__has_symtab(struct symsrc *ss __used) +bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) { return false; } @@ -277,17 +279,19 @@ void symsrc__destroy(struct symsrc *ss) close(ss->fd); } -int dso__synthesize_plt_symbols(struct dso *dso __used, - struct symsrc *ss __used, - struct map *map __used, - symbol_filter_t filter __used) +int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, + struct symsrc *ss __maybe_unused, + struct map *map __maybe_unused, + symbol_filter_t filter __maybe_unused) { return 0; } -int dso__load_sym(struct dso *dso, struct map *map __used, struct symsrc *ss, - struct symsrc *runtime_ss __used, - symbol_filter_t filter __used, int kmodule __used) +int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, + struct symsrc *ss, + struct symsrc *runtime_ss __maybe_unused, + symbol_filter_t filter __maybe_unused, + int kmodule __maybe_unused) { unsigned char *build_id[BUILD_ID_SIZE]; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index bbb24e95165..e2e8c697cff 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1755,7 +1755,7 @@ struct process_args { }; static int symbol__in_kernel(void *arg, const char *name, - char type __used, u64 start) + char type __maybe_unused, u64 start) { struct process_args *args = arg; diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index dde8a26f7be..4ff45e30c72 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -21,14 +21,15 @@ #ifdef HAVE_CPLUS_DEMANGLE extern char *cplus_demangle(const char *, int); -static inline char *bfd_demangle(void __used *v, const char *c, int i) +static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) { return cplus_demangle(c, i); } #else #ifdef NO_DEMANGLE -static inline char *bfd_demangle(void __used *v, const char __used *c, - int __used i) +static inline char *bfd_demangle(void __maybe_unused *v, + const char __maybe_unused *c, + int __maybe_unused i) { return NULL; } diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index a5a554efeb5..aa4c860a21d 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -221,7 +221,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size, } void parse_proc_kallsyms(struct pevent *pevent, - char *file, unsigned int size __unused) + char *file, unsigned int size __maybe_unused) { unsigned long long addr; char *func; @@ -253,7 +253,7 @@ void parse_proc_kallsyms(struct pevent *pevent, } void parse_ftrace_printk(struct pevent *pevent, - char *file, unsigned int size __unused) + char *file, unsigned int size __maybe_unused) { unsigned long long addr; char *printk; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 302ff262494..8715a1006d0 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -35,11 +35,11 @@ static int stop_script_unsupported(void) return 0; } -static void process_event_unsupported(union perf_event *event __unused, - struct perf_sample *sample __unused, - struct perf_evsel *evsel __unused, - struct machine *machine __unused, - struct addr_location *al __unused) +static void process_event_unsupported(union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused, + struct addr_location *al __maybe_unused) { } @@ -52,17 
+52,19 @@ static void print_python_unsupported_msg(void) "\n etc.\n"); } -static int python_start_script_unsupported(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int python_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { print_python_unsupported_msg(); return -1; } -static int python_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int python_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile + __maybe_unused) { print_python_unsupported_msg(); @@ -114,17 +116,18 @@ static void print_perl_unsupported_msg(void) "\n etc.\n"); } -static int perl_start_script_unsupported(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int perl_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { print_perl_unsupported_msg(); return -1; } -static int perl_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int perl_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile __maybe_unused) { print_perl_unsupported_msg(); diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c index 00a42aa8d5c..958723ba3d2 100644 --- a/tools/perf/util/unwind.c +++ b/tools/perf/util/unwind.c @@ -307,32 +307,36 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, need_unwind_info, arg); } -static int access_fpreg(unw_addr_space_t __used as, unw_regnum_t __used num, - unw_fpreg_t __used *val, int __used __write, - void __used *arg) +static int access_fpreg(unw_addr_space_t __maybe_unused as, + unw_regnum_t __maybe_unused num, + unw_fpreg_t __maybe_unused *val, + int __maybe_unused __write, + void __maybe_unused *arg) { pr_err("unwind: access_fpreg unsupported\n"); return -UNW_EINVAL; } -static int get_dyn_info_list_addr(unw_addr_space_t __used as, - unw_word_t __used *dil_addr, - void __used *arg) +static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused *dil_addr, + void __maybe_unused *arg) { return -UNW_ENOINFO; } -static int resume(unw_addr_space_t __used as, unw_cursor_t __used *cu, - void __used *arg) +static int resume(unw_addr_space_t __maybe_unused as, + unw_cursor_t __maybe_unused *cu, + void __maybe_unused *arg) { pr_err("unwind: resume unsupported\n"); return -UNW_EINVAL; } static int -get_proc_name(unw_addr_space_t __used as, unw_word_t __used addr, - char __used *bufp, size_t __used buf_len, - unw_word_t __used *offp, void __used *arg) +get_proc_name(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused addr, + char __maybe_unused *bufp, size_t __maybe_unused buf_len, + unw_word_t __maybe_unused *offp, void __maybe_unused *arg) { pr_err("unwind: get_proc_name unsupported\n"); return -UNW_EINVAL; @@ -377,7 +381,7 @@ static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id, return 0; } -static int access_mem(unw_addr_space_t __used as, +static int access_mem(unw_addr_space_t __maybe_unused as, unw_word_t addr, unw_word_t *valp, int __write, void *arg) { @@ -422,7 +426,7 @@ static int access_mem(unw_addr_space_t __used as, return 0; } -static int access_reg(unw_addr_space_t __used as, +static int access_reg(unw_addr_space_t __maybe_unused as, unw_regnum_t regnum, unw_word_t *valp, int __write, void *arg) { @@ -454,9 +458,9 @@ static 
int access_reg(unw_addr_space_t __used as, return 0; } -static void put_unwind_info(unw_addr_space_t __used as, - unw_proc_info_t *pi __used, - void *arg __used) +static void put_unwind_info(unw_addr_space_t __maybe_unused as, + unw_proc_info_t *pi __maybe_unused, + void *arg __maybe_unused) { pr_debug("unwind: put_unwind_info called\n"); } diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h index 919bd6ad850..a78c8b303bb 100644 --- a/tools/perf/util/unwind.h +++ b/tools/perf/util/unwind.h @@ -22,11 +22,12 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, int unwind__arch_reg_id(int regnum); #else static inline int -unwind__get_entries(unwind_entry_cb_t cb __used, void *arg __used, - struct machine *machine __used, - struct thread *thread __used, - u64 sample_uregs __used, - struct perf_sample *data __used) +unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, + void *arg __maybe_unused, + struct machine *machine __maybe_unused, + struct thread *thread __maybe_unused, + u64 sample_uregs __maybe_unused, + struct perf_sample *data __maybe_unused) { return 0; } diff --git a/tools/perf/util/wrapper.c b/tools/perf/util/wrapper.c index 73e900edb5a..19f15b65070 100644 --- a/tools/perf/util/wrapper.c +++ b/tools/perf/util/wrapper.c @@ -7,7 +7,8 @@ * There's no pack memory to release - but stay close to the Git * version so wrap this away: */ -static inline void release_pack_memory(size_t size __used, int flag __used) +static inline void release_pack_memory(size_t size __maybe_unused, + int flag __maybe_unused) { } -- cgit v1.2.3-70-g09d2 From 4218e6734197f3842fc9b6362f12973918d913aa Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2012 13:18:47 -0300 Subject: perf sched: Remove unused thread parameter From the tracepoint handling routines. 
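The dropped thread argument was redundant: the dispatcher already resolves the thread from the sample, and any handler that still needs it can perform the same lookup on demand. A minimal sketch of the resulting handler shape under that assumption follows; the typedef mirrors the one changed in the patch below, while example_sched_handler is a hypothetical handler written here only for illustration, not code from this series.

/*
 * Sketch only: the struct thread * parameter is gone from the
 * tracepoint_handler signature; a handler that needs the thread
 * recovers it from the sample via the machine, as the dispatcher does.
 */
typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct event_format *tp_format,
				  struct perf_sample *sample,
				  struct machine *machine);

/* Hypothetical handler, not part of the patch. */
static int example_sched_handler(struct perf_tool *tool __maybe_unused,
				 struct event_format *tp_format __maybe_unused,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	/* Look the thread up on demand instead of taking it as a parameter. */
	struct thread *thread = machine__findnew_thread(machine, sample->pid);

	if (thread == NULL)
		return -1;

	/* ... per-event processing would go here ... */
	return 0;
}
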
Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-mcqd9mv34z6he0wqiz4a3mh9@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 82e8ec2c43b..af11b1aa1bd 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1372,8 +1372,7 @@ static struct trace_sched_handler *trace_handler; static int process_sched_wakeup_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample, - struct machine *machine, - struct thread *thread __maybe_unused) + struct machine *machine) { void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; @@ -1489,8 +1488,7 @@ map_switch_event(struct trace_switch_event *switch_event, static int process_sched_switch_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample, - struct machine *machine, - struct thread *thread __maybe_unused) + struct machine *machine) { int this_cpu = sample->cpu, err = 0; void *data = sample->raw_data; @@ -1524,8 +1522,7 @@ static int process_sched_switch_event(struct perf_tool *tool __maybe_unused, static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample, - struct machine *machine, - struct thread *thread __maybe_unused) + struct machine *machine) { void *data = sample->raw_data; struct trace_runtime_event runtime_event; @@ -1545,8 +1542,7 @@ static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused, static int process_sched_fork_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample, - struct machine *machine __maybe_unused, - struct thread *thread __maybe_unused) + struct machine *machine __maybe_unused) { void *data = sample->raw_data; struct trace_fork_event fork_event; @@ -1568,8 +1564,7 @@ static int process_sched_fork_event(struct perf_tool *tool __maybe_unused, static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample __maybe_unused, - struct machine *machine __maybe_unused, - struct thread *thread __maybe_unused) + struct machine *machine __maybe_unused) { if (verbose) printf("sched_exit event %p\n", event); @@ -1580,8 +1575,7 @@ static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unused, struct event_format *event, struct perf_sample *sample, - struct machine *machine, - struct thread *thread __maybe_unused) + struct machine *machine) { void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; @@ -1603,8 +1597,7 @@ static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unuse typedef int (*tracepoint_handler)(struct perf_tool *tool, struct event_format *tp_format, struct perf_sample *sample, - struct machine *machine, - struct thread *thread); + struct machine *machine); static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, union perf_event *event __maybe_unused, @@ -1626,7 +1619,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ if 
(evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - err = f(tool, evsel->tp_format, sample, machine, thread); + err = f(tool, evsel->tp_format, sample, machine); } return err; -- cgit v1.2.3-70-g09d2 From 0e9b07e574e544c1e840c59dabf39fef120620ae Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2012 17:29:27 -0300 Subject: perf sched: Use perf_tool as ancestor So that we can remove all the globals. Before: text data bss dec hex filename 1586833 110368 1438600 3135801 2fd939 /tmp/oldperf After: text data bss dec hex filename 1629329 93568 848328 2571225 273bd9 /root/bin/perf Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-oph40vikij0crjz4eyapneov@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 1136 ++++++++++++++++++++++---------------------- 1 file changed, 562 insertions(+), 574 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index af11b1aa1bd..79f88fa3f7a 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -23,26 +23,12 @@ #include #include -static const char *input_name; - -static char default_sort_order[] = "avg, max, switch, runtime"; -static const char *sort_order = default_sort_order; - -static int profile_cpu = -1; - #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 - -static u64 run_measurement_overhead; -static u64 sleep_measurement_overhead; - #define COMM_LEN 20 #define SYM_LEN 129 - #define MAX_PID 65536 -static unsigned long nr_tasks; - struct sched_atom; struct task_desc { @@ -80,44 +66,6 @@ struct sched_atom { struct task_desc *wakee; }; -static struct task_desc *pid_to_task[MAX_PID]; - -static struct task_desc **tasks; - -static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER; -static u64 start_time; - -static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER; - -static unsigned long nr_run_events; -static unsigned long nr_sleep_events; -static unsigned long nr_wakeup_events; - -static unsigned long nr_sleep_corrections; -static unsigned long nr_run_events_optimized; - -static unsigned long targetless_wakeups; -static unsigned long multitarget_wakeups; - -static u64 cpu_usage; -static u64 runavg_cpu_usage; -static u64 parent_cpu_usage; -static u64 runavg_parent_cpu_usage; - -static unsigned long nr_runs; -static u64 sum_runtime; -static u64 sum_fluct; -static u64 run_avg; - -static unsigned int replay_repeat = 10; -static unsigned long nr_timestamps; -static unsigned long nr_unordered_timestamps; -static unsigned long nr_state_machine_bugs; -static unsigned long nr_context_switch_bugs; -static unsigned long nr_events; -static unsigned long nr_lost_chunks; -static unsigned long nr_lost_events; - #define TASK_STATE_TO_CHAR_STR "RSDTtZX" enum thread_state { @@ -149,11 +97,169 @@ struct work_atoms { typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); -static struct rb_root atom_root, sorted_atom_root; +struct trace_switch_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char prev_comm[16]; + u32 prev_pid; + u32 prev_prio; + u64 prev_state; + char next_comm[16]; + u32 next_pid; + u32 next_prio; +}; + +struct trace_runtime_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 
common_pid; + u32 common_tgid; + + char comm[16]; + u32 pid; + u64 runtime; + u64 vruntime; +}; + +struct trace_wakeup_event { + u32 size; -static u64 all_runtime; -static u64 all_count; + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + char comm[16]; + u32 pid; + + u32 prio; + u32 success; + u32 cpu; +}; + +struct trace_fork_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char parent_comm[16]; + u32 parent_pid; + char child_comm[16]; + u32 child_pid; +}; + +struct trace_migrate_task_event { + u32 size; + + u16 common_type; + u8 common_flags; + u8 common_preempt_count; + u32 common_pid; + u32 common_tgid; + + char comm[16]; + u32 pid; + + u32 prio; + u32 cpu; +}; + +struct perf_sched; + +struct trace_sched_handler { + int (*switch_event)(struct perf_sched *sched, + struct trace_switch_event *event, + struct machine *machine, + struct event_format *tp_format, + struct perf_sample *sample); + + int (*runtime_event)(struct perf_sched *sched, + struct trace_runtime_event *event, + struct machine *machine, + struct perf_sample *sample); + + int (*wakeup_event)(struct perf_sched *sched, + struct trace_wakeup_event *event, + struct machine *machine, + struct event_format *tp_format, + struct perf_sample *sample); + + int (*fork_event)(struct perf_sched *sched, + struct trace_fork_event *event, + struct event_format *tp_format); + + int (*migrate_task_event)(struct perf_sched *sched, + struct trace_migrate_task_event *event, + struct machine *machine, + struct perf_sample *sample); +}; + +struct perf_sched { + struct perf_tool tool; + const char *input_name; + const char *sort_order; + unsigned long nr_tasks; + struct task_desc *pid_to_task[MAX_PID]; + struct task_desc **tasks; + const struct trace_sched_handler *tp_handler; + pthread_mutex_t start_work_mutex; + pthread_mutex_t work_done_wait_mutex; + int profile_cpu; +/* + * Track the current task - that way we can know whether there's any + * weird events, such as a task being switched away that is not current. 
+ */ + int max_cpu; + u32 curr_pid[MAX_CPUS]; + struct thread *curr_thread[MAX_CPUS]; + char next_shortname1; + char next_shortname2; + unsigned int replay_repeat; + unsigned long nr_run_events; + unsigned long nr_sleep_events; + unsigned long nr_wakeup_events; + unsigned long nr_sleep_corrections; + unsigned long nr_run_events_optimized; + unsigned long targetless_wakeups; + unsigned long multitarget_wakeups; + unsigned long nr_runs; + unsigned long nr_timestamps; + unsigned long nr_unordered_timestamps; + unsigned long nr_state_machine_bugs; + unsigned long nr_context_switch_bugs; + unsigned long nr_events; + unsigned long nr_lost_chunks; + unsigned long nr_lost_events; + u64 run_measurement_overhead; + u64 sleep_measurement_overhead; + u64 start_time; + u64 cpu_usage; + u64 runavg_cpu_usage; + u64 parent_cpu_usage; + u64 runavg_parent_cpu_usage; + u64 sum_runtime; + u64 sum_fluct; + u64 run_avg; + u64 all_runtime; + u64 all_count; + u64 cpu_last_switched[MAX_CPUS]; + struct rb_root atom_root, sorted_atom_root; + struct list_head sort_list, cmp_pid; +}; static u64 get_nsecs(void) { @@ -164,13 +270,13 @@ static u64 get_nsecs(void) return ts.tv_sec * 1000000000ULL + ts.tv_nsec; } -static void burn_nsecs(u64 nsecs) +static void burn_nsecs(struct perf_sched *sched, u64 nsecs) { u64 T0 = get_nsecs(), T1; do { T1 = get_nsecs(); - } while (T1 + run_measurement_overhead < T0 + nsecs); + } while (T1 + sched->run_measurement_overhead < T0 + nsecs); } static void sleep_nsecs(u64 nsecs) @@ -183,24 +289,24 @@ static void sleep_nsecs(u64 nsecs) nanosleep(&ts, NULL); } -static void calibrate_run_measurement_overhead(void) +static void calibrate_run_measurement_overhead(struct perf_sched *sched) { u64 T0, T1, delta, min_delta = 1000000000ULL; int i; for (i = 0; i < 10; i++) { T0 = get_nsecs(); - burn_nsecs(0); + burn_nsecs(sched, 0); T1 = get_nsecs(); delta = T1-T0; min_delta = min(min_delta, delta); } - run_measurement_overhead = min_delta; + sched->run_measurement_overhead = min_delta; printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); } -static void calibrate_sleep_measurement_overhead(void) +static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) { u64 T0, T1, delta, min_delta = 1000000000ULL; int i; @@ -213,7 +319,7 @@ static void calibrate_sleep_measurement_overhead(void) min_delta = min(min_delta, delta); } min_delta -= 10000; - sleep_measurement_overhead = min_delta; + sched->sleep_measurement_overhead = min_delta; printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); } @@ -246,8 +352,8 @@ static struct sched_atom *last_event(struct task_desc *task) return task->atoms[task->nr_events - 1]; } -static void -add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) +static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, u64 duration) { struct sched_atom *event, *curr_event = last_event(task); @@ -256,7 +362,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) * to it: */ if (curr_event && curr_event->type == SCHED_EVENT_RUN) { - nr_run_events_optimized++; + sched->nr_run_events_optimized++; curr_event->duration += duration; return; } @@ -266,12 +372,11 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) event->type = SCHED_EVENT_RUN; event->duration = duration; - nr_run_events++; + sched->nr_run_events++; } -static void -add_sched_event_wakeup(struct task_desc *task, u64 timestamp, - struct task_desc *wakee) +static void 
add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, struct task_desc *wakee) { struct sched_atom *event, *wakee_event; @@ -281,11 +386,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, wakee_event = last_event(wakee); if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { - targetless_wakeups++; + sched->targetless_wakeups++; return; } if (wakee_event->wait_sem) { - multitarget_wakeups++; + sched->multitarget_wakeups++; return; } @@ -294,89 +399,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, wakee_event->specific_wait = 1; event->wait_sem = wakee_event->wait_sem; - nr_wakeup_events++; + sched->nr_wakeup_events++; } -static void -add_sched_event_sleep(struct task_desc *task, u64 timestamp, - u64 task_state __maybe_unused) +static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, u64 task_state __maybe_unused) { struct sched_atom *event = get_new_event(task, timestamp); event->type = SCHED_EVENT_SLEEP; - nr_sleep_events++; + sched->nr_sleep_events++; } -static struct task_desc *register_pid(unsigned long pid, const char *comm) +static struct task_desc *register_pid(struct perf_sched *sched, + unsigned long pid, const char *comm) { struct task_desc *task; BUG_ON(pid >= MAX_PID); - task = pid_to_task[pid]; + task = sched->pid_to_task[pid]; if (task) return task; task = zalloc(sizeof(*task)); task->pid = pid; - task->nr = nr_tasks; + task->nr = sched->nr_tasks; strcpy(task->comm, comm); /* * every task starts in sleeping state - this gets ignored * if there's no wakeup pointing to this sleep state: */ - add_sched_event_sleep(task, 0, 0); + add_sched_event_sleep(sched, task, 0, 0); - pid_to_task[pid] = task; - nr_tasks++; - tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *)); - BUG_ON(!tasks); - tasks[task->nr] = task; + sched->pid_to_task[pid] = task; + sched->nr_tasks++; + sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *)); + BUG_ON(!sched->tasks); + sched->tasks[task->nr] = task; if (verbose) - printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm); + printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); return task; } -static void print_task_traces(void) +static void print_task_traces(struct perf_sched *sched) { struct task_desc *task; unsigned long i; - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; printf("task %6ld (%20s:%10ld), nr_events: %ld\n", task->nr, task->comm, task->pid, task->nr_events); } } -static void add_cross_task_wakeups(void) +static void add_cross_task_wakeups(struct perf_sched *sched) { struct task_desc *task1, *task2; unsigned long i, j; - for (i = 0; i < nr_tasks; i++) { - task1 = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task1 = sched->tasks[i]; j = i + 1; - if (j == nr_tasks) + if (j == sched->nr_tasks) j = 0; - task2 = tasks[j]; - add_sched_event_wakeup(task1, 0, task2); + task2 = sched->tasks[j]; + add_sched_event_wakeup(sched, task1, 0, task2); } } -static void process_sched_event(struct task_desc *this_task __maybe_unused, - struct sched_atom *atom) +static void perf_sched__process_event(struct perf_sched *sched, + struct sched_atom *atom) { int ret = 0; switch (atom->type) { case SCHED_EVENT_RUN: - burn_nsecs(atom->duration); + burn_nsecs(sched, atom->duration); break; case SCHED_EVENT_SLEEP: if (atom->wait_sem) @@ -439,14 +544,23 @@ static u64 get_cpu_usage_nsec_self(int fd) 
return runtime; } +struct sched_thread_parms { + struct task_desc *task; + struct perf_sched *sched; +}; + static void *thread_func(void *ctx) { - struct task_desc *this_task = ctx; + struct sched_thread_parms *parms = ctx; + struct task_desc *this_task = parms->task; + struct perf_sched *sched = parms->sched; u64 cpu_usage_0, cpu_usage_1; unsigned long i, ret; char comm2[22]; int fd; + free(parms); + sprintf(comm2, ":%s", this_task->comm); prctl(PR_SET_NAME, comm2); fd = self_open_counters(); @@ -455,16 +569,16 @@ static void *thread_func(void *ctx) again: ret = sem_post(&this_task->ready_for_work); BUG_ON(ret); - ret = pthread_mutex_lock(&start_work_mutex); + ret = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(ret); - ret = pthread_mutex_unlock(&start_work_mutex); + ret = pthread_mutex_unlock(&sched->start_work_mutex); BUG_ON(ret); cpu_usage_0 = get_cpu_usage_nsec_self(fd); for (i = 0; i < this_task->nr_events; i++) { this_task->curr_event = i; - process_sched_event(this_task, this_task->atoms[i]); + perf_sched__process_event(sched, this_task->atoms[i]); } cpu_usage_1 = get_cpu_usage_nsec_self(fd); @@ -472,15 +586,15 @@ again: ret = sem_post(&this_task->work_done_sem); BUG_ON(ret); - ret = pthread_mutex_lock(&work_done_wait_mutex); + ret = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(ret); - ret = pthread_mutex_unlock(&work_done_wait_mutex); + ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); BUG_ON(ret); goto again; } -static void create_tasks(void) +static void create_tasks(struct perf_sched *sched) { struct task_desc *task; pthread_attr_t attr; @@ -492,128 +606,129 @@ static void create_tasks(void) err = pthread_attr_setstacksize(&attr, (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); BUG_ON(err); - err = pthread_mutex_lock(&start_work_mutex); + err = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(err); - err = pthread_mutex_lock(&work_done_wait_mutex); + err = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(err); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + struct sched_thread_parms *parms = malloc(sizeof(*parms)); + BUG_ON(parms == NULL); + parms->task = task = sched->tasks[i]; + parms->sched = sched; sem_init(&task->sleep_sem, 0, 0); sem_init(&task->ready_for_work, 0, 0); sem_init(&task->work_done_sem, 0, 0); task->curr_event = 0; - err = pthread_create(&task->thread, &attr, thread_func, task); + err = pthread_create(&task->thread, &attr, thread_func, parms); BUG_ON(err); } } -static void wait_for_tasks(void) +static void wait_for_tasks(struct perf_sched *sched) { u64 cpu_usage_0, cpu_usage_1; struct task_desc *task; unsigned long i, ret; - start_time = get_nsecs(); - cpu_usage = 0; - pthread_mutex_unlock(&work_done_wait_mutex); + sched->start_time = get_nsecs(); + sched->cpu_usage = 0; + pthread_mutex_unlock(&sched->work_done_wait_mutex); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; ret = sem_wait(&task->ready_for_work); BUG_ON(ret); sem_init(&task->ready_for_work, 0, 0); } - ret = pthread_mutex_lock(&work_done_wait_mutex); + ret = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(ret); cpu_usage_0 = get_cpu_usage_nsec_parent(); - pthread_mutex_unlock(&start_work_mutex); + pthread_mutex_unlock(&sched->start_work_mutex); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; ret = sem_wait(&task->work_done_sem); BUG_ON(ret); 
sem_init(&task->work_done_sem, 0, 0); - cpu_usage += task->cpu_usage; + sched->cpu_usage += task->cpu_usage; task->cpu_usage = 0; } cpu_usage_1 = get_cpu_usage_nsec_parent(); - if (!runavg_cpu_usage) - runavg_cpu_usage = cpu_usage; - runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10; + if (!sched->runavg_cpu_usage) + sched->runavg_cpu_usage = sched->cpu_usage; + sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10; - parent_cpu_usage = cpu_usage_1 - cpu_usage_0; - if (!runavg_parent_cpu_usage) - runavg_parent_cpu_usage = parent_cpu_usage; - runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 + - parent_cpu_usage)/10; + sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; + if (!sched->runavg_parent_cpu_usage) + sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; + sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 + + sched->parent_cpu_usage)/10; - ret = pthread_mutex_lock(&start_work_mutex); + ret = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(ret); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; sem_init(&task->sleep_sem, 0, 0); task->curr_event = 0; } } -static void run_one_test(void) +static void run_one_test(struct perf_sched *sched) { u64 T0, T1, delta, avg_delta, fluct; T0 = get_nsecs(); - wait_for_tasks(); + wait_for_tasks(sched); T1 = get_nsecs(); delta = T1 - T0; - sum_runtime += delta; - nr_runs++; + sched->sum_runtime += delta; + sched->nr_runs++; - avg_delta = sum_runtime / nr_runs; + avg_delta = sched->sum_runtime / sched->nr_runs; if (delta < avg_delta) fluct = avg_delta - delta; else fluct = delta - avg_delta; - sum_fluct += fluct; - if (!run_avg) - run_avg = delta; - run_avg = (run_avg*9 + delta)/10; + sched->sum_fluct += fluct; + if (!sched->run_avg) + sched->run_avg = delta; + sched->run_avg = (sched->run_avg * 9 + delta) / 10; - printf("#%-3ld: %0.3f, ", - nr_runs, (double)delta/1000000.0); + printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0); - printf("ravg: %0.2f, ", - (double)run_avg/1e6); + printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6); printf("cpu: %0.2f / %0.2f", - (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6); + (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6); #if 0 /* * rusage statistics done by the parent, these are less - * accurate than the sum_exec_runtime based statistics: + * accurate than the sched->sum_exec_runtime based statistics: */ printf(" [%0.2f / %0.2f]", - (double)parent_cpu_usage/1e6, - (double)runavg_parent_cpu_usage/1e6); + (double)sched->parent_cpu_usage/1e6, + (double)sched->runavg_parent_cpu_usage/1e6); #endif printf("\n"); - if (nr_sleep_corrections) - printf(" (%ld sleep corrections)\n", nr_sleep_corrections); - nr_sleep_corrections = 0; + if (sched->nr_sleep_corrections) + printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); + sched->nr_sleep_corrections = 0; } -static void test_calibrations(void) +static void test_calibrations(struct perf_sched *sched) { u64 T0, T1; T0 = get_nsecs(); - burn_nsecs(1e6); + burn_nsecs(sched, 1e6); T1 = get_nsecs(); printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); @@ -643,115 +758,9 @@ do { \ FILL_FIELD(ptr, common_tgid, event, data); \ } while (0) - - -struct trace_switch_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char prev_comm[16]; - u32 prev_pid; - u32 prev_prio; - u64 prev_state; - char next_comm[16]; - 
u32 next_pid; - u32 next_prio; -}; - -struct trace_runtime_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - u64 runtime; - u64 vruntime; -}; - -struct trace_wakeup_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - - u32 prio; - u32 success; - u32 cpu; -}; - -struct trace_fork_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char parent_comm[16]; - u32 parent_pid; - char child_comm[16]; - u32 child_pid; -}; - -struct trace_migrate_task_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - - u32 prio; - u32 cpu; -}; - -struct trace_sched_handler { - int (*switch_event)(struct trace_switch_event *event, - struct machine *machine, - struct event_format *tp_format, - struct perf_sample *sample); - - int (*runtime_event)(struct trace_runtime_event *event, - struct machine *machine, - struct perf_sample *sample); - - int (*wakeup_event)(struct trace_wakeup_event *event, - struct machine *machine, - struct event_format *tp_format, - struct perf_sample *sample); - - int (*fork_event)(struct trace_fork_event *event, - struct event_format *tp_format); - - int (*migrate_task_event)(struct trace_migrate_task_event *event, - struct machine *machine, - struct perf_sample *sample); -}; - - static int -replay_wakeup_event(struct trace_wakeup_event *wakeup_event, +replay_wakeup_event(struct perf_sched *sched, + struct trace_wakeup_event *wakeup_event, struct machine *machine __maybe_unused, struct event_format *event, struct perf_sample *sample) { @@ -761,22 +770,19 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event, printf("sched_wakeup event %p\n", event); printf(" ... 
pid %d woke up %s/%d\n", - wakeup_event->common_pid, - wakeup_event->comm, - wakeup_event->pid); + wakeup_event->common_pid, wakeup_event->comm, wakeup_event->pid); } - waker = register_pid(wakeup_event->common_pid, ""); - wakee = register_pid(wakeup_event->pid, wakeup_event->comm); + waker = register_pid(sched, wakeup_event->common_pid, ""); + wakee = register_pid(sched, wakeup_event->pid, wakeup_event->comm); - add_sched_event_wakeup(waker, sample->time, wakee); + add_sched_event_wakeup(sched, waker, sample->time, wakee); return 0; } -static u64 cpu_last_switched[MAX_CPUS]; - static int -replay_switch_event(struct trace_switch_event *switch_event, +replay_switch_event(struct perf_sched *sched, + struct trace_switch_event *switch_event, struct machine *machine __maybe_unused, struct event_format *event, struct perf_sample *sample) @@ -792,7 +798,7 @@ replay_switch_event(struct trace_switch_event *switch_event, if (cpu >= MAX_CPUS || cpu < 0) return 0; - timestamp0 = cpu_last_switched[cpu]; + timestamp0 = sched->cpu_last_switched[cpu]; if (timestamp0) delta = timestamp - timestamp0; else @@ -810,20 +816,19 @@ replay_switch_event(struct trace_switch_event *switch_event, delta); } - prev = register_pid(switch_event->prev_pid, switch_event->prev_comm); - next = register_pid(switch_event->next_pid, switch_event->next_comm); + prev = register_pid(sched, switch_event->prev_pid, switch_event->prev_comm); + next = register_pid(sched, switch_event->next_pid, switch_event->next_comm); - cpu_last_switched[cpu] = timestamp; + sched->cpu_last_switched[cpu] = timestamp; - add_sched_event_run(prev, timestamp, delta); - add_sched_event_sleep(prev, timestamp, switch_event->prev_state); + add_sched_event_run(sched, prev, timestamp, delta); + add_sched_event_sleep(sched, prev, timestamp, switch_event->prev_state); return 0; } - static int -replay_fork_event(struct trace_fork_event *fork_event, +replay_fork_event(struct perf_sched *sched, struct trace_fork_event *fork_event, struct event_format *event) { if (verbose) { @@ -831,25 +836,17 @@ replay_fork_event(struct trace_fork_event *fork_event, printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); printf("... 
child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); } - register_pid(fork_event->parent_pid, fork_event->parent_comm); - register_pid(fork_event->child_pid, fork_event->child_comm); + register_pid(sched, fork_event->parent_pid, fork_event->parent_comm); + register_pid(sched, fork_event->child_pid, fork_event->child_comm); return 0; } -static struct trace_sched_handler replay_ops = { - .wakeup_event = replay_wakeup_event, - .switch_event = replay_switch_event, - .fork_event = replay_fork_event, -}; - struct sort_dimension { const char *name; sort_fn_t cmp; struct list_head list; }; -static LIST_HEAD(cmp_pid); - static int thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) { @@ -918,7 +915,7 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, rb_insert_color(&data->node, root); } -static int thread_atoms_insert(struct thread *thread) +static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) { struct work_atoms *atoms = zalloc(sizeof(*atoms)); if (!atoms) { @@ -928,11 +925,12 @@ static int thread_atoms_insert(struct thread *thread) atoms->thread = thread; INIT_LIST_HEAD(&atoms->work_list); - __thread_latency_insert(&atom_root, atoms, &cmp_pid); + __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); return 0; } -static int latency_fork_event(struct trace_fork_event *fork_event __maybe_unused, +static int latency_fork_event(struct perf_sched *sched __maybe_unused, + struct trace_fork_event *fork_event __maybe_unused, struct event_format *event __maybe_unused) { /* should insert the newcomer */ @@ -1014,7 +1012,8 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) } static int -latency_switch_event(struct trace_switch_event *switch_event, +latency_switch_event(struct perf_sched *sched, + struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __maybe_unused, struct perf_sample *sample) @@ -1027,8 +1026,8 @@ latency_switch_event(struct trace_switch_event *switch_event, BUG_ON(cpu >= MAX_CPUS || cpu < 0); - timestamp0 = cpu_last_switched[cpu]; - cpu_last_switched[cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[cpu]; + sched->cpu_last_switched[cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else @@ -1042,11 +1041,11 @@ latency_switch_event(struct trace_switch_event *switch_event, sched_out = machine__findnew_thread(machine, switch_event->prev_pid); sched_in = machine__findnew_thread(machine, switch_event->next_pid); - out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); + out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); if (!out_events) { - if (thread_atoms_insert(sched_out)) + if (thread_atoms_insert(sched, sched_out)) return -1; - out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); + out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); if (!out_events) { pr_err("out-event: Internal tree error"); return -1; @@ -1055,11 +1054,11 @@ latency_switch_event(struct trace_switch_event *switch_event, if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp)) return -1; - in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); + in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); if (!in_events) { - if (thread_atoms_insert(sched_in)) + if (thread_atoms_insert(sched, sched_in)) return -1; - in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); + in_events = 
thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); if (!in_events) { pr_err("in-event: Internal tree error"); return -1; @@ -1077,19 +1076,20 @@ latency_switch_event(struct trace_switch_event *switch_event, } static int -latency_runtime_event(struct trace_runtime_event *runtime_event, +latency_runtime_event(struct perf_sched *sched, + struct trace_runtime_event *runtime_event, struct machine *machine, struct perf_sample *sample) { struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); - struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); + struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); u64 timestamp = sample->time; int cpu = sample->cpu; BUG_ON(cpu >= MAX_CPUS || cpu < 0); if (!atoms) { - if (thread_atoms_insert(thread)) + if (thread_atoms_insert(sched, thread)) return -1; - atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); + atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); if (!atoms) { pr_debug("in-event: Internal tree error"); return -1; @@ -1103,7 +1103,8 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, } static int -latency_wakeup_event(struct trace_wakeup_event *wakeup_event, +latency_wakeup_event(struct perf_sched *sched, + struct trace_wakeup_event *wakeup_event, struct machine *machine, struct event_format *event __maybe_unused, struct perf_sample *sample) @@ -1118,11 +1119,11 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, return 0; wakee = machine__findnew_thread(machine, wakeup_event->pid); - atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); + atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { - if (thread_atoms_insert(wakee)) + if (thread_atoms_insert(sched, wakee)) return -1; - atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); + atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { pr_debug("wakeup-event: Internal tree error"); return -1; @@ -1140,12 +1141,12 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, * one CPU, or are only looking at only one, so don't * make useless noise. */ - if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) - nr_state_machine_bugs++; + if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) + sched->nr_state_machine_bugs++; - nr_timestamps++; + sched->nr_timestamps++; if (atom->sched_out_time > timestamp) { - nr_unordered_timestamps++; + sched->nr_unordered_timestamps++; return 0; } @@ -1155,7 +1156,8 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, } static int -latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, +latency_migrate_task_event(struct perf_sched *sched, + struct trace_migrate_task_event *migrate_task_event, struct machine *machine, struct perf_sample *sample) { u64 timestamp = sample->time; @@ -1166,16 +1168,16 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, /* * Only need to worry about migration when profiling one CPU. 
*/ - if (profile_cpu == -1) + if (sched->profile_cpu == -1) return 0; migrant = machine__findnew_thread(machine, migrate_task_event->pid); - atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { - if (thread_atoms_insert(migrant)) + if (thread_atoms_insert(sched, migrant)) return -1; - register_pid(migrant->pid, migrant->comm); - atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + register_pid(sched, migrant->pid, migrant->comm); + atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { pr_debug("migration-event: Internal tree error"); return -1; @@ -1189,23 +1191,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, atom = list_entry(atoms->work_list.prev, struct work_atom, list); atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; - nr_timestamps++; + sched->nr_timestamps++; if (atom->sched_out_time > timestamp) - nr_unordered_timestamps++; + sched->nr_unordered_timestamps++; return 0; } -static struct trace_sched_handler lat_ops = { - .wakeup_event = latency_wakeup_event, - .switch_event = latency_switch_event, - .runtime_event = latency_runtime_event, - .fork_event = latency_fork_event, - .migrate_task_event = latency_migrate_task_event, -}; - -static void output_lat_thread(struct work_atoms *work_list) +static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) { int i; int ret; @@ -1219,8 +1213,8 @@ static void output_lat_thread(struct work_atoms *work_list) if (!strcmp(work_list->thread->comm, "swapper")) return; - all_runtime += work_list->total_runtime; - all_count += work_list->nb_atoms; + sched->all_runtime += work_list->total_runtime; + sched->all_count += work_list->nb_atoms; ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); @@ -1246,11 +1240,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension pid_sort_dimension = { - .name = "pid", - .cmp = pid_cmp, -}; - static int avg_cmp(struct work_atoms *l, struct work_atoms *r) { u64 avgl, avgr; @@ -1272,11 +1261,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension avg_sort_dimension = { - .name = "avg", - .cmp = avg_cmp, -}; - static int max_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->max_lat < r->max_lat) @@ -1287,11 +1271,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension max_sort_dimension = { - .name = "max", - .cmp = max_cmp, -}; - static int switch_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->nb_atoms < r->nb_atoms) @@ -1302,11 +1281,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension switch_sort_dimension = { - .name = "switch", - .cmp = switch_cmp, -}; - static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->total_runtime < r->total_runtime) @@ -1317,28 +1291,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension runtime_sort_dimension = { - .name = "runtime", - .cmp = runtime_cmp, -}; - -static struct sort_dimension *available_sorts[] = { - &pid_sort_dimension, - &avg_sort_dimension, - &max_sort_dimension, - &switch_sort_dimension, - &runtime_sort_dimension, -}; - -#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension 
*)) - -static LIST_HEAD(sort_list); - static int sort_dimension__add(const char *tok, struct list_head *list) { - int i; + size_t i; + static struct sort_dimension avg_sort_dimension = { + .name = "avg", + .cmp = avg_cmp, + }; + static struct sort_dimension max_sort_dimension = { + .name = "max", + .cmp = max_cmp, + }; + static struct sort_dimension pid_sort_dimension = { + .name = "pid", + .cmp = pid_cmp, + }; + static struct sort_dimension runtime_sort_dimension = { + .name = "runtime", + .cmp = runtime_cmp, + }; + static struct sort_dimension switch_sort_dimension = { + .name = "switch", + .cmp = switch_cmp, + }; + struct sort_dimension *available_sorts[] = { + &pid_sort_dimension, + &avg_sort_dimension, + &max_sort_dimension, + &switch_sort_dimension, + &runtime_sort_dimension, + }; - for (i = 0; i < NB_AVAILABLE_SORTS; i++) { + for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { if (!strcmp(available_sorts[i]->name, tok)) { list_add_tail(&available_sorts[i]->list, list); @@ -1349,31 +1333,28 @@ static int sort_dimension__add(const char *tok, struct list_head *list) return -1; } -static void setup_sorting(void); - -static void sort_lat(void) +static void perf_sched__sort_lat(struct perf_sched *sched) { struct rb_node *node; for (;;) { struct work_atoms *data; - node = rb_first(&atom_root); + node = rb_first(&sched->atom_root); if (!node) break; - rb_erase(node, &atom_root); + rb_erase(node, &sched->atom_root); data = rb_entry(node, struct work_atoms, node); - __thread_latency_insert(&sorted_atom_root, data, &sort_list); + __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); } } -static struct trace_sched_handler *trace_handler; - -static int process_sched_wakeup_event(struct perf_tool *tool __maybe_unused, +static int process_sched_wakeup_event(struct perf_tool *tool, struct event_format *event, struct perf_sample *sample, struct machine *machine) { + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; int err = 0; @@ -1386,27 +1367,15 @@ static int process_sched_wakeup_event(struct perf_tool *tool __maybe_unused, FILL_FIELD(wakeup_event, success, event, data); FILL_FIELD(wakeup_event, cpu, event, data); - if (trace_handler->wakeup_event) - err = trace_handler->wakeup_event(&wakeup_event, machine, event, sample); + if (sched->tp_handler->wakeup_event) + err = sched->tp_handler->wakeup_event(sched, &wakeup_event, machine, event, sample); return err; } -/* - * Track the current task - that way we can know whether there's any - * weird events, such as a task being switched away that is not current. - */ -static int max_cpu; - -static u32 curr_pid[MAX_CPUS] = { [0 ... 
MAX_CPUS-1] = -1 }; - -static struct thread *curr_thread[MAX_CPUS]; - -static char next_shortname1 = 'A'; -static char next_shortname2 = '0'; - static int -map_switch_event(struct trace_switch_event *switch_event, +map_switch_event(struct perf_sched *sched, + struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __maybe_unused, struct perf_sample *sample) @@ -1419,11 +1388,11 @@ map_switch_event(struct trace_switch_event *switch_event, BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); - if (this_cpu > max_cpu) - max_cpu = this_cpu; + if (this_cpu > sched->max_cpu) + sched->max_cpu = this_cpu; - timestamp0 = cpu_last_switched[this_cpu]; - cpu_last_switched[this_cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[this_cpu]; + sched->cpu_last_switched[this_cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else @@ -1437,37 +1406,37 @@ map_switch_event(struct trace_switch_event *switch_event, sched_out = machine__findnew_thread(machine, switch_event->prev_pid); sched_in = machine__findnew_thread(machine, switch_event->next_pid); - curr_thread[this_cpu] = sched_in; + sched->curr_thread[this_cpu] = sched_in; printf(" "); new_shortname = 0; if (!sched_in->shortname[0]) { - sched_in->shortname[0] = next_shortname1; - sched_in->shortname[1] = next_shortname2; + sched_in->shortname[0] = sched->next_shortname1; + sched_in->shortname[1] = sched->next_shortname2; - if (next_shortname1 < 'Z') { - next_shortname1++; + if (sched->next_shortname1 < 'Z') { + sched->next_shortname1++; } else { - next_shortname1='A'; - if (next_shortname2 < '9') { - next_shortname2++; + sched->next_shortname1='A'; + if (sched->next_shortname2 < '9') { + sched->next_shortname2++; } else { - next_shortname2='0'; + sched->next_shortname2='0'; } } new_shortname = 1; } - for (cpu = 0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= sched->max_cpu; cpu++) { if (cpu != this_cpu) printf(" "); else printf("*"); - if (curr_thread[cpu]) { - if (curr_thread[cpu]->pid) - printf("%2s ", curr_thread[cpu]->shortname); + if (sched->curr_thread[cpu]) { + if (sched->curr_thread[cpu]->pid) + printf("%2s ", sched->curr_thread[cpu]->shortname); else printf(". "); } else @@ -1485,11 +1454,12 @@ map_switch_event(struct trace_switch_event *switch_event, return 0; } -static int process_sched_switch_event(struct perf_tool *tool __maybe_unused, +static int process_sched_switch_event(struct perf_tool *tool, struct event_format *event, struct perf_sample *sample, struct machine *machine) { + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); int this_cpu = sample->cpu, err = 0; void *data = sample->raw_data; struct trace_switch_event switch_event; @@ -1504,26 +1474,27 @@ static int process_sched_switch_event(struct perf_tool *tool __maybe_unused, FILL_FIELD(switch_event, next_pid, event, data); FILL_FIELD(switch_event, next_prio, event, data); - if (curr_pid[this_cpu] != (u32)-1) { + if (sched->curr_pid[this_cpu] != (u32)-1) { /* * Are we trying to switch away a PID that is * not current? 
*/ - if (curr_pid[this_cpu] != switch_event.prev_pid) - nr_context_switch_bugs++; + if (sched->curr_pid[this_cpu] != switch_event.prev_pid) + sched->nr_context_switch_bugs++; } - if (trace_handler->switch_event) - err = trace_handler->switch_event(&switch_event, machine, event, sample); + if (sched->tp_handler->switch_event) + err = sched->tp_handler->switch_event(sched, &switch_event, machine, event, sample); - curr_pid[this_cpu] = switch_event.next_pid; + sched->curr_pid[this_cpu] = switch_event.next_pid; return err; } -static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused, +static int process_sched_runtime_event(struct perf_tool *tool, struct event_format *event, struct perf_sample *sample, struct machine *machine) { + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); void *data = sample->raw_data; struct trace_runtime_event runtime_event; int err = 0; @@ -1533,17 +1504,18 @@ static int process_sched_runtime_event(struct perf_tool *tool __maybe_unused, FILL_FIELD(runtime_event, runtime, event, data); FILL_FIELD(runtime_event, vruntime, event, data); - if (trace_handler->runtime_event) - err = trace_handler->runtime_event(&runtime_event, machine, sample); + if (sched->tp_handler->runtime_event) + err = sched->tp_handler->runtime_event(sched, &runtime_event, machine, sample); return err; } -static int process_sched_fork_event(struct perf_tool *tool __maybe_unused, +static int process_sched_fork_event(struct perf_tool *tool, struct event_format *event, struct perf_sample *sample, struct machine *machine __maybe_unused) { + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); void *data = sample->raw_data; struct trace_fork_event fork_event; int err = 0; @@ -1555,8 +1527,8 @@ static int process_sched_fork_event(struct perf_tool *tool __maybe_unused, FILL_ARRAY(fork_event, child_comm, event, data); FILL_FIELD(fork_event, child_pid, event, data); - if (trace_handler->fork_event) - err = trace_handler->fork_event(&fork_event, event); + if (sched->tp_handler->fork_event) + err = sched->tp_handler->fork_event(sched, &fork_event, event); return err; } @@ -1572,11 +1544,12 @@ static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, return 0; } -static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unused, +static int process_sched_migrate_task_event(struct perf_tool *tool, struct event_format *event, struct perf_sample *sample, struct machine *machine) { + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; int err = 0; @@ -1588,8 +1561,8 @@ static int process_sched_migrate_task_event(struct perf_tool *tool __maybe_unuse FILL_FIELD(migrate_task_event, prio, event, data); FILL_FIELD(migrate_task_event, cpu, event, data); - if (trace_handler->migrate_task_event) - err = trace_handler->migrate_task_event(&migrate_task_event, machine, sample); + if (sched->tp_handler->migrate_task_event) + err = sched->tp_handler->migrate_task_event(sched, &migrate_task_event, machine, sample); return err; } @@ -1625,15 +1598,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ return err; } -static struct perf_tool perf_sched = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_event__process_comm, - .lost = perf_event__process_lost, - .fork = perf_event__process_task, - .ordered_samples = true, -}; - -static int read_events(bool destroy, struct perf_session 
**psession) +static int perf_sched__read_events(struct perf_sched *sched, bool destroy, + struct perf_session **psession) { const struct perf_evsel_str_handler handlers[] = { { "sched:sched_switch", process_sched_switch_event, }, @@ -1646,7 +1612,7 @@ static int read_events(bool destroy, struct perf_session **psession) }; struct perf_session *session; - session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched); + session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool); if (session == NULL) { pr_debug("No Memory for session\n"); return -1; @@ -1656,15 +1622,15 @@ static int read_events(bool destroy, struct perf_session **psession) goto out_delete; if (perf_session__has_traces(session, "record -R")) { - int err = perf_session__process_events(session, &perf_sched); + int err = perf_session__process_events(session, &sched->tool); if (err) { pr_err("Failed to process events, error %d", err); goto out_delete; } - nr_events = session->hists.stats.nr_events[0]; - nr_lost_events = session->hists.stats.total_lost; - nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; + sched->nr_events = session->hists.stats.nr_events[0]; + sched->nr_lost_events = session->hists.stats.total_lost; + sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; } if (destroy) @@ -1680,213 +1646,158 @@ out_delete: return -1; } -static void print_bad_events(void) +static void print_bad_events(struct perf_sched *sched) { - if (nr_unordered_timestamps && nr_timestamps) { + if (sched->nr_unordered_timestamps && sched->nr_timestamps) { printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", - (double)nr_unordered_timestamps/(double)nr_timestamps*100.0, - nr_unordered_timestamps, nr_timestamps); + (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, + sched->nr_unordered_timestamps, sched->nr_timestamps); } - if (nr_lost_events && nr_events) { + if (sched->nr_lost_events && sched->nr_events) { printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", - (double)nr_lost_events/(double)nr_events*100.0, - nr_lost_events, nr_events, nr_lost_chunks); + (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, + sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); } - if (nr_state_machine_bugs && nr_timestamps) { + if (sched->nr_state_machine_bugs && sched->nr_timestamps) { printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", - (double)nr_state_machine_bugs/(double)nr_timestamps*100.0, - nr_state_machine_bugs, nr_timestamps); - if (nr_lost_events) + (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0, + sched->nr_state_machine_bugs, sched->nr_timestamps); + if (sched->nr_lost_events) printf(" (due to lost events?)"); printf("\n"); } - if (nr_context_switch_bugs && nr_timestamps) { + if (sched->nr_context_switch_bugs && sched->nr_timestamps) { printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", - (double)nr_context_switch_bugs/(double)nr_timestamps*100.0, - nr_context_switch_bugs, nr_timestamps); - if (nr_lost_events) + (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, + sched->nr_context_switch_bugs, sched->nr_timestamps); + if (sched->nr_lost_events) printf(" (due to lost events?)"); printf("\n"); } } -static int __cmd_lat(void) +static int perf_sched__lat(struct perf_sched *sched) { struct rb_node *next; struct perf_session *session; setup_pager(); - if (read_events(false, &session)) + if (perf_sched__read_events(sched, false, &session)) return 
-1; - sort_lat(); + perf_sched__sort_lat(sched); printf("\n ---------------------------------------------------------------------------------------------------------------\n"); printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); printf(" ---------------------------------------------------------------------------------------------------------------\n"); - next = rb_first(&sorted_atom_root); + next = rb_first(&sched->sorted_atom_root); while (next) { struct work_atoms *work_list; work_list = rb_entry(next, struct work_atoms, node); - output_lat_thread(work_list); + output_lat_thread(sched, work_list); next = rb_next(next); } printf(" -----------------------------------------------------------------------------------------\n"); printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", - (double)all_runtime/1e6, all_count); + (double)sched->all_runtime / 1e6, sched->all_count); printf(" ---------------------------------------------------\n"); - print_bad_events(); + print_bad_events(sched); printf("\n"); perf_session__delete(session); return 0; } -static struct trace_sched_handler map_ops = { - .wakeup_event = NULL, - .switch_event = map_switch_event, - .runtime_event = NULL, - .fork_event = NULL, -}; - -static int __cmd_map(void) +static int perf_sched__map(struct perf_sched *sched) { - max_cpu = sysconf(_SC_NPROCESSORS_CONF); + sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); setup_pager(); - if (read_events(true, NULL)) + if (perf_sched__read_events(sched, true, NULL)) return -1; - print_bad_events(); + print_bad_events(sched); return 0; } -static int __cmd_replay(void) +static int perf_sched__replay(struct perf_sched *sched) { unsigned long i; - calibrate_run_measurement_overhead(); - calibrate_sleep_measurement_overhead(); + calibrate_run_measurement_overhead(sched); + calibrate_sleep_measurement_overhead(sched); - test_calibrations(); + test_calibrations(sched); - if (read_events(true, NULL)) + if (perf_sched__read_events(sched, true, NULL)) return -1; - printf("nr_run_events: %ld\n", nr_run_events); - printf("nr_sleep_events: %ld\n", nr_sleep_events); - printf("nr_wakeup_events: %ld\n", nr_wakeup_events); + printf("nr_run_events: %ld\n", sched->nr_run_events); + printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); + printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); - if (targetless_wakeups) - printf("target-less wakeups: %ld\n", targetless_wakeups); - if (multitarget_wakeups) - printf("multi-target wakeups: %ld\n", multitarget_wakeups); - if (nr_run_events_optimized) + if (sched->targetless_wakeups) + printf("target-less wakeups: %ld\n", sched->targetless_wakeups); + if (sched->multitarget_wakeups) + printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); + if (sched->nr_run_events_optimized) printf("run atoms optimized: %ld\n", - nr_run_events_optimized); + sched->nr_run_events_optimized); - print_task_traces(); - add_cross_task_wakeups(); + print_task_traces(sched); + add_cross_task_wakeups(sched); - create_tasks(); + create_tasks(sched); printf("------------------------------------------------------------\n"); - for (i = 0; i < replay_repeat; i++) - run_one_test(); + for (i = 0; i < sched->replay_repeat; i++) + run_one_test(sched); return 0; } - -static const char * const sched_usage[] = { - "perf sched [] {record|latency|map|replay|script}", - NULL -}; - -static const struct option sched_options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_INCR('v', "verbose", &verbose, - "be more 
verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; - -static const char * const latency_usage[] = { - "perf sched latency []", - NULL -}; - -static const struct option latency_options[] = { - OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): runtime, switch, avg, max"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_INTEGER('C', "CPU", &profile_cpu, - "CPU to profile on"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; - -static const char * const replay_usage[] = { - "perf sched replay []", - NULL -}; - -static const struct option replay_options[] = { - OPT_UINTEGER('r', "repeat", &replay_repeat, - "repeat the workload replay N times (-1: infinite)"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; - -static void setup_sorting(void) +static void setup_sorting(struct perf_sched *sched, const struct option *options, + const char * const usage_msg[]) { - char *tmp, *tok, *str = strdup(sort_order); + char *tmp, *tok, *str = strdup(sched->sort_order); for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok, &sort_list) < 0) { + if (sort_dimension__add(tok, &sched->sort_list) < 0) { error("Unknown --sort key: `%s'", tok); - usage_with_options(latency_usage, latency_options); + usage_with_options(usage_msg, options); } } free(str); - sort_dimension__add("pid", &cmp_pid); + sort_dimension__add("pid", &sched->cmp_pid); } -static const char *record_args[] = { - "record", - "-a", - "-R", - "-f", - "-m", "1024", - "-c", "1", - "-e", "sched:sched_switch", - "-e", "sched:sched_stat_wait", - "-e", "sched:sched_stat_sleep", - "-e", "sched:sched_stat_iowait", - "-e", "sched:sched_stat_runtime", - "-e", "sched:sched_process_exit", - "-e", "sched:sched_process_fork", - "-e", "sched:sched_wakeup", - "-e", "sched:sched_migrate_task", -}; - static int __cmd_record(int argc, const char **argv) { unsigned int rec_argc, i, j; const char **rec_argv; + const char * const record_args[] = { + "record", + "-a", + "-R", + "-f", + "-m", "1024", + "-c", "1", + "-e", "sched:sched_switch", + "-e", "sched:sched_stat_wait", + "-e", "sched:sched_stat_sleep", + "-e", "sched:sched_stat_iowait", + "-e", "sched:sched_stat_runtime", + "-e", "sched:sched_process_exit", + "-e", "sched:sched_process_fork", + "-e", "sched:sched_wakeup", + "-e", "sched:sched_migrate_task", + }; rec_argc = ARRAY_SIZE(record_args) + argc - 1; rec_argv = calloc(rec_argc + 1, sizeof(char *)); @@ -1907,6 +1818,83 @@ static int __cmd_record(int argc, const char **argv) int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) { + const char default_sort_order[] = "avg, max, switch, runtime"; + struct perf_sched sched = { + .tool = { + .sample = perf_sched__process_tracepoint_sample, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_task, + .ordered_samples = true, + }, + .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), + .sort_list = LIST_HEAD_INIT(sched.sort_list), + .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, + .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, + .curr_pid = { [0 ... 
MAX_CPUS - 1] = -1 }, + .sort_order = default_sort_order, + .replay_repeat = 10, + .profile_cpu = -1, + .next_shortname1 = 'A', + .next_shortname2 = '0', + }; + const struct option latency_options[] = { + OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", + "sort by key(s): runtime, switch, avg, max"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &sched.profile_cpu, + "CPU to profile on"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const struct option replay_options[] = { + OPT_UINTEGER('r', "repeat", &sched.replay_repeat, + "repeat the workload replay N times (-1: infinite)"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const struct option sched_options[] = { + OPT_STRING('i', "input", &sched.input_name, "file", + "input file name"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const char * const latency_usage[] = { + "perf sched latency []", + NULL + }; + const char * const replay_usage[] = { + "perf sched replay []", + NULL + }; + const char * const sched_usage[] = { + "perf sched [] {record|latency|map|replay|script}", + NULL + }; + struct trace_sched_handler lat_ops = { + .wakeup_event = latency_wakeup_event, + .switch_event = latency_switch_event, + .runtime_event = latency_runtime_event, + .fork_event = latency_fork_event, + .migrate_task_event = latency_migrate_task_event, + }; + struct trace_sched_handler map_ops = { + .switch_event = map_switch_event, + }; + struct trace_sched_handler replay_ops = { + .wakeup_event = replay_wakeup_event, + .switch_event = replay_switch_event, + .fork_event = replay_fork_event, + }; + argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) @@ -1922,26 +1910,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); } else if (!strncmp(argv[0], "lat", 3)) { - trace_handler = &lat_ops; + sched.tp_handler = &lat_ops; if (argc > 1) { argc = parse_options(argc, argv, latency_options, latency_usage, 0); if (argc) usage_with_options(latency_usage, latency_options); } - setup_sorting(); - return __cmd_lat(); + setup_sorting(&sched, latency_options, latency_usage); + return perf_sched__lat(&sched); } else if (!strcmp(argv[0], "map")) { - trace_handler = &map_ops; - setup_sorting(); - return __cmd_map(); + sched.tp_handler = &map_ops; + setup_sorting(&sched, latency_options, latency_usage); + return perf_sched__map(&sched); } else if (!strncmp(argv[0], "rep", 3)) { - trace_handler = &replay_ops; + sched.tp_handler = &replay_ops; if (argc) { argc = parse_options(argc, argv, replay_options, replay_usage, 0); if (argc) usage_with_options(replay_usage, replay_options); } - return __cmd_replay(); + return perf_sched__replay(&sched); } else { usage_with_options(sched_usage, sched_options); } -- cgit v1.2.3-70-g09d2 From 2b7fcbc5a9c719a306af1c4986a9f5c2cbfcec65 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2012 19:29:17 -0300 Subject: perf sched: Use perf_evsel__{int,str}val This patch also stops reading the common fields, as they were not being used except for one ->common_pid case that was 
replaced by sample->tid, i.e. the info is already in the perf_sample struct. Also it only fills the _event structures when there is a handler. [root@sandy ~]# perf sched record sleep 30s [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 8.585 MB perf.data (~375063 samples) ] Before: [root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null Performance counter stats for 'perf sched lat' (10 runs): 129.117838 task-clock # 0.994 CPUs utilized ( +- 0.28% ) 14 context-switches # 0.111 K/sec ( +- 2.10% ) 0 cpu-migrations # 0.002 K/sec ( +- 66.67% ) 7,654 page-faults # 0.059 M/sec ( +- 0.67% ) 438,121,661 cycles # 3.393 GHz ( +- 0.06% ) [83.06%] 150,808,605 stalled-cycles-frontend # 34.42% frontend cycles idle ( +- 0.14% ) [83.10%] 80,748,941 stalled-cycles-backend # 18.43% backend cycles idle ( +- 0.64% ) [66.73%] 758,605,879 instructions # 1.73 insns per cycle # 0.20 stalled cycles per insn ( +- 0.08% ) [83.54%] 162,164,321 branches # 1255.940 M/sec ( +- 0.10% ) [83.70%] 1,609,903 branch-misses # 0.99% of all branches ( +- 0.08% ) [83.62%] 0.129949153 seconds time elapsed ( +- 0.28% ) After: [root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null Performance counter stats for 'perf sched lat' (10 runs): 103.592215 task-clock # 0.993 CPUs utilized ( +- 0.33% ) 12 context-switches # 0.114 K/sec ( +- 3.29% ) 0 cpu-migrations # 0.000 K/sec 7,605 page-faults # 0.073 M/sec ( +- 0.00% ) 345,796,112 cycles # 3.338 GHz ( +- 0.07% ) [82.90%] 106,876,796 stalled-cycles-frontend # 30.91% frontend cycles idle ( +- 0.38% ) [83.23%] 62,060,877 stalled-cycles-backend # 17.95% backend cycles idle ( +- 0.80% ) [67.14%] 628,246,586 instructions # 1.82 insns per cycle # 0.17 stalled cycles per insn ( +- 0.04% ) [83.64%] 134,962,057 branches # 1302.820 M/sec ( +- 0.10% ) [83.64%] 1,233,037 branch-misses # 0.91% of all branches ( +- 0.29% ) [83.41%] 0.104333272 seconds time elapsed ( +- 0.33% ) [root@sandy ~]# Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-weu9t63zkrfrazkn0gxj48xy@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 249 ++++++++++++++++----------------------------- 1 file changed, 90 insertions(+), 159 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 79f88fa3f7a..0df5e7a08c6 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -98,82 +98,40 @@ struct work_atoms { typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); struct trace_switch_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char prev_comm[16]; + char *prev_comm; u32 prev_pid; u32 prev_prio; u64 prev_state; - char next_comm[16]; + char *next_comm; u32 next_pid; u32 next_prio; }; struct trace_runtime_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; + char *comm; u32 pid; u64 runtime; u64 vruntime; }; struct trace_wakeup_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; + char *comm; u32 pid; - u32 prio; u32 success; u32 cpu; }; struct trace_fork_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 
common_tgid; - - char parent_comm[16]; + char *parent_comm; u32 parent_pid; - char child_comm[16]; + char *child_comm; u32 child_pid; }; struct trace_migrate_task_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; + char *comm; u32 pid; - u32 prio; u32 cpu; }; @@ -184,7 +142,7 @@ struct trace_sched_handler { int (*switch_event)(struct perf_sched *sched, struct trace_switch_event *event, struct machine *machine, - struct event_format *tp_format, + struct perf_evsel *evsel, struct perf_sample *sample); int (*runtime_event)(struct perf_sched *sched, @@ -195,12 +153,12 @@ struct trace_sched_handler { int (*wakeup_event)(struct perf_sched *sched, struct trace_wakeup_event *event, struct machine *machine, - struct event_format *tp_format, + struct perf_evsel *evsel, struct perf_sample *sample); int (*fork_event)(struct perf_sched *sched, struct trace_fork_event *event, - struct event_format *tp_format); + struct perf_evsel *evsel); int (*migrate_task_event)(struct perf_sched *sched, struct trace_migrate_task_event *event, @@ -740,40 +698,22 @@ static void test_calibrations(struct perf_sched *sched) printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); } -#define FILL_FIELD(ptr, field, event, data) \ - ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data) - -#define FILL_ARRAY(ptr, array, event, data) \ -do { \ - void *__array = raw_field_ptr(event, #array, data); \ - memcpy(ptr.array, __array, sizeof(ptr.array)); \ -} while(0) - -#define FILL_COMMON_FIELDS(ptr, event, data) \ -do { \ - FILL_FIELD(ptr, common_type, event, data); \ - FILL_FIELD(ptr, common_flags, event, data); \ - FILL_FIELD(ptr, common_preempt_count, event, data); \ - FILL_FIELD(ptr, common_pid, event, data); \ - FILL_FIELD(ptr, common_tgid, event, data); \ -} while (0) - static int replay_wakeup_event(struct perf_sched *sched, struct trace_wakeup_event *wakeup_event, struct machine *machine __maybe_unused, - struct event_format *event, struct perf_sample *sample) + struct perf_evsel *evsel, struct perf_sample *sample) { struct task_desc *waker, *wakee; if (verbose) { - printf("sched_wakeup event %p\n", event); + printf("sched_wakeup event %p\n", evsel); printf(" ... pid %d woke up %s/%d\n", - wakeup_event->common_pid, wakeup_event->comm, wakeup_event->pid); + sample->tid, wakeup_event->comm, wakeup_event->pid); } - waker = register_pid(sched, wakeup_event->common_pid, ""); + waker = register_pid(sched, sample->tid, ""); wakee = register_pid(sched, wakeup_event->pid, wakeup_event->comm); add_sched_event_wakeup(sched, waker, sample->time, wakee); @@ -784,7 +724,7 @@ static int replay_switch_event(struct perf_sched *sched, struct trace_switch_event *switch_event, struct machine *machine __maybe_unused, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample) { struct task_desc *prev, __maybe_unused *next; @@ -793,7 +733,7 @@ replay_switch_event(struct perf_sched *sched, s64 delta; if (verbose) - printf("sched_switch event %p\n", event); + printf("sched_switch event %p\n", evsel); if (cpu >= MAX_CPUS || cpu < 0) return 0; @@ -829,10 +769,10 @@ replay_switch_event(struct perf_sched *sched, static int replay_fork_event(struct perf_sched *sched, struct trace_fork_event *fork_event, - struct event_format *event) + struct perf_evsel *evsel) { if (verbose) { - printf("sched_fork event %p\n", event); + printf("sched_fork event %p\n", evsel); printf("... 
parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); } @@ -931,7 +871,7 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) static int latency_fork_event(struct perf_sched *sched __maybe_unused, struct trace_fork_event *fork_event __maybe_unused, - struct event_format *event __maybe_unused) + struct perf_evsel *evsel __maybe_unused) { /* should insert the newcomer */ return 0; @@ -1015,7 +955,7 @@ static int latency_switch_event(struct perf_sched *sched, struct trace_switch_event *switch_event, struct machine *machine, - struct event_format *event __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct perf_sample *sample) { struct work_atoms *out_events, *in_events; @@ -1106,7 +1046,7 @@ static int latency_wakeup_event(struct perf_sched *sched, struct trace_wakeup_event *wakeup_event, struct machine *machine, - struct event_format *event __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct perf_sample *sample) { struct work_atoms *atoms; @@ -1350,34 +1290,32 @@ static void perf_sched__sort_lat(struct perf_sched *sched) } static int process_sched_wakeup_event(struct perf_tool *tool, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - void *data = sample->raw_data; - struct trace_wakeup_event wakeup_event; - int err = 0; - - FILL_COMMON_FIELDS(wakeup_event, event, data); - FILL_ARRAY(wakeup_event, comm, event, data); - FILL_FIELD(wakeup_event, pid, event, data); - FILL_FIELD(wakeup_event, prio, event, data); - FILL_FIELD(wakeup_event, success, event, data); - FILL_FIELD(wakeup_event, cpu, event, data); + if (sched->tp_handler->wakeup_event) { + struct trace_wakeup_event event = { + .comm = perf_evsel__strval(evsel, sample, "comm"), + .pid = perf_evsel__intval(evsel, sample, "pid"), + .prio = perf_evsel__intval(evsel, sample, "prio"), + .success = perf_evsel__intval(evsel, sample, "success"), + .cpu = perf_evsel__intval(evsel, sample, "cpu"), + }; - if (sched->tp_handler->wakeup_event) - err = sched->tp_handler->wakeup_event(sched, &wakeup_event, machine, event, sample); + return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample); + } - return err; + return 0; } static int map_switch_event(struct perf_sched *sched, struct trace_switch_event *switch_event, struct machine *machine, - struct event_format *event __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct perf_sample *sample) { struct thread *sched_out __maybe_unused, *sched_in; @@ -1455,120 +1393,113 @@ map_switch_event(struct perf_sched *sched, } static int process_sched_switch_event(struct perf_tool *tool, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); int this_cpu = sample->cpu, err = 0; - void *data = sample->raw_data; - struct trace_switch_event switch_event; - - FILL_COMMON_FIELDS(switch_event, event, data); - - FILL_ARRAY(switch_event, prev_comm, event, data); - FILL_FIELD(switch_event, prev_pid, event, data); - FILL_FIELD(switch_event, prev_prio, event, data); - FILL_FIELD(switch_event, prev_state, event, data); - FILL_ARRAY(switch_event, next_comm, event, data); - FILL_FIELD(switch_event, next_pid, event, data); - FILL_FIELD(switch_event, next_prio, event, data); + u32 prev_pid = 
perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); if (sched->curr_pid[this_cpu] != (u32)-1) { /* * Are we trying to switch away a PID that is * not current? */ - if (sched->curr_pid[this_cpu] != switch_event.prev_pid) + if (sched->curr_pid[this_cpu] != prev_pid) sched->nr_context_switch_bugs++; } - if (sched->tp_handler->switch_event) - err = sched->tp_handler->switch_event(sched, &switch_event, machine, event, sample); - sched->curr_pid[this_cpu] = switch_event.next_pid; + if (sched->tp_handler->switch_event) { + struct trace_switch_event event = { + .prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), + .prev_pid = prev_pid, + .prev_prio = perf_evsel__intval(evsel, sample, "prev_prio"), + .prev_state = perf_evsel__intval(evsel, sample, "prev_state"), + .next_comm = perf_evsel__strval(evsel, sample, "next_comm"), + .next_pid = next_pid, + .next_prio = perf_evsel__intval(evsel, sample, "next_prio"), + }; + + err = sched->tp_handler->switch_event(sched, &event, machine, evsel, sample); + } + + sched->curr_pid[this_cpu] = next_pid; return err; } static int process_sched_runtime_event(struct perf_tool *tool, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - void *data = sample->raw_data; - struct trace_runtime_event runtime_event; - int err = 0; - FILL_ARRAY(runtime_event, comm, event, data); - FILL_FIELD(runtime_event, pid, event, data); - FILL_FIELD(runtime_event, runtime, event, data); - FILL_FIELD(runtime_event, vruntime, event, data); - - if (sched->tp_handler->runtime_event) - err = sched->tp_handler->runtime_event(sched, &runtime_event, machine, sample); + if (sched->tp_handler->runtime_event) { + struct trace_runtime_event event = { + .comm = perf_evsel__strval(evsel, sample, "comm"), + .pid = perf_evsel__intval(evsel, sample, "pid"), + .runtime = perf_evsel__intval(evsel, sample, "runtime"), + .vruntime = perf_evsel__intval(evsel, sample, "vruntime"), + }; + return sched->tp_handler->runtime_event(sched, &event, machine, sample); + } - return err; + return 0; } static int process_sched_fork_event(struct perf_tool *tool, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine __maybe_unused) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - void *data = sample->raw_data; - struct trace_fork_event fork_event; - int err = 0; - - FILL_COMMON_FIELDS(fork_event, event, data); - FILL_ARRAY(fork_event, parent_comm, event, data); - FILL_FIELD(fork_event, parent_pid, event, data); - FILL_ARRAY(fork_event, child_comm, event, data); - FILL_FIELD(fork_event, child_pid, event, data); - - if (sched->tp_handler->fork_event) - err = sched->tp_handler->fork_event(sched, &fork_event, event); + if (sched->tp_handler->fork_event) { + struct trace_fork_event event = { + .parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), + .child_comm = perf_evsel__strval(evsel, sample, "child_comm"), + .parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), + .child_pid = perf_evsel__intval(evsel, sample, "child_pid"), + }; + return sched->tp_handler->fork_event(sched, &event, evsel); + } - return err; + return 0; } static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample __maybe_unused, struct machine *machine 
__maybe_unused) { - if (verbose) - printf("sched_exit event %p\n", event); - + pr_debug("sched_exit event %p\n", evsel); return 0; } static int process_sched_migrate_task_event(struct perf_tool *tool, - struct event_format *event, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine) { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - void *data = sample->raw_data; - struct trace_migrate_task_event migrate_task_event; - int err = 0; - FILL_COMMON_FIELDS(migrate_task_event, event, data); - - FILL_ARRAY(migrate_task_event, comm, event, data); - FILL_FIELD(migrate_task_event, pid, event, data); - FILL_FIELD(migrate_task_event, prio, event, data); - FILL_FIELD(migrate_task_event, cpu, event, data); - - if (sched->tp_handler->migrate_task_event) - err = sched->tp_handler->migrate_task_event(sched, &migrate_task_event, machine, sample); + if (sched->tp_handler->migrate_task_event) { + struct trace_migrate_task_event event = { + .comm = perf_evsel__strval(evsel, sample, "comm"), + .pid = perf_evsel__intval(evsel, sample, "pid"), + .prio = perf_evsel__intval(evsel, sample, "prio"), + .cpu = perf_evsel__intval(evsel, sample, "cpu"), + }; + return sched->tp_handler->migrate_task_event(sched, &event, machine, sample); + } - return err; + return 0; } typedef int (*tracepoint_handler)(struct perf_tool *tool, - struct event_format *tp_format, + struct perf_evsel *evsel, struct perf_sample *sample, struct machine *machine); @@ -1592,7 +1523,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_ if (evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - err = f(tool, evsel->tp_format, sample, machine); + err = f(tool, evsel, sample, machine); } return err; -- cgit v1.2.3-70-g09d2 From 9ec3f4e437ede2f3b5087d412abe16a0219b3b99 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 11 Sep 2012 19:29:17 -0300 Subject: perf sched: Don't read all tracepoint variables in advance Do it just at the actual consumer of these fields, that way we avoid needless lookups: [root@sandy ~]# perf sched record sleep 30s [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 8.585 MB perf.data (~375063 samples) ] Before: [root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null Performance counter stats for 'perf sched lat' (10 runs): 103.592215 task-clock # 0.993 CPUs utilized ( +- 0.33% ) 12 context-switches # 0.114 K/sec ( +- 3.29% ) 0 cpu-migrations # 0.000 K/sec 7,605 page-faults # 0.073 M/sec ( +- 0.00% ) 345,796,112 cycles # 3.338 GHz ( +- 0.07% ) [82.90%] 106,876,796 stalled-cycles-frontend # 30.91% frontend cycles idle ( +- 0.38% ) [83.23%] 62,060,877 stalled-cycles-backend # 17.95% backend cycles idle ( +- 0.80% ) [67.14%] 628,246,586 instructions # 1.82 insns per cycle # 0.17 stalled cycles per insn ( +- 0.04% ) [83.64%] 134,962,057 branches # 1302.820 M/sec ( +- 0.10% ) [83.64%] 1,233,037 branch-misses # 0.91% of all branches ( +- 0.29% ) [83.41%] 0.104333272 seconds time elapsed ( +- 0.33% ) [root@sandy ~]# perf stat -r 10 perf sched lat > /dev/null Performance counter stats for 'perf sched lat' (10 runs): 98.848272 task-clock # 0.993 CPUs utilized ( +- 0.48% ) 11 context-switches # 0.112 K/sec ( +- 2.83% ) 0 cpu-migrations # 0.003 K/sec ( +- 50.92% ) 7,604 page-faults # 0.077 M/sec ( +- 0.00% ) 332,216,085 cycles # 3.361 GHz ( +- 0.14% ) [82.87%] 100,623,710 stalled-cycles-frontend # 30.29% frontend cycles idle ( +- 0.53% ) [82.95%] 58,788,692 stalled-cycles-backend # 
17.70% backend cycles idle ( +- 0.59% ) [67.15%] 609,402,433 instructions # 1.83 insns per cycle # 0.17 stalled cycles per insn ( +- 0.04% ) [83.76%] 131,277,138 branches # 1328.067 M/sec ( +- 0.06% ) [83.77%] 1,117,871 branch-misses # 0.85% of all branches ( +- 0.32% ) [83.51%] 0.099580430 seconds time elapsed ( +- 0.48% ) [root@sandy ~]# Cc: David Ahern Cc: Frederic Weisbecker Cc: Jiri Olsa Cc: Mike Galbraith Cc: Namhyung Kim Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-kracdpw8wqlr0xjh75uk8g11@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 277 ++++++++++++++++----------------------------- 1 file changed, 97 insertions(+), 180 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 0df5e7a08c6..af305f57bd2 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -97,73 +97,25 @@ struct work_atoms { typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); -struct trace_switch_event { - char *prev_comm; - u32 prev_pid; - u32 prev_prio; - u64 prev_state; - char *next_comm; - u32 next_pid; - u32 next_prio; -}; - -struct trace_runtime_event { - char *comm; - u32 pid; - u64 runtime; - u64 vruntime; -}; +struct perf_sched; -struct trace_wakeup_event { - char *comm; - u32 pid; - u32 prio; - u32 success; - u32 cpu; -}; +struct trace_sched_handler { + int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); -struct trace_fork_event { - char *parent_comm; - u32 parent_pid; - char *child_comm; - u32 child_pid; -}; + int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); -struct trace_migrate_task_event { - char *comm; - u32 pid; - u32 prio; - u32 cpu; -}; + int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); -struct perf_sched; - -struct trace_sched_handler { - int (*switch_event)(struct perf_sched *sched, - struct trace_switch_event *event, - struct machine *machine, - struct perf_evsel *evsel, - struct perf_sample *sample); - - int (*runtime_event)(struct perf_sched *sched, - struct trace_runtime_event *event, - struct machine *machine, - struct perf_sample *sample); - - int (*wakeup_event)(struct perf_sched *sched, - struct trace_wakeup_event *event, - struct machine *machine, - struct perf_evsel *evsel, - struct perf_sample *sample); - - int (*fork_event)(struct perf_sched *sched, - struct trace_fork_event *event, - struct perf_evsel *evsel); + int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample); int (*migrate_task_event)(struct perf_sched *sched, - struct trace_migrate_task_event *event, - struct machine *machine, - struct perf_sample *sample); + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine); }; struct perf_sched { @@ -700,33 +652,36 @@ static void test_calibrations(struct perf_sched *sched) static int replay_wakeup_event(struct perf_sched *sched, - struct trace_wakeup_event *wakeup_event, - struct machine *machine __maybe_unused, - struct perf_evsel *evsel, struct perf_sample *sample) + struct perf_evsel *evsel, struct perf_sample *sample, + struct machine *machine __maybe_unused) { + const char *comm = perf_evsel__strval(evsel, sample, "comm"); + const u32 pid = perf_evsel__intval(evsel, sample, 
"pid"); struct task_desc *waker, *wakee; if (verbose) { printf("sched_wakeup event %p\n", evsel); - printf(" ... pid %d woke up %s/%d\n", - sample->tid, wakeup_event->comm, wakeup_event->pid); + printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); } waker = register_pid(sched, sample->tid, ""); - wakee = register_pid(sched, wakeup_event->pid, wakeup_event->comm); + wakee = register_pid(sched, pid, comm); add_sched_event_wakeup(sched, waker, sample->time, wakee); return 0; } -static int -replay_switch_event(struct perf_sched *sched, - struct trace_switch_event *switch_event, - struct machine *machine __maybe_unused, - struct perf_evsel *evsel, - struct perf_sample *sample) +static int replay_switch_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine __maybe_unused) { + const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), + *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); struct task_desc *prev, __maybe_unused *next; u64 timestamp0, timestamp = sample->time; int cpu = sample->cpu; @@ -749,35 +704,36 @@ replay_switch_event(struct perf_sched *sched, return -1; } - if (verbose) { - printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", - switch_event->prev_comm, switch_event->prev_pid, - switch_event->next_comm, switch_event->next_pid, - delta); - } + pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", + prev_comm, prev_pid, next_comm, next_pid, delta); - prev = register_pid(sched, switch_event->prev_pid, switch_event->prev_comm); - next = register_pid(sched, switch_event->next_pid, switch_event->next_comm); + prev = register_pid(sched, prev_pid, prev_comm); + next = register_pid(sched, next_pid, next_comm); sched->cpu_last_switched[cpu] = timestamp; add_sched_event_run(sched, prev, timestamp, delta); - add_sched_event_sleep(sched, prev, timestamp, switch_event->prev_state); + add_sched_event_sleep(sched, prev, timestamp, prev_state); return 0; } -static int -replay_fork_event(struct perf_sched *sched, struct trace_fork_event *fork_event, - struct perf_evsel *evsel) +static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample) { + const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), + *child_comm = perf_evsel__strval(evsel, sample, "child_comm"); + const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), + child_pid = perf_evsel__intval(evsel, sample, "child_pid"); + if (verbose) { printf("sched_fork event %p\n", evsel); - printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); - printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); + printf("... parent: %s/%d\n", parent_comm, parent_pid); + printf("... 
child: %s/%d\n", child_comm, child_pid); } - register_pid(sched, fork_event->parent_pid, fork_event->parent_comm); - register_pid(sched, fork_event->child_pid, fork_event->child_comm); + + register_pid(sched, parent_pid, parent_comm); + register_pid(sched, child_pid, child_comm); return 0; } @@ -870,18 +826,18 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) } static int latency_fork_event(struct perf_sched *sched __maybe_unused, - struct trace_fork_event *fork_event __maybe_unused, - struct perf_evsel *evsel __maybe_unused) + struct perf_evsel *evsel __maybe_unused, + struct perf_sample *sample __maybe_unused) { /* should insert the newcomer */ return 0; } -static char sched_out_state(struct trace_switch_event *switch_event) +static char sched_out_state(u64 prev_state) { const char *str = TASK_STATE_TO_CHAR_STR; - return str[switch_event->prev_state]; + return str[prev_state]; } static int @@ -951,13 +907,14 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } -static int -latency_switch_event(struct perf_sched *sched, - struct trace_switch_event *switch_event, - struct machine *machine, - struct perf_evsel *evsel __maybe_unused, - struct perf_sample *sample) +static int latency_switch_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); struct work_atoms *out_events, *in_events; struct thread *sched_out, *sched_in; u64 timestamp0, timestamp = sample->time; @@ -978,8 +935,8 @@ latency_switch_event(struct perf_sched *sched, return -1; } - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, prev_pid); + sched_in = machine__findnew_thread(machine, next_pid); out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); if (!out_events) { @@ -991,7 +948,7 @@ latency_switch_event(struct perf_sched *sched, return -1; } } - if (add_sched_out_event(out_events, sched_out_state(switch_event), timestamp)) + if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) return -1; in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); @@ -1015,12 +972,14 @@ latency_switch_event(struct perf_sched *sched, return 0; } -static int -latency_runtime_event(struct perf_sched *sched, - struct trace_runtime_event *runtime_event, - struct machine *machine, struct perf_sample *sample) +static int latency_runtime_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); + const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); + struct thread *thread = machine__findnew_thread(machine, pid); struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); u64 timestamp = sample->time; int cpu = sample->cpu; @@ -1038,27 +997,27 @@ latency_runtime_event(struct perf_sched *sched, return -1; } - add_runtime_event(atoms, runtime_event->runtime, timestamp); + add_runtime_event(atoms, runtime, timestamp); return 0; } -static int -latency_wakeup_event(struct perf_sched 
*sched, - struct trace_wakeup_event *wakeup_event, - struct machine *machine, - struct perf_evsel *evsel __maybe_unused, - struct perf_sample *sample) +static int latency_wakeup_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 pid = perf_evsel__intval(evsel, sample, "pid"), + success = perf_evsel__intval(evsel, sample, "success"); struct work_atoms *atoms; struct work_atom *atom; struct thread *wakee; u64 timestamp = sample->time; /* Note for later, it may be interesting to observe the failing cases */ - if (!wakeup_event->success) + if (!success) return 0; - wakee = machine__findnew_thread(machine, wakeup_event->pid); + wakee = machine__findnew_thread(machine, pid); atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { if (thread_atoms_insert(sched, wakee)) @@ -1095,11 +1054,12 @@ latency_wakeup_event(struct perf_sched *sched, return 0; } -static int -latency_migrate_task_event(struct perf_sched *sched, - struct trace_migrate_task_event *migrate_task_event, - struct machine *machine, struct perf_sample *sample) +static int latency_migrate_task_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); u64 timestamp = sample->time; struct work_atoms *atoms; struct work_atom *atom; @@ -1111,7 +1071,7 @@ latency_migrate_task_event(struct perf_sched *sched, if (sched->profile_cpu == -1) return 0; - migrant = machine__findnew_thread(machine, migrate_task_event->pid); + migrant = machine__findnew_thread(machine, pid); atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { if (thread_atoms_insert(sched, migrant)) @@ -1296,28 +1256,17 @@ static int process_sched_wakeup_event(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - if (sched->tp_handler->wakeup_event) { - struct trace_wakeup_event event = { - .comm = perf_evsel__strval(evsel, sample, "comm"), - .pid = perf_evsel__intval(evsel, sample, "pid"), - .prio = perf_evsel__intval(evsel, sample, "prio"), - .success = perf_evsel__intval(evsel, sample, "success"), - .cpu = perf_evsel__intval(evsel, sample, "cpu"), - }; - - return sched->tp_handler->wakeup_event(sched, &event, machine, evsel, sample); - } + if (sched->tp_handler->wakeup_event) + return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); return 0; } -static int -map_switch_event(struct perf_sched *sched, - struct trace_switch_event *switch_event, - struct machine *machine, - struct perf_evsel *evsel __maybe_unused, - struct perf_sample *sample) +static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine) { + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); struct thread *sched_out __maybe_unused, *sched_in; int new_shortname; u64 timestamp0, timestamp = sample->time; @@ -1341,8 +1290,8 @@ map_switch_event(struct perf_sched *sched, return -1; } - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, prev_pid); + sched_in = machine__findnew_thread(machine, next_pid); sched->curr_thread[this_cpu] = sched_in; @@ -1411,19 +1360,8 @@ static int process_sched_switch_event(struct 
perf_tool *tool, sched->nr_context_switch_bugs++; } - if (sched->tp_handler->switch_event) { - struct trace_switch_event event = { - .prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), - .prev_pid = prev_pid, - .prev_prio = perf_evsel__intval(evsel, sample, "prev_prio"), - .prev_state = perf_evsel__intval(evsel, sample, "prev_state"), - .next_comm = perf_evsel__strval(evsel, sample, "next_comm"), - .next_pid = next_pid, - .next_prio = perf_evsel__intval(evsel, sample, "next_prio"), - }; - - err = sched->tp_handler->switch_event(sched, &event, machine, evsel, sample); - } + if (sched->tp_handler->switch_event) + err = sched->tp_handler->switch_event(sched, evsel, sample, machine); sched->curr_pid[this_cpu] = next_pid; return err; @@ -1436,15 +1374,8 @@ static int process_sched_runtime_event(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - if (sched->tp_handler->runtime_event) { - struct trace_runtime_event event = { - .comm = perf_evsel__strval(evsel, sample, "comm"), - .pid = perf_evsel__intval(evsel, sample, "pid"), - .runtime = perf_evsel__intval(evsel, sample, "runtime"), - .vruntime = perf_evsel__intval(evsel, sample, "vruntime"), - }; - return sched->tp_handler->runtime_event(sched, &event, machine, sample); - } + if (sched->tp_handler->runtime_event) + return sched->tp_handler->runtime_event(sched, evsel, sample, machine); return 0; } @@ -1456,15 +1387,8 @@ static int process_sched_fork_event(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - if (sched->tp_handler->fork_event) { - struct trace_fork_event event = { - .parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), - .child_comm = perf_evsel__strval(evsel, sample, "child_comm"), - .parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), - .child_pid = perf_evsel__intval(evsel, sample, "child_pid"), - }; - return sched->tp_handler->fork_event(sched, &event, evsel); - } + if (sched->tp_handler->fork_event) + return sched->tp_handler->fork_event(sched, evsel, sample); return 0; } @@ -1485,15 +1409,8 @@ static int process_sched_migrate_task_event(struct perf_tool *tool, { struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - if (sched->tp_handler->migrate_task_event) { - struct trace_migrate_task_event event = { - .comm = perf_evsel__strval(evsel, sample, "comm"), - .pid = perf_evsel__intval(evsel, sample, "pid"), - .prio = perf_evsel__intval(evsel, sample, "prio"), - .cpu = perf_evsel__intval(evsel, sample, "cpu"), - }; - return sched->tp_handler->migrate_task_event(sched, &event, machine, sample); - } + if (sched->tp_handler->migrate_task_event) + return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); return 0; } -- cgit v1.2.3-70-g09d2 From 60b7d14af46ef07778e89556429ec9ab5a7fad0b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 12 Sep 2012 11:11:06 +0900 Subject: perf sched: Fixup for the die() removal The commit a116e05dcf61 ("perf sched: Remove die() calls") replaced die() call to pr_debug + return -1, but it should be pr_err otherwise it'll not show up unless -v option is given. Fix it. 
Signed-off-by: Namhyung Kim Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1347415866-303-2-git-send-email-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-sched.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'tools/perf/builtin-sched.c') diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index af305f57bd2..9b9e32eaa80 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -438,8 +438,8 @@ static int self_open_counters(void) fd = sys_perf_event_open(&attr, 0, -1, -1, 0); if (fd < 0) - pr_debug("Error: sys_perf_event_open() syscall returned" - "with %d (%s)\n", fd, strerror(errno)); + pr_err("Error: sys_perf_event_open() syscall returned " + "with %d (%s)\n", fd, strerror(errno)); return fd; } @@ -700,7 +700,7 @@ static int replay_switch_event(struct perf_sched *sched, delta = 0; if (delta < 0) { - pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); return -1; } @@ -990,7 +990,7 @@ static int latency_runtime_event(struct perf_sched *sched, return -1; atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); if (!atoms) { - pr_debug("in-event: Internal tree error"); + pr_err("in-event: Internal tree error"); return -1; } if (add_sched_out_event(atoms, 'R', timestamp)) @@ -1024,7 +1024,7 @@ static int latency_wakeup_event(struct perf_sched *sched, return -1; atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { - pr_debug("wakeup-event: Internal tree error"); + pr_err("wakeup-event: Internal tree error"); return -1; } if (add_sched_out_event(atoms, 'S', timestamp)) @@ -1079,7 +1079,7 @@ static int latency_migrate_task_event(struct perf_sched *sched, register_pid(sched, migrant->pid, migrant->comm); atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { - pr_debug("migration-event: Internal tree error"); + pr_err("migration-event: Internal tree error"); return -1; } if (add_sched_out_event(atoms, 'R', timestamp)) @@ -1286,7 +1286,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, delta = 0; if (delta < 0) { - pr_debug("hm, delta: %" PRIu64 " < 0 ?\n", delta); + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); return -1; } -- cgit v1.2.3-70-g09d2
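
A minimal, standalone sketch of the container_of pattern the series above converges on: struct perf_sched embeds its struct perf_tool, and every tracepoint handler recovers the outer object from the embedded member instead of relying on globals. The types, the local container_of macro and the main() driver below are simplified stand-ins for illustration only and are not the perf-internal APIs; in the tree the real lookup is container_of(tool, struct perf_sched, tool).

/*
 * Illustrative only: simplified stand-ins for struct perf_tool and
 * struct perf_sched, showing why the tool must be embedded by value.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool {				/* plays the role of struct perf_tool  */
	int (*sample)(struct tool *tool, int cpu);
};

struct sched {				/* plays the role of struct perf_sched */
	struct tool tool;		/* embedded, not a pointer             */
	unsigned long nr_events;
};

static int sample_cb(struct tool *tool, int cpu)
{
	/* Recover the outer per-command state without any global variables. */
	struct sched *sched = container_of(tool, struct sched, tool);

	sched->nr_events++;
	printf("cpu %d, events so far: %lu\n", cpu, sched->nr_events);
	return 0;
}

int main(void)
{
	struct sched sched = {
		.tool		= { .sample = sample_cb },
		.nr_events	= 0,
	};

	/* The session layer only ever sees &sched.tool, as in builtin-sched.c. */
	sched.tool.sample(&sched.tool, 0);
	sched.tool.sample(&sched.tool, 1);
	return 0;
}

The same shape is what lets the later patches drop the static trace_handler, curr_pid[] and statistics globals: anything a callback needs travels inside the structure whose address is already passed to it.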