diff options
author | Ingo Molnar <mingo@kernel.org> | 2012-08-21 11:27:00 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-08-21 11:27:00 +0200 |
commit | bcada3d4b8c96b8792c2306f363992ca5ab9da42 (patch) | |
tree | e420679a5db6ea4e1694eef57f9abb6acac8d4d3 /tools/perf/builtin-sched.c | |
parent | 26198c21d1b286a084fe5d514a30bc7e6c712a34 (diff) | |
parent | 000078bc3ee69efb1124b8478c7527389a826074 (diff) |
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
* Fix include order for bison/flex-generated C files, from Ben Hutchings
* Build fixes and documentation corrections from David Ahern
* Group parsing support, from Jiri Olsa
* UI/gtk refactorings and improvements from Namhyung Kim
* NULL deref fix for perf script, from Namhyung Kim
* Assorted cleanups from Robert Richter
* Let O= makes handle relative paths, from Steven Rostedt
* perf script python fixes, from Feng Tang.
* Improve 'perf lock' error message when the needed tracepoints
are not present, from David Ahern.
* Initial bash completion support, from Frederic Weisbecker
* Allow building without libelf, from Namhyung Kim.
* Support DWARF CFI based unwind to have callchains when %bp
based unwinding is not possible, from Jiri Olsa.
* Symbol resolution fixes; while fixing support for PPC64 files with an .opt ELF
  section was the end goal, several fixes for code that handles all
  architectures, plus cleanups, are included, from Cody Schafer.
* Add a description for the JIT interface, from Andi Kleen.
* Assorted fixes for Documentation and build in 32 bit, from Robert Richter
* Add support for non-tracepoint events in perf script python, from Feng Tang
* Cache the libtraceevent event_format associated to each evsel early, so that we
avoid relookups, i.e. calling pevent_find_event repeatedly when processing
tracepoint events.
[ This is to reduce the surface contact with libtraceevent and make clear what
it is that the perf tools need from that lib: so far, parsing the common and
per-event fields. ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r-- | tools/perf/builtin-sched.c | 150 |
1 files changed, 47 insertions, 103 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 7a9ad2b1ee7..a25a023965b 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -43,11 +43,6 @@ static u64 sleep_measurement_overhead; static unsigned long nr_tasks; -struct perf_sched { - struct perf_tool tool; - struct perf_session *session; -}; - struct sched_atom; struct task_desc { @@ -734,46 +729,30 @@ struct trace_sched_handler { void (*switch_event)(struct trace_switch_event *, struct machine *, struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*runtime_event)(struct trace_runtime_event *, struct machine *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*wakeup_event)(struct trace_wakeup_event *, struct machine *, struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct perf_sample *sample); void (*fork_event)(struct trace_fork_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct event_format *event); void (*migrate_task_event)(struct trace_migrate_task_event *, - struct machine *machine, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + struct machine *machine, + struct perf_sample *sample); }; static void replay_wakeup_event(struct trace_wakeup_event *wakeup_event, struct machine *machine __used, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event, struct perf_sample *sample) { struct task_desc *waker, *wakee; @@ -789,7 +768,7 @@ replay_wakeup_event(struct trace_wakeup_event *wakeup_event, waker = register_pid(wakeup_event->common_pid, "<unknown>"); wakee = register_pid(wakeup_event->pid, wakeup_event->comm); - add_sched_event_wakeup(waker, timestamp, wakee); + add_sched_event_wakeup(waker, sample->time, wakee); } static u64 
cpu_last_switched[MAX_CPUS]; @@ -798,12 +777,11 @@ static void replay_switch_event(struct trace_switch_event *switch_event, struct machine *machine __used, struct event_format *event, - int cpu, - u64 timestamp, - struct thread *thread __used) + struct perf_sample *sample) { struct task_desc *prev, __used *next; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; + int cpu = sample->cpu; s64 delta; if (verbose) @@ -840,10 +818,7 @@ replay_switch_event(struct trace_switch_event *switch_event, static void replay_fork_event(struct trace_fork_event *fork_event, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event) { if (verbose) { printf("sched_fork event %p\n", event); @@ -949,10 +924,7 @@ static void thread_atoms_insert(struct thread *thread) static void latency_fork_event(struct trace_fork_event *fork_event __used, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + struct event_format *event __used) { /* should insert the newcomer */ } @@ -1032,13 +1004,12 @@ static void latency_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *thread __used) + struct perf_sample *sample) { struct work_atoms *out_events, *in_events; struct thread *sched_out, *sched_in; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; + int cpu = sample->cpu; s64 delta; BUG_ON(cpu >= MAX_CPUS || cpu < 0); @@ -1083,14 +1054,12 @@ latency_switch_event(struct trace_switch_event *switch_event, static void latency_runtime_event(struct trace_runtime_event *runtime_event, - struct machine *machine, - struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *this_thread __used) + struct machine *machine, struct perf_sample *sample) { struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); struct 
work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); + u64 timestamp = sample->time; + int cpu = sample->cpu; BUG_ON(cpu >= MAX_CPUS || cpu < 0); if (!atoms) { @@ -1106,15 +1075,13 @@ latency_runtime_event(struct trace_runtime_event *runtime_event, static void latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) + struct machine *machine, struct event_format *event __used, + struct perf_sample *sample) { struct work_atoms *atoms; struct work_atom *atom; struct thread *wakee; + u64 timestamp = sample->time; /* Note for later, it may be interesting to observe the failing cases */ if (!wakeup_event->success) @@ -1154,12 +1121,9 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, static void latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) + struct machine *machine, struct perf_sample *sample) { + u64 timestamp = sample->time; struct work_atoms *atoms; struct work_atom *atom; struct thread *migrant; @@ -1369,7 +1333,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_wakeup_event wakeup_event; @@ -1383,8 +1347,7 @@ process_sched_wakeup_event(struct perf_tool *tool __used, FILL_FIELD(wakeup_event, cpu, event, data); if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->wakeup_event(&wakeup_event, machine, event, sample); } /* @@ -1404,15 +1367,13 @@ static void map_switch_event(struct trace_switch_event *switch_event, struct machine *machine, struct 
event_format *event __used, - int this_cpu, - u64 timestamp, - struct thread *thread __used) + struct perf_sample *sample) { struct thread *sched_out __used, *sched_in; int new_shortname; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; s64 delta; - int cpu; + int cpu, this_cpu = sample->cpu; BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); @@ -1484,7 +1445,7 @@ process_sched_switch_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { int this_cpu = sample->cpu; void *data = sample->raw_data; @@ -1509,8 +1470,7 @@ process_sched_switch_event(struct perf_tool *tool __used, nr_context_switch_bugs++; } if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, machine, event, - this_cpu, sample->time, thread); + trace_handler->switch_event(&switch_event, machine, event, sample); curr_pid[this_cpu] = switch_event.next_pid; } @@ -1520,7 +1480,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_runtime_event runtime_event; @@ -1531,8 +1491,7 @@ process_sched_runtime_event(struct perf_tool *tool __used, FILL_FIELD(runtime_event, vruntime, event, data); if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, machine, event, - sample->cpu, sample->time, thread); + trace_handler->runtime_event(&runtime_event, machine, sample); } static void @@ -1540,7 +1499,7 @@ process_sched_fork_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine __used, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_fork_event fork_event; @@ -1553,8 +1512,7 @@ process_sched_fork_event(struct perf_tool *tool __used, 
FILL_FIELD(fork_event, child_pid, event, data); if (trace_handler->fork_event) - trace_handler->fork_event(&fork_event, event, - sample->cpu, sample->time, thread); + trace_handler->fork_event(&fork_event, event); } static void @@ -1573,7 +1531,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, struct event_format *event, struct perf_sample *sample, struct machine *machine, - struct thread *thread) + struct thread *thread __used) { void *data = sample->raw_data; struct trace_migrate_task_event migrate_task_event; @@ -1586,9 +1544,7 @@ process_sched_migrate_task_event(struct perf_tool *tool __used, FILL_FIELD(migrate_task_event, cpu, event, data); if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, machine, - event, sample->cpu, - sample->time, thread); + trace_handler->migrate_task_event(&migrate_task_event, machine, sample); } typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, @@ -1596,14 +1552,12 @@ typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format * struct machine *machine, struct thread *thread); -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, +static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __used, union perf_event *event __used, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { - struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - struct pevent *pevent = sched->session->pevent; struct thread *thread = machine__findnew_thread(machine, sample->pid); if (thread == NULL) { @@ -1617,25 +1571,18 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, if (evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - - if (evsel->handler.data == NULL) - evsel->handler.data = pevent_find_event(pevent, - evsel->attr.config); - - f(tool, evsel->handler.data, sample, machine, thread); + f(tool, 
evsel->tp_format, sample, machine, thread); } return 0; } -static struct perf_sched perf_sched = { - .tool = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_event__process_comm, - .lost = perf_event__process_lost, - .fork = perf_event__process_task, - .ordered_samples = true, - }, +static struct perf_tool perf_sched = { + .sample = perf_sched__process_tracepoint_sample, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_task, + .ordered_samples = true, }; static void read_events(bool destroy, struct perf_session **psession) @@ -1652,18 +1599,15 @@ static void read_events(bool destroy, struct perf_session **psession) }; struct perf_session *session; - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_sched.tool); + session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_sched); if (session == NULL) die("No Memory"); - perf_sched.session = session; - err = perf_session__set_tracepoints_handlers(session, handlers); assert(err == 0); if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched.tool); + err = perf_session__process_events(session, &perf_sched); if (err) die("Failed to process events, error %d", err); |