Diffstat (limited to 'tools/perf/builtin-sched.c')
-rw-r--r--  tools/perf/builtin-sched.c | 139
1 file changed, 67 insertions, 72 deletions
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index fed9ae432c1..0f3c65518a2 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -109,8 +109,9 @@ struct trace_sched_handler {
 	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
 			    struct perf_sample *sample, struct machine *machine);
 
-	int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
-			  struct perf_sample *sample);
+	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
+	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
+			  struct machine *machine);
 
 	int (*migrate_task_event)(struct perf_sched *sched,
 				  struct perf_evsel *evsel,
@@ -717,22 +718,31 @@ static int replay_switch_event(struct perf_sched *sched,
 	return 0;
 }
 
-static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
-			     struct perf_sample *sample)
+static int replay_fork_event(struct perf_sched *sched,
+			     union perf_event *event,
+			     struct machine *machine)
 {
-	const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
-		   *child_comm = perf_evsel__strval(evsel, sample, "child_comm");
-	const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
-		  child_pid = perf_evsel__intval(evsel, sample, "child_pid");
+	struct thread *child, *parent;
+
+	child = machine__findnew_thread(machine, event->fork.pid,
+					event->fork.tid);
+	parent = machine__findnew_thread(machine, event->fork.ppid,
+					 event->fork.ptid);
+
+	if (child == NULL || parent == NULL) {
+		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
+			 child, parent);
+		return 0;
+	}
 
 	if (verbose) {
-		printf("sched_fork event %p\n", evsel);
-		printf("... parent: %s/%d\n", parent_comm, parent_pid);
-		printf("...  child: %s/%d\n", child_comm, child_pid);
+		printf("fork event\n");
+		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
+		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
 	}
 
-	register_pid(sched, parent_pid, parent_comm);
-	register_pid(sched, child_pid, child_comm);
+	register_pid(sched, parent->tid, thread__comm_str(parent));
+	register_pid(sched, child->tid, thread__comm_str(child));
 	return 0;
 }
 
@@ -824,14 +834,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
 	return 0;
 }
 
-static int latency_fork_event(struct perf_sched *sched __maybe_unused,
-			      struct perf_evsel *evsel __maybe_unused,
-			      struct perf_sample *sample __maybe_unused)
-{
-	/* should insert the newcomer */
-	return 0;
-}
-
 static char sched_out_state(u64 prev_state)
 {
 	const char *str = TASK_STATE_TO_CHAR_STR;
@@ -934,8 +936,8 @@ static int latency_switch_event(struct perf_sched *sched,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, prev_pid);
-	sched_in = machine__findnew_thread(machine, next_pid);
+	sched_out = machine__findnew_thread(machine, 0, prev_pid);
+	sched_in = machine__findnew_thread(machine, 0, next_pid);
 
 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
 	if (!out_events) {
@@ -978,7 +980,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 {
 	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
 	const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
-	struct thread *thread = machine__findnew_thread(machine, pid);
+	struct thread *thread = machine__findnew_thread(machine, 0, pid);
 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
 	u64 timestamp = sample->time;
 	int cpu = sample->cpu;
@@ -1016,7 +1018,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
 	if (!success)
 		return 0;
 
-	wakee = machine__findnew_thread(machine, pid);
+	wakee = machine__findnew_thread(machine, 0, pid);
 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, wakee))
@@ -1070,12 +1072,12 @@ static int latency_migrate_task_event(struct perf_sched *sched,
 	if (sched->profile_cpu == -1)
 		return 0;
 
-	migrant = machine__findnew_thread(machine, pid);
+	migrant = machine__findnew_thread(machine, 0, pid);
 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, migrant))
 			return -1;
-		register_pid(sched, migrant->pid, migrant->comm);
+		register_pid(sched, migrant->tid, thread__comm_str(migrant));
 		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
 		if (!atoms) {
 			pr_err("migration-event: Internal tree error");
@@ -1109,13 +1111,13 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
 	/*
 	 * Ignore idle threads:
 	 */
-	if (!strcmp(work_list->thread->comm, "swapper"))
+	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
 		return;
 
 	sched->all_runtime += work_list->total_runtime;
 	sched->all_count += work_list->nb_atoms;
 
-	ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
+	ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
 
 	for (i = 0; i < 24 - ret; i++)
 		printf(" ");
@@ -1131,9 +1133,9 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
 
 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
 {
-	if (l->thread->pid < r->thread->pid)
+	if (l->thread->tid < r->thread->tid)
 		return -1;
-	if (l->thread->pid > r->thread->pid)
+	if (l->thread->tid > r->thread->tid)
 		return 1;
 
 	return 0;
@@ -1289,8 +1291,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, prev_pid);
-	sched_in = machine__findnew_thread(machine, next_pid);
+	sched_out = machine__findnew_thread(machine, 0, prev_pid);
+	sched_in = machine__findnew_thread(machine, 0, next_pid);
 
 	sched->curr_thread[this_cpu] = sched_in;
 
@@ -1321,7 +1323,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 			printf("*");
 
 		if (sched->curr_thread[cpu]) {
-			if (sched->curr_thread[cpu]->pid)
+			if (sched->curr_thread[cpu]->tid)
 				printf("%2s ", sched->curr_thread[cpu]->shortname);
 			else
 				printf(". ");
@@ -1332,7 +1334,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 	printf(" %12.6f secs ", (double)timestamp/1e9);
 	if (new_shortname) {
 		printf("%s => %s:%d\n",
-			sched_in->shortname, sched_in->comm, sched_in->pid);
+			sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
 	} else {
 		printf("\n");
 	}
@@ -1379,25 +1381,20 @@ static int process_sched_runtime_event(struct perf_tool *tool,
 	return 0;
 }
 
-static int process_sched_fork_event(struct perf_tool *tool,
-				    struct perf_evsel *evsel,
-				    struct perf_sample *sample,
-				    struct machine *machine __maybe_unused)
+static int perf_sched__process_fork_event(struct perf_tool *tool,
+					  union perf_event *event,
+					  struct perf_sample *sample,
+					  struct machine *machine)
 {
 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
 
-	if (sched->tp_handler->fork_event)
-		return sched->tp_handler->fork_event(sched, evsel, sample);
+	/* run the fork event through the perf machineruy */
+	perf_event__process_fork(tool, event, sample, machine);
 
-	return 0;
-}
+	/* and then run additional processing needed for this command */
+	if (sched->tp_handler->fork_event)
+		return sched->tp_handler->fork_event(sched, event, machine);
 
-static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
-				    struct perf_evsel *evsel,
-				    struct perf_sample *sample __maybe_unused,
-				    struct machine *machine __maybe_unused)
-{
-	pr_debug("sched_exit event %p\n", evsel);
 	return 0;
 }
 
@@ -1425,27 +1422,20 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
 						 struct perf_evsel *evsel,
 						 struct machine *machine)
 {
-	struct thread *thread = machine__findnew_thread(machine, sample->tid);
 	int err = 0;
 
-	if (thread == NULL) {
-		pr_debug("problem processing %s event, skipping it.\n",
-			 perf_evsel__name(evsel));
-		return -1;
-	}
-
 	evsel->hists.stats.total_period += sample->period;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 
-	if (evsel->handler.func != NULL) {
-		tracepoint_handler f = evsel->handler.func;
+	if (evsel->handler != NULL) {
+		tracepoint_handler f = evsel->handler;
 		err = f(tool, evsel, sample, machine);
 	}
 
 	return err;
 }
 
-static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
+static int perf_sched__read_events(struct perf_sched *sched,
 				   struct perf_session **psession)
 {
 	const struct perf_evsel_str_handler handlers[] = {
@@ -1453,13 +1443,15 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
 		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
 		{ "sched:sched_wakeup", process_sched_wakeup_event, },
 		{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
-		{ "sched:sched_process_fork", process_sched_fork_event, },
-		{ "sched:sched_process_exit", process_sched_exit_event, },
 		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
 	};
 	struct perf_session *session;
+	struct perf_data_file file = {
+		.path = input_name,
+		.mode = PERF_DATA_MODE_READ,
+	};
-	session = perf_session__new(input_name, O_RDONLY, 0, false, &sched->tool);
+	session = perf_session__new(&file, false, &sched->tool);
 	if (session == NULL) {
 		pr_debug("No Memory for session\n");
 		return -1;
 	}
@@ -1480,11 +1472,10 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
 		sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
 	}
 
-	if (destroy)
-		perf_session__delete(session);
-
 	if (psession)
 		*psession = session;
+	else
+		perf_session__delete(session);
 
 	return 0;
 
@@ -1529,8 +1520,11 @@ static int perf_sched__lat(struct perf_sched *sched)
 	struct perf_session *session;
 
 	setup_pager();
-	if (perf_sched__read_events(sched, false, &session))
+
+	/* save session -- references to threads are held in work_list */
+	if (perf_sched__read_events(sched, &session))
 		return -1;
+
 	perf_sched__sort_lat(sched);
 
 	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
@@ -1565,7 +1559,7 @@ static int perf_sched__map(struct perf_sched *sched)
 	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
 
 	setup_pager();
-	if (perf_sched__read_events(sched, true, NULL))
+	if (perf_sched__read_events(sched, NULL))
 		return -1;
 	print_bad_events(sched);
 	return 0;
@@ -1580,7 +1574,7 @@ static int perf_sched__replay(struct perf_sched *sched)
 
 	test_calibrations(sched);
 
-	if (perf_sched__read_events(sched, true, NULL))
+	if (perf_sched__read_events(sched, NULL))
 		return -1;
 
 	printf("nr_run_events: %ld\n", sched->nr_run_events);
@@ -1639,7 +1633,6 @@ static int __cmd_record(int argc, const char **argv)
 		"-e", "sched:sched_stat_sleep",
 		"-e", "sched:sched_stat_iowait",
 		"-e", "sched:sched_stat_runtime",
-		"-e", "sched:sched_process_exit",
 		"-e", "sched:sched_process_fork",
 		"-e", "sched:sched_wakeup",
 		"-e", "sched:sched_migrate_task",
@@ -1670,14 +1663,13 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 			.sample = perf_sched__process_tracepoint_sample,
 			.comm = perf_event__process_comm,
 			.lost = perf_event__process_lost,
-			.fork = perf_event__process_fork,
+			.fork = perf_sched__process_fork_event,
 			.ordered_samples = true,
 		},
 		.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
 		.sort_list = LIST_HEAD_INIT(sched.sort_list),
 		.start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
 		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
-		.curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
 		.sort_order = default_sort_order,
 		.replay_repeat = 10,
 		.profile_cpu = -1,
@@ -1729,7 +1721,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 		.wakeup_event = latency_wakeup_event,
 		.switch_event = latency_switch_event,
 		.runtime_event = latency_runtime_event,
-		.fork_event = latency_fork_event,
 		.migrate_task_event = latency_migrate_task_event,
 	};
 	struct trace_sched_handler map_ops = {
@@ -1740,6 +1731,10 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 		.switch_event = replay_switch_event,
 		.fork_event = replay_fork_event,
 	};
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
+		sched.curr_pid[i] = -1;
 
 	argc = parse_options(argc, argv, sched_options, sched_usage,
 			     PARSE_OPT_STOP_AT_NON_OPTION);
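
Note: with this change, fork bookkeeping is driven by the PERF_RECORD_FORK side-band event rather than the sched_process_fork tracepoint. The tool's .fork callback first runs the generic perf_event__process_fork() to update the machine's thread table, and only then calls the per-command tp_handler->fork_event with the raw event and the machine. As a minimal sketch of the new callback contract (not part of this patch; the function name is hypothetical and it reuses only helpers visible in the diff, namely machine__findnew_thread(), pr_debug() and thread_atoms_insert()), a latency-mode fork handler could look like this:

static int latency_fork_event_sketch(struct perf_sched *sched,
				     union perf_event *event,
				     struct machine *machine)
{
	/* the child was already added by perf_event__process_fork() */
	struct thread *child = machine__findnew_thread(machine, event->fork.pid,
						       event->fork.tid);

	if (child == NULL) {
		pr_debug("fork event: child thread not found\n");
		return 0;
	}

	/* insert the newcomer so later switch/wakeup events find its work atoms */
	return thread_atoms_insert(sched, child);
}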
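
The converted tracepoint paths also reflect two API changes assumed throughout this diff: machine__findnew_thread() now takes a (pid, tid) pair (the sched tracepoints only carry a tid, so those call sites pass 0 for the pid), and a thread's comm is read through thread__comm_str() while the numeric identifier is thread->tid rather than thread->pid. A hedged sketch of the lookup pattern those call sites follow:

	/* sketch: look up a thread by tid only, as the tracepoint handlers do */
	struct thread *t = machine__findnew_thread(machine, 0, pid);

	if (t != NULL)
		printf("%s:%d\n", thread__comm_str(t), t->tid);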