Diffstat (limited to 'tools/perf/util/session.c')
-rw-r--r--  tools/perf/util/session.c | 196
1 file changed, 182 insertions(+), 14 deletions(-)
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6702ac28754..5f0e05a76c0 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -228,6 +228,15 @@ static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
+static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *perf_session
+ __maybe_unused)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
@@ -262,6 +271,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
else
tool->finished_round = process_finished_round_stub;
}
+ if (tool->id_index == NULL)
+ tool->id_index = process_id_index_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
@@ -460,6 +471,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
[PERF_RECORD_HEADER_BUILD_ID] = NULL,
+ [PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -521,15 +533,11 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
return -ETIME;
if (timestamp < oe->last_flush) {
- WARN_ONCE(1, "Timestamp below last timeslice flush\n");
-
- pr_oe_time(timestamp, "out of order event");
+ pr_oe_time(timestamp, "out of order event\n");
pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
oe->last_flush_type);
- /* We could get out of order messages after forced flush. */
- if (oe->last_flush_type != OE_FLUSH__HALF)
- return -EINVAL;
+ s->stats.nr_unordered_events++;
}
new = ordered_events__new(oe, timestamp, event);
@@ -580,15 +588,46 @@ static void regs_dump__printf(u64 mask, u64 *regs)
}
}
+static const char *regs_abi[] = {
+ [PERF_SAMPLE_REGS_ABI_NONE] = "none",
+ [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
+ [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
+};
+
+static inline const char *regs_dump_abi(struct regs_dump *d)
+{
+ if (d->abi > PERF_SAMPLE_REGS_ABI_64)
+ return "unknown";
+
+ return regs_abi[d->abi];
+}
+
+static void regs__printf(const char *type, struct regs_dump *regs)
+{
+ u64 mask = regs->mask;
+
+ printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
+ type,
+ mask,
+ regs_dump_abi(regs));
+
+ regs_dump__printf(mask, regs->regs);
+}
+
static void regs_user__printf(struct perf_sample *sample)
{
struct regs_dump *user_regs = &sample->user_regs;
- if (user_regs->regs) {
- u64 mask = user_regs->mask;
- printf("... user regs: mask 0x%" PRIx64 "\n", mask);
- regs_dump__printf(mask, user_regs->regs);
- }
+ if (user_regs->regs)
+ regs__printf("user", user_regs);
+}
+
+static void regs_intr__printf(struct perf_sample *sample)
+{
+ struct regs_dump *intr_regs = &sample->intr_regs;
+
+ if (intr_regs->regs)
+ regs__printf("intr", intr_regs);
}
static void stack_user__printf(struct stack_dump *dump)
@@ -687,6 +726,9 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_REGS_USER)
regs_user__printf(sample);
+ if (sample_type & PERF_SAMPLE_REGS_INTR)
+ regs_intr__printf(sample);
+
if (sample_type & PERF_SAMPLE_STACK_USER)
stack_user__printf(&sample->user_stack);
@@ -888,11 +930,26 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
return tool->finished_round(tool, event, session);
+ case PERF_RECORD_ID_INDEX:
+ return tool->id_index(tool, event, session);
default:
return -EINVAL;
}
}
+int perf_session__deliver_synth_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool)
+{
+ events_stats__inc(&session->stats, event->header.type);
+
+ if (event->header.type >= PERF_RECORD_USER_TYPE_START)
+ return perf_session__process_user_event(session, event, tool, 0);
+
+ return perf_session__deliver_event(session, event, sample, tool, 0);
+}
+
static void event_swap(union perf_event *event, bool sample_id_all)
{
perf_event__swap_op swap;
@@ -1057,6 +1114,9 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
"Do you have a KVM guest running and not using 'perf kvm'?\n",
session->stats.nr_unprocessable_samples);
}
+
+ if (session->stats.nr_unordered_events != 0)
+ ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
}
volatile int session_done;
@@ -1417,9 +1477,9 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
if (symbol_conf.use_callchain && sample->callchain) {
struct addr_location node_al;
- if (machine__resolve_callchain(al->machine, evsel, al->thread,
- sample, NULL, NULL,
- PERF_MAX_STACK_DEPTH) != 0) {
+ if (thread__resolve_callchain(al->thread, evsel,
+ sample, NULL, NULL,
+ PERF_MAX_STACK_DEPTH) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
@@ -1594,3 +1654,111 @@ int __perf_session__set_tracepoints_handlers(struct perf_session *session,
out:
return err;
}
+
+int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct id_index_event *ie = &event->id_index;
+ size_t i, nr, max_nr;
+
+ max_nr = (ie->header.size - sizeof(struct id_index_event)) /
+ sizeof(struct id_index_entry);
+ nr = ie->nr;
+ if (nr > max_nr)
+ return -EINVAL;
+
+ if (dump_trace)
+ fprintf(stdout, " nr: %zu\n", nr);
+
+ for (i = 0; i < nr; i++) {
+ struct id_index_entry *e = &ie->entries[i];
+ struct perf_sample_id *sid;
+
+ if (dump_trace) {
+ fprintf(stdout, " ... id: %"PRIu64, e->id);
+ fprintf(stdout, " idx: %"PRIu64, e->idx);
+ fprintf(stdout, " cpu: %"PRId64, e->cpu);
+ fprintf(stdout, " tid: %"PRId64"\n", e->tid);
+ }
+
+ sid = perf_evlist__id2sid(evlist, e->id);
+ if (!sid)
+ return -ENOENT;
+ sid->idx = e->idx;
+ sid->cpu = e->cpu;
+ sid->tid = e->tid;
+ }
+ return 0;
+}
+
+int perf_event__synthesize_id_index(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct perf_evlist *evlist,
+ struct machine *machine)
+{
+ union perf_event *ev;
+ struct perf_evsel *evsel;
+ size_t nr = 0, i = 0, sz, max_nr, n;
+ int err;
+
+ pr_debug2("Synthesizing id index\n");
+
+ max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
+ sizeof(struct id_index_entry);
+
+ evlist__for_each(evlist, evsel)
+ nr += evsel->ids;
+
+ n = nr > max_nr ? max_nr : nr;
+ sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
+ ev = zalloc(sz);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->id_index.header.type = PERF_RECORD_ID_INDEX;
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = n;
+
+ evlist__for_each(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->ids; j++) {
+ struct id_index_entry *e;
+ struct perf_sample_id *sid;
+
+ if (i >= n) {
+ err = process(tool, ev, NULL, machine);
+ if (err)
+ goto out_err;
+ nr -= n;
+ i = 0;
+ }
+
+ e = &ev->id_index.entries[i++];
+
+ e->id = evsel->id[j];
+
+ sid = perf_evlist__id2sid(evlist, e->id);
+ if (!sid) {
+ free(ev);
+ return -ENOENT;
+ }
+
+ e->idx = sid->idx;
+ e->cpu = sid->cpu;
+ e->tid = sid->tid;
+ }
+ }
+
+ sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
+ ev->id_index.header.size = sz;
+ ev->id_index.nr = nr;
+
+ err = process(tool, ev, NULL, machine);
+out_err:
+ free(ev);
+
+ return err;
+}