Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c | 108
1 file changed, 81 insertions(+), 27 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e8177136486..7ff3c8fb736 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -8,6 +8,7 @@
*/
#include <byteswap.h>
+#include <linux/bitops.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
@@ -16,11 +17,12 @@
#include "thread_map.h"
#include "target.h"
#include "../../../include/linux/hw_breakpoint.h"
+#include "../../include/linux/perf_event.h"
+#include "perf_regs.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
-int __perf_evsel__sample_size(u64 sample_type)
+static int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -53,6 +55,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->attr = *attr;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
+ evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
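The new evsel->sample_size caches the byte size of the fixed-width part of a sample for this event's sample_type, so the parser further down can reject a record whose header.size is too small before walking it. A minimal stand-alone sketch of that calculation, not the perf source itself (the mask value and the field set are simplified assumptions):

#include <stdint.h>
#include <stdio.h>

/* Sketch: every fixed-width sample field (IP, TID, TIME, ...) contributes
 * exactly one u64, so the size is a popcount of the selected bits times 8. */
static int fixed_sample_size(uint64_t sample_type, uint64_t fixed_mask)
{
	int size = 0;

	sample_type &= fixed_mask;
	while (sample_type) {
		sample_type &= sample_type - 1;	/* clear the lowest set bit */
		size += sizeof(uint64_t);
	}
	return size;
}

int main(void)
{
	/* Pretend bits 0-2 (IP, TID, TIME) are selected and all are fixed-width. */
	printf("%d bytes\n", fixed_sample_size(0x7, 0xff));	/* prints "24 bytes" */
	return 0;
}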
@@ -316,7 +319,8 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
break;
default:
- scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
+ scnprintf(bf, sizeof(bf), "unknown attr type: %d",
+ evsel->attr.type);
break;
}
@@ -366,9 +370,18 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
attr->mmap_data = track;
}
- if (opts->call_graph)
+ if (opts->call_graph) {
attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+ if (opts->call_graph == CALLCHAIN_DWARF) {
+ attr->sample_type |= PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_STACK_USER;
+ attr->sample_regs_user = PERF_REGS_MASK;
+ attr->sample_stack_user = opts->stack_dump_size;
+ attr->exclude_callchain_user = 1;
+ }
+ }
+
if (perf_target__has_cpu(&opts->target))
attr->sample_type |= PERF_SAMPLE_CPU;
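With the DWARF call-graph mode the attr now asks the kernel to snapshot the sampled task's user registers and a slice of its user stack with every sample, and to skip the kernel-side user callchain walk (exclude_callchain_user); the user-space unwind is then done later in the tool. A rough stand-alone sketch of the equivalent raw setup, assuming a perf_event.h new enough to carry the REGS_USER/STACK_USER ABI; the register mask and dump size below are placeholders for PERF_REGS_MASK and opts->stack_dump_size:

#include <linux/perf_event.h>
#include <string.h>

static void setup_dwarf_callchain(struct perf_event_attr *attr)
{
	attr->sample_type |= PERF_SAMPLE_CALLCHAIN |
			     PERF_SAMPLE_REGS_USER |
			     PERF_SAMPLE_STACK_USER;
	attr->sample_regs_user = 0xff;		/* placeholder for PERF_REGS_MASK */
	attr->sample_stack_user = 8192;		/* placeholder for opts->stack_dump_size */
	attr->exclude_callchain_user = 1;	/* user unwind happens in the tool */
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	setup_dwarf_callchain(&attr);
	return 0;
}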
@@ -480,6 +493,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
{
perf_evsel__exit(evsel);
close_cgroup(evsel->cgrp);
+ free(evsel->group_name);
free(evsel->name);
free(evsel);
}
@@ -555,9 +569,28 @@ int __perf_evsel__read(struct perf_evsel *evsel,
return 0;
}
+static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int fd;
+
+ if (!leader)
+ return -1;
+
+ /*
+ * Leader must be already processed/open,
+ * if not it's a bug.
+ */
+ BUG_ON(!leader->fd);
+
+ fd = FD(leader, cpu, thread);
+ BUG_ON(fd == -1);
+
+ return fd;
+}
+
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group,
- struct xyarray *group_fds)
+ struct thread_map *threads)
{
int cpu, thread;
unsigned long flags = 0;
@@ -573,13 +606,15 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
}
for (cpu = 0; cpu < cpus->nr; cpu++) {
- int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
for (thread = 0; thread < threads->nr; thread++) {
+ int group_fd;
if (!evsel->cgrp)
pid = threads->map[thread];
+ group_fd = get_group_fd(evsel, cpu, thread);
+
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
pid,
cpus->map[cpu],
@@ -588,9 +623,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
err = -errno;
goto out_close;
}
-
- if (group && group_fd == -1)
- group_fd = FD(evsel, cpu, thread);
}
}
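The removed group/group_fd plumbing made every caller thread an xyarray of leader fds through the open path; grouping is now derived from evsel->leader, and get_group_fd() simply hands back the leader's already-open fd (or -1 for a leader or an ungrouped event). That mirrors the syscall contract: the fourth argument of perf_event_open() is the group leader's fd, or -1 to start a new group. A rough self-contained sketch of that chaining (hypothetical event choices, counting the current process on any CPU):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long open_event(struct perf_event_attr *attr, pid_t pid, int cpu,
		       int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader_fd, member_fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* The leader (or any ungrouped event) is opened with group_fd == -1 ... */
	leader_fd = open_event(&attr, 0, -1, -1, 0);
	if (leader_fd < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	/* ... and every other member of the group passes the leader's fd. */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	member_fd = open_event(&attr, 0, -1, leader_fd, 0);
	if (member_fd < 0)
		perror("perf_event_open (member)");
	else
		close(member_fd);

	close(leader_fd);
	return 0;
}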
@@ -634,8 +666,7 @@ static struct {
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group,
- struct xyarray *group_fd)
+ struct thread_map *threads)
{
if (cpus == NULL) {
/* Work around old compiler warnings about strict aliasing */
@@ -645,23 +676,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
if (threads == NULL)
threads = &empty_thread_map.map;
- return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
+ return __perf_evsel__open(evsel, cpus, threads);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group,
- struct xyarray *group_fd)
+ struct cpu_map *cpus)
{
- return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
- group_fd);
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group,
- struct xyarray *group_fd)
+ struct thread_map *threads)
{
- return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
- group_fd);
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
@@ -728,10 +755,11 @@ static bool sample_overlap(const union perf_event *event,
return false;
}
-int perf_event__parse_sample(const union perf_event *event, u64 type,
- int sample_size, bool sample_id_all,
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data, bool swapped)
{
+ u64 type = evsel->attr.sample_type;
+ u64 regs_user = evsel->attr.sample_regs_user;
const u64 *array;
/*
@@ -746,14 +774,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
data->period = 1;
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!sample_id_all)
+ if (!evsel->attr.sample_id_all)
return 0;
return perf_event__parse_id_sample(event, type, data, swapped);
}
array = event->sample.array;
- if (sample_size + sizeof(event->header) > event->header.size)
+ if (evsel->sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
if (type & PERF_SAMPLE_IP) {
@@ -868,6 +896,32 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
sz /= sizeof(u64);
array += sz;
}
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ /* First u64 tells us if we have any regs in sample. */
+ u64 avail = *array++;
+
+ if (avail) {
+ data->user_regs.regs = (u64 *)array;
+ array += hweight_long(regs_user);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ u64 size = *array++;
+
+ data->user_stack.offset = ((char *)(array - 1)
+ - (char *) event);
+
+ if (!size) {
+ data->user_stack.size = 0;
+ } else {
+ data->user_stack.data = (char *)array;
+ array += size / sizeof(*array);
+ data->user_stack.size = *array;
+ }
+ }
+
return 0;
}
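The two new blocks consume the user-register and user-stack dumps the kernel appends at the tail of PERF_RECORD_SAMPLE when the bits above are requested. For reference, the layout they walk is roughly the following (a descriptive sketch, not a compilable declaration; the counts depend on attr.sample_regs_user and attr.sample_stack_user):

/*
 * ... fixed-width fields, callchain, raw data, branch stack ...
 *
 * PERF_SAMPLE_REGS_USER:
 *   u64  avail;                                  // 0 when no user regs were captured
 *   u64  regs[hweight(attr.sample_regs_user)];   // only present if avail != 0
 *
 * PERF_SAMPLE_STACK_USER:
 *   u64  size;                                   // space reserved for the dump
 *   char data[size];                             // raw copy of the user stack
 *   u64  dyn_size;                               // bytes actually dumped; only
 *                                                // present if size != 0
 */

The trailing dyn_size is what the parser stores as user_stack.size, since the reserved area can be larger than what the kernel actually copied.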
@@ -895,7 +949,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[1] = sample->tid;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
@@ -930,7 +984,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[0] = sample->cpu;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val64 = bswap_64(u.val64);