Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/Makefile                     |  16
-rw-r--r--  tools/perf/builtin-record.c             |   9
-rw-r--r--  tools/perf/builtin-stat.c               |   7
-rw-r--r--  tools/perf/builtin-test.c               |  12
-rw-r--r--  tools/perf/builtin-top.c                |  11
-rw-r--r--  tools/perf/util/evlist.c                | 157
-rw-r--r--  tools/perf/util/evlist.h                |   3
-rw-r--r--  tools/perf/util/evsel.c                 |  27
-rw-r--r--  tools/perf/util/evsel.h                 |   6
-rw-r--r--  tools/perf/util/python.c                |  11
-rw-r--r--  tools/perf/util/ui/browsers/annotate.c  |   6
-rw-r--r--  tools/perf/util/ui/browsers/hists.c     |   2
12 files changed, 167 insertions(+), 100 deletions(-)
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 207dee5c5b1..0c542563ea6 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -35,15 +35,21 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ )
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+
# Additional ARCH settings for x86
ifeq ($(ARCH),i386)
ARCH := x86
endif
ifeq ($(ARCH),x86_64)
- RAW_ARCH := x86_64
- ARCH := x86
- ARCH_CFLAGS := -DARCH_X86_64
- ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+ ARCH := x86
+ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+ ifeq (${IS_X86_64}, 1)
+ RAW_ARCH := x86_64
+ ARCH_CFLAGS := -DARCH_X86_64
+ ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+ endif
endif
#
@@ -119,8 +125,6 @@ lib = lib
export prefix bindir sharedir sysconfdir
-CC = $(CROSS_COMPILE)gcc
-AR = $(CROSS_COMPILE)ar
RM = rm -f
MKDIR = mkdir
FIND = find
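
The Makefile hunk above stops trusting uname for the word size and instead asks the compiler: the token __x86_64__ is piped through $(CC) -E, and a compiler that targets x86_64 expands it to 1, so IS_X86_64 becomes 1 only when the (possibly cross) compiler really produces 64-bit code. The same test, written as a standalone C program purely for illustration (not part of the patch):

#include <stdio.h>

int main(void)
{
#ifdef __x86_64__
	puts("1");		/* macro defined: IS_X86_64 ends up as "1" */
#else
	puts("__x86_64__");	/* macro undefined: the token survives preprocessing unchanged */
#endif
	return 0;
}
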
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 17d1dcb3c66..0974f957b8f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
+ attr->inherit = !no_inherit;
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING |
PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
{
struct perf_evsel *pos;
+ if (evlist->cpus->map[0] < 0)
+ no_inherit = true;
+
list_for_each_entry(pos, &evlist->entries, node) {
struct perf_event_attr *attr = &pos->attr;
/*
@@ -271,8 +275,7 @@ static void open_counters(struct perf_evlist *evlist)
retry_sample_id:
attr->sample_id_all = sample_id_all_avail ? 1 : 0;
try_again:
- if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
- !no_inherit) < 0) {
+ if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
@@ -424,7 +427,7 @@ static void mmap_read_all(void)
{
int i;
- for (i = 0; i < evsel_list->cpus->nr; i++) {
+ for (i = 0; i < evsel_list->nr_mmaps; i++) {
if (evsel_list->mmap[i].base)
mmap_read(&evsel_list->mmap[i]);
}
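
Taken together with the stat and top hunks below, the open path no longer carries an inherit argument; callers set attr->inherit before calling perf_evsel__open(). A minimal sketch of the new calling convention, assuming the perf-internal headers (util/evlist.h, util/evsel.h, util/debug.h, <errno.h>, <string.h>) and a hypothetical helper name open_all_counters():

static void open_all_counters(struct perf_evlist *evlist, bool group, bool no_inherit)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		/* inherit now travels in the attr, not as an open() parameter */
		pos->attr.inherit = !no_inherit;

		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0)
			pr_debug("failed to open counter: %s\n", strerror(errno));
	}
}

This is the "minimal fix" the removed FIXME in evsel.c asked for: the per-task inherit-plus-mmap restriction is now reported via ui__warning() in __perf_evlist__mmap() instead of inherit being silently cleared at open time.
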
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e2109f9b43e..03f0e45f147 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -167,16 +167,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
+ attr->inherit = !no_inherit;
+
if (system_wide)
- return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+ return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
- attr->inherit = !no_inherit;
if (target_pid == -1 && target_tid == -1) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
- return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+ return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
}
/*
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1b2106c58f6..2f9a337b182 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+ if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
}
if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
- pr_debug("perf_evsel__open_read_on_cpu\n");
+ pr_debug("perf_evsel__read_on_cpu\n");
goto out_close_fd;
}
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
goto out_thread_map_delete;
}
- if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+ if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
continue;
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
- pr_debug("perf_evsel__open_read_on_cpu\n");
+ pr_debug("perf_evsel__read_on_cpu\n");
err = -1;
break;
}
@@ -529,7 +529,7 @@ static int test__basic_mmap(void)
perf_evlist__add(evlist, evsels[i]);
- if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+ if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
strerror(errno));
@@ -549,7 +549,7 @@ static int test__basic_mmap(void)
++foo;
}
- while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+ while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
struct perf_sample sample;
if (event->header.type != PERF_RECORD_SAMPLE) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fc1273e976c..ebfc7cf5f63 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event,
}
}
-static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
+static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
{
struct perf_sample sample;
union perf_event *event;
- while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+ while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
perf_session__parse_sample(self, event, &sample);
if (event->header.type == PERF_RECORD_SAMPLE)
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self)
{
int i;
- for (i = 0; i < top.evlist->cpus->nr; i++)
- perf_session__mmap_read_cpu(self, i);
+ for (i = 0; i < top.evlist->nr_mmaps; i++)
+ perf_session__mmap_read_idx(self, i);
}
static void start_counters(struct perf_evlist *evlist)
@@ -845,9 +845,10 @@ static void start_counters(struct perf_evlist *evlist)
}
attr->mmap = 1;
+ attr->inherit = inherit;
try_again:
if (perf_evsel__open(counter, top.evlist->cpus,
- top.evlist->threads, group, inherit) < 0) {
+ top.evlist->threads, group) < 0) {
int err = errno;
if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d852cefa20d..23eb22b05d2 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,6 +12,7 @@
#include "evlist.h"
#include "evsel.h"
#include "util.h"
+#include "debug.h"
#include <sys/mman.h>
@@ -165,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
return NULL;
}
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
/* XXX Move this to perf.c, making it generally available */
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
- struct perf_mmap *md = &evlist->mmap[cpu];
+ struct perf_mmap *md = &evlist->mmap[idx];
unsigned int head = perf_mmap__read_head(md);
unsigned int old = md->prev;
unsigned char *data = md->base + page_size;
@@ -234,36 +235,126 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
void perf_evlist__munmap(struct perf_evlist *evlist)
{
- int cpu;
+ int i;
- for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
- if (evlist->mmap[cpu].base != NULL) {
- munmap(evlist->mmap[cpu].base, evlist->mmap_len);
- evlist->mmap[cpu].base = NULL;
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ if (evlist->mmap[i].base != NULL) {
+ munmap(evlist->mmap[i].base, evlist->mmap_len);
+ evlist->mmap[i].base = NULL;
}
}
+
+ free(evlist->mmap);
+ evlist->mmap = NULL;
}
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
- evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+ evlist->nr_mmaps = evlist->cpus->nr;
+ if (evlist->cpus->map[0] == -1)
+ evlist->nr_mmaps = evlist->threads->nr;
+ evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
- int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+ int idx, int prot, int mask, int fd)
{
- evlist->mmap[cpu].prev = 0;
- evlist->mmap[cpu].mask = mask;
- evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+ evlist->mmap[idx].prev = 0;
+ evlist->mmap[idx].mask = mask;
+ evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
MAP_SHARED, fd, 0);
- if (evlist->mmap[cpu].base == MAP_FAILED)
+ if (evlist->mmap[idx].base == MAP_FAILED) {
+ if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
+ ui__warning("Inherit is not allowed on per-task "
+ "events using mmap.\n");
return -1;
+ }
perf_evlist__add_pollfd(evlist, fd);
return 0;
}
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+{
+ struct perf_evsel *evsel;
+ int cpu, thread;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ int output = -1;
+
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, cpu, thread);
+
+ if (output == -1) {
+ output = fd;
+ if (__perf_evlist__mmap(evlist, evsel, cpu,
+ prot, mask, output) < 0)
+ goto out_unmap;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+ goto out_unmap;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+ goto out_unmap;
+ }
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ if (evlist->mmap[cpu].base != NULL) {
+ munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+ evlist->mmap[cpu].base = NULL;
+ }
+ }
+ return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+{
+ struct perf_evsel *evsel;
+ int thread;
+
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ int output = -1;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ int fd = FD(evsel, 0, thread);
+
+ if (output == -1) {
+ output = fd;
+ if (__perf_evlist__mmap(evlist, evsel, thread,
+ prot, mask, output) < 0)
+ goto out_unmap;
+ } else {
+ if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+ goto out_unmap;
+ }
+
+ if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+ perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
+ goto out_unmap;
+ }
+ }
+
+ return 0;
+
+out_unmap:
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ if (evlist->mmap[thread].base != NULL) {
+ munmap(evlist->mmap[thread].base, evlist->mmap_len);
+ evlist->mmap[thread].base = NULL;
+ }
+ }
+ return -1;
+}
+
/** perf_evlist__mmap - Create per cpu maps to receive events
*
* @evlist - list of events
@@ -282,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
unsigned int page_size = sysconf(_SC_PAGE_SIZE);
- int mask = pages * page_size - 1, cpu;
- struct perf_evsel *first_evsel, *evsel;
+ int mask = pages * page_size - 1;
+ struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+ int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
return -ENOMEM;
@@ -296,42 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
evlist->overwrite = overwrite;
evlist->mmap_len = (pages + 1) * page_size;
- first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
list_for_each_entry(evsel, &evlist->entries, node) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
return -ENOMEM;
-
- for (cpu = 0; cpu < cpus->nr; cpu++) {
- for (thread = 0; thread < threads->nr; thread++) {
- int fd = FD(evsel, cpu, thread);
-
- if (evsel->idx || thread) {
- if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
- FD(first_evsel, cpu, 0)) != 0)
- goto out_unmap;
- } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
- goto out_unmap;
-
- if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
- perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
- goto out_unmap;
- }
- }
}
- return 0;
+ if (evlist->cpus->map[0] == -1)
+ return perf_evlist__mmap_per_thread(evlist, prot, mask);
-out_unmap:
- for (cpu = 0; cpu < cpus->nr; cpu++) {
- if (evlist->mmap[cpu].base != NULL) {
- munmap(evlist->mmap[cpu].base, evlist->mmap_len);
- evlist->mmap[cpu].base = NULL;
- }
- }
- return -1;
+ return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
@@ -342,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
if (evlist->threads == NULL)
return -1;
- if (target_tid != -1)
+ if (cpu_list == NULL && target_tid != -1)
evlist->cpus = cpu_map__dummy_new();
else
evlist->cpus = cpu_map__new(cpu_list);
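
With the ring buffers now indexed by mmap slot (per CPU, or per thread when the cpu map is the dummy -1 map), readers iterate over evlist->nr_mmaps and call perf_evlist__mmap_read(), exactly as builtin-record and builtin-top do above. A minimal consumer sketch under those assumptions, with event processing elided:

static void read_all_mmaps(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base == NULL)
			continue;	/* slot was never mapped */

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* parse and deliver the sample here */
		}
	}
}

When cpus->map[0] is -1 there is no real CPU to key the buffers on, which is why perf_evlist__alloc_mmap() above sizes the array by threads->nr in that case and perf_evlist__mmap() dispatches to the per-thread variant.
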
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 8b1cb7a4c5f..7109d7add14 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -17,6 +17,7 @@ struct perf_evlist {
struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
int nr_entries;
int nr_fds;
+ int nr_mmaps;
int mmap_len;
bool overwrite;
union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 662596afd7f..d6fd59beb86 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -175,7 +175,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
}
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
int cpu, thread;
unsigned long flags = 0;
@@ -192,19 +192,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) {
int group_fd = -1;
- /*
- * Don't allow mmap() of inherited per-task counters. This
- * would create a performance issue due to all children writing
- * to the same buffer.
- *
- * FIXME:
- * Proper fix is not to pass 'inherit' to perf_evsel__open*,
- * but a 'flags' parameter, with 'group' folded there as well,
- * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
- * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
- * set. Lets go for the minimal fix first tho.
- */
- evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
for (thread = 0; thread < threads->nr; thread++) {
@@ -253,7 +240,7 @@ static struct {
};
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
if (cpus == NULL) {
/* Work around old compiler warnings about strict aliasing */
@@ -263,19 +250,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
if (threads == NULL)
threads = &empty_thread_map.map;
- return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+ return __perf_evsel__open(evsel, cpus, threads, group);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group, bool inherit)
+ struct cpu_map *cpus, bool group)
{
- return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+ return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group, bool inherit)
+ struct thread_map *threads, bool group)
{
- return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+ return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6710ab53834..f79bb2c09a6 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
- struct cpu_map *cpus, bool group, bool inherit);
+ struct cpu_map *cpus, bool group);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
- struct thread_map *threads, bool group, bool inherit);
+ struct thread_map *threads, bool group);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
- struct thread_map *threads, bool group, bool inherit);
+ struct thread_map *threads, bool group);
#define perf_evsel__match(evsel, t, c) \
(evsel->attr.type == PERF_TYPE_##t && \
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a9f2d7e1204..99c722672f8 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
struct cpu_map *cpus = NULL;
struct thread_map *threads = NULL;
PyObject *pcpus = NULL, *pthreads = NULL;
- int group = 0, overwrite = 0;
- static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+ int group = 0, inherit = 0;
+ static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
- &pcpus, &pthreads, &group, &overwrite))
+ &pcpus, &pthreads, &group, &inherit))
return NULL;
if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
if (pcpus != NULL)
cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
- if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+ evsel->attr.inherit = inherit;
+ if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
@@ -679,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
&cpu, &sample_id_all))
return NULL;
- event = perf_evlist__read_on_cpu(evlist, cpu);
+ event = perf_evlist__mmap_read(evlist, cpu);
if (event != NULL) {
struct perf_evsel *first;
PyObject *pyevent = pyrf_event__new(event);
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 8c17a8730e4..15633d60813 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -256,10 +256,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
int refresh)
{
struct objdump_line *pos, *n;
- struct annotation *notes = symbol__annotation(sym);
+ struct annotation *notes;
struct annotate_browser browser = {
.b = {
- .entries = &notes->src->source,
.refresh = ui_browser__list_head_refresh,
.seek = ui_browser__list_head_seek,
.write = annotate_browser__write,
@@ -281,6 +280,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
ui_helpline__push("Press <- or ESC to exit");
+ notes = symbol__annotation(sym);
+
list_for_each_entry(pos, &notes->src->source, node) {
struct objdump_line_rb_node *rbpos;
size_t line_len = strlen(pos->line);
@@ -291,6 +292,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
rbpos->idx = browser.b.nr_entries++;
}
+ browser.b.entries = &notes->src->source,
browser.b.width += 18; /* Percentage */
ret = annotate_browser__run(&browser, evidx, refresh);
list_for_each_entry_safe(pos, n, &notes->src->source, node) {
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 798efdca3ea..5d767c622df 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
goto out_free_stack;
case 'a':
if (browser->selection == NULL ||
- browser->selection->map == NULL ||
+ browser->selection->sym == NULL ||
browser->selection->map->dso->annotate_warned)
continue;
goto do_annotate;