author    Arnaldo Carvalho de Melo <acme@redhat.com>  2012-12-10 14:53:43 -0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>  2012-12-10 14:53:43 -0300
commit    7be5ebe8767eaa482e18f566de5f56c1519abf59 (patch)
tree      353f1845712fbe56ac6990dbc8fb83e07dc6e06a /tools/perf/util/evsel.c
parent    3f067dcab711c2df7eefcfc5b3aa9a0e2b5f7d42 (diff)
perf evsel: Update sample_size when setting sample_type bits
We use evsel->sample_size to detect underflows in perf_evsel__parse_sample,
but we were failing to update it after perf_evsel__init(), i.e. when we
decide, after creating an evsel, that we want some extra field bit set.

Fix it by introducing methods to set a bit that will take care of correctly
adjusting evsel->sample_size.

Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-2ny5pzsing0dcth7hws48x9c@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
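Note: the callers in the diff below use the short-form wrappers, e.g. perf_evsel__set_sample_bit(evsel, TID); the wrappers themselves live in evsel.h, which is outside this file's diff. A minimal sketch of how they are presumed to be defined and used, assuming the usual PERF_SAMPLE_##bit token-pasting pattern; example_enable_time_field() is a hypothetical caller, not part of the patch:

/* Sketch of the evsel.h side (not shown in this diff, assumed): the macros
 * paste the short bit name onto PERF_SAMPLE_ before calling the new helpers. */
#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

/* Hypothetical caller: deciding after perf_evsel__init() that we also want
 * PERF_SAMPLE_TIME now keeps evsel->sample_size in step with
 * attr.sample_type, so the underflow check in perf_evsel__parse_sample()
 * stays accurate. */
static void example_enable_time_field(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, TIME);	/* sample_size += sizeof(u64) */
}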
Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--	tools/perf/util/evsel.c	45
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index bb58b05f905..fc80f5a32fa 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -50,6 +50,24 @@ void hists__init(struct hists *hists)
 	pthread_mutex_init(&hists->lock, NULL);
 }
 
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+				  enum perf_event_sample_format bit)
+{
+	if (!(evsel->attr.sample_type & bit)) {
+		evsel->attr.sample_type |= bit;
+		evsel->sample_size += sizeof(u64);
+	}
+}
+
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+				    enum perf_event_sample_format bit)
+{
+	if (evsel->attr.sample_type & bit) {
+		evsel->attr.sample_type &= ~bit;
+		evsel->sample_size -= sizeof(u64);
+	}
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
 		      struct perf_event_attr *attr, int idx)
 {
@@ -445,7 +463,8 @@ void perf_evsel__config(struct perf_evsel *evsel,
 				  PERF_FORMAT_TOTAL_TIME_RUNNING |
 				  PERF_FORMAT_ID;
 
-	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+	perf_evsel__set_sample_bit(evsel, IP);
+	perf_evsel__set_sample_bit(evsel, TID);
 
 	/*
 	 * We default some events to a 1 default interval. But keep
@@ -454,7 +473,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
 				     opts->user_interval != ULLONG_MAX)) {
 		if (opts->freq) {
-			attr->sample_type |= PERF_SAMPLE_PERIOD;
+			perf_evsel__set_sample_bit(evsel, PERIOD);
 			attr->freq = 1;
 			attr->sample_freq = opts->freq;
 		} else {
@@ -469,16 +488,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		attr->inherit_stat = 1;
 
 	if (opts->sample_address) {
-		attr->sample_type |= PERF_SAMPLE_ADDR;
+		perf_evsel__set_sample_bit(evsel, ADDR);
 		attr->mmap_data = track;
 	}
 
 	if (opts->call_graph) {
-		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
+		perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
 		if (opts->call_graph == CALLCHAIN_DWARF) {
-			attr->sample_type |= PERF_SAMPLE_REGS_USER |
-					     PERF_SAMPLE_STACK_USER;
+			perf_evsel__set_sample_bit(evsel, REGS_USER);
+			perf_evsel__set_sample_bit(evsel, STACK_USER);
 			attr->sample_regs_user = PERF_REGS_MASK;
 			attr->sample_stack_user = opts->stack_dump_size;
 			attr->exclude_callchain_user = 1;
@@ -486,20 +505,20 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	}
 
 	if (perf_target__has_cpu(&opts->target))
-		attr->sample_type |= PERF_SAMPLE_CPU;
+		perf_evsel__set_sample_bit(evsel, CPU);
 
 	if (opts->period)
-		attr->sample_type |= PERF_SAMPLE_PERIOD;
+		perf_evsel__set_sample_bit(evsel, PERIOD);
 
 	if (!opts->sample_id_all_missing &&
 	    (opts->sample_time || !opts->no_inherit ||
 	     perf_target__has_cpu(&opts->target)))
-		attr->sample_type |= PERF_SAMPLE_TIME;
+		perf_evsel__set_sample_bit(evsel, TIME);
 
 	if (opts->raw_samples) {
-		attr->sample_type |= PERF_SAMPLE_TIME;
-		attr->sample_type |= PERF_SAMPLE_RAW;
-		attr->sample_type |= PERF_SAMPLE_CPU;
+		perf_evsel__set_sample_bit(evsel, TIME);
+		perf_evsel__set_sample_bit(evsel, RAW);
+		perf_evsel__set_sample_bit(evsel, CPU);
 	}
 
 	if (opts->no_delay) {
@@ -507,7 +526,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		attr->wakeup_events = 1;
 	}
 
 	if (opts->branch_stack) {
-		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
+		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
 		attr->branch_sample_type = opts->branch_stack;
 	}