path: root/include/linux/perf_counter.h
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-25 11:27:12 +0200
committer	Ingo Molnar <mingo@elte.hu>			2009-06-25 21:39:08 +0200
commit		e6e18ec79b023d5fe84226cef533cf0e3770ce93 (patch)
tree		6fc1bd9afd21454864abe2aec6a0e35e17d47f04 /include/linux/perf_counter.h
parent		bfbd3381e63aa2a14c6706afb50ce4630aa0d9a2 (diff)
perf_counter: Rework the sample ABI
The PERF_EVENT_READ implementation made me realize we don't actually need the sample_type in the output sample, since we already have that in the perf_counter_attr information.

Therefore, remove the PERF_EVENT_MISC_OVERFLOW bit and the event->type overloading, and simply put counter overflow samples in a PERF_EVENT_SAMPLE type.

This also fixes the issue that event->type was only 32 bits while sample_type had 64 usable bits.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
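To make the ABI change concrete, here is a minimal, hypothetical userspace sketch (not part of this patch or of the perf tooling) of how a consumer of the counter mmap ring buffer would dispatch records after the rework: overflow samples are recognized by header.type == PERF_EVENT_SAMPLE instead of by the removed PERF_EVENT_MISC_OVERFLOW bit. process_record() is an assumed helper name, and the sketch presumes the updated perf_counter.h is visible to userspace.

#include <stdio.h>
#include <linux/perf_counter.h>

/* Hypothetical dispatch over records read from the counter mmap ring. */
static void process_record(const struct perf_event_header *hdr)
{
	switch (hdr->type) {
	case PERF_EVENT_SAMPLE:
		/* Previously flagged via header.misc & PERF_EVENT_MISC_OVERFLOW,
		 * with PERF_SAMPLE_* bits overloaded into header.type; now an
		 * ordinary record type whose body layout is implied by the
		 * counter's attr.sample_type. */
		printf("sample record, %u bytes\n", (unsigned int)hdr->size);
		break;
	case PERF_EVENT_READ:
		printf("read record, %u bytes\n", (unsigned int)hdr->size);
		break;
	default:
		printf("record type %u\n", (unsigned int)hdr->type);
		break;
	}
}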
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--	include/linux/perf_counter.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index de70a10b5ec..3078e23c91e 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -262,7 +262,6 @@ struct perf_counter_mmap_page {
#define PERF_EVENT_MISC_KERNEL (1 << 0)
#define PERF_EVENT_MISC_USER (2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
@@ -348,9 +347,6 @@ enum perf_event_type {
PERF_EVENT_READ = 8,
/*
- * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_SAMPLE_*
- *
* struct {
* struct perf_event_header header;
*
@@ -358,8 +354,9 @@ enum perf_event_type {
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
* { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u64 id; } && PERF_SAMPLE_ID
* { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
* { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
@@ -368,6 +365,9 @@ enum perf_event_type {
* u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
+ PERF_EVENT_SAMPLE = 9,
+
+ PERF_EVENT_MAX, /* non-ABI */
};
enum perf_callchain_context {
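The comment block updated above spells out the field order of a PERF_EVENT_SAMPLE body. As an illustration only, the following hypothetical helper (parse_sample() is not a kernel or perf-tools function) walks those optional fields in that order, driven by the attr.sample_type bits the counter was opened with; the group and callchain payloads are omitted for brevity.

#include <stdint.h>
#include <linux/perf_counter.h>

struct parsed_sample {
	uint64_t ip, time, addr, id, period;
	uint32_t pid, tid, cpu;
};

/* Walk a PERF_EVENT_SAMPLE body; returns a pointer past the parsed fields. */
static const void *parse_sample(const void *p, uint64_t sample_type,
				struct parsed_sample *s)
{
	const uint64_t *u64p = p;

	if (sample_type & PERF_SAMPLE_IP)
		s->ip = *u64p++;
	if (sample_type & PERF_SAMPLE_TID) {
		const uint32_t *u32p = (const uint32_t *)u64p;
		s->pid = u32p[0];
		s->tid = u32p[1];
		u64p++;
	}
	if (sample_type & PERF_SAMPLE_TIME)
		s->time = *u64p++;
	if (sample_type & PERF_SAMPLE_ADDR)
		s->addr = *u64p++;
	if (sample_type & PERF_SAMPLE_ID)
		s->id = *u64p++;
	if (sample_type & PERF_SAMPLE_CPU) {
		const uint32_t *u32p = (const uint32_t *)u64p;
		s->cpu = u32p[0];	/* the second u32 (res) is padding */
		u64p++;
	}
	if (sample_type & PERF_SAMPLE_PERIOD)
		s->period = *u64p++;

	return u64p;	/* GROUP and CALLCHAIN parsing omitted */
}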