Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/init_task.h                                               |  14
-rw-r--r--   include/linux/perf_event.h (renamed from include/linux/perf_counter.h)  | 306
-rw-r--r--   include/linux/prctl.h                                                   |   4
-rw-r--r--   include/linux/sched.h                                                   |  12
-rw-r--r--   include/linux/syscalls.h                                                |   6
5 files changed, 171 insertions, 171 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e7f2e8fc66..21a6f5d9af2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -106,13 +106,13 @@ extern struct group_info init_groups;
 
 extern struct cred init_cred;
 
-#ifdef CONFIG_PERF_COUNTERS
-# define INIT_PERF_COUNTERS(tsk)					\
-	.perf_counter_mutex =						\
-		 __MUTEX_INITIALIZER(tsk.perf_counter_mutex),		\
-	.perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#ifdef CONFIG_PERF_EVENTS
+# define INIT_PERF_EVENTS(tsk)						\
+	.perf_event_mutex =						\
+		 __MUTEX_INITIALIZER(tsk.perf_event_mutex),		\
+	.perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
 #else
-# define INIT_PERF_COUNTERS(tsk)
+# define INIT_PERF_EVENTS(tsk)
 #endif
 
 /*
@@ -178,7 +178,7 @@ extern struct cred init_cred;
 	},								\
 	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
 	INIT_IDS							\
-	INIT_PERF_COUNTERS(tsk)						\
+	INIT_PERF_EVENTS(tsk)						\
 	INIT_TRACE_IRQFLAGS						\
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
diff --git a/include/linux/perf_counter.h b/include/linux/perf_event.h
index f6486273267..ae9d9ed6df2 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_event.h
@@ -1,5 +1,5 @@
 /*
- *  Performance counters:
+ *  Performance events:
  *
  *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
  *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
@@ -11,8 +11,8 @@
  *
  *  For licencing details see kernel-base/COPYING
  */
-#ifndef _LINUX_PERF_COUNTER_H
-#define _LINUX_PERF_COUNTER_H
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H
 
 #include <linux/types.h>
 #include <linux/ioctl.h>
@@ -36,8 +36,8 @@ enum perf_type_id {
 };
 
 /*
- * Generalized performance counter event types, used by the
- * attr.event_id parameter of the sys_perf_counter_open()
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
  * syscall:
  */
 enum perf_hw_id {
@@ -56,7 +56,7 @@ enum perf_hw_id {
 };
 
 /*
- * Generalized hardware cache counters:
+ * Generalized hardware cache events:
  *
  *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
  *       { read, write, prefetch } x
@@ -89,8 +89,8 @@ enum perf_hw_cache_op_result_id {
 };
 
 /*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
 enum perf_sw_ids {
@@ -110,7 +110,7 @@ enum perf_sw_ids {
  * Bits that can be set in attr.sample_type to request information
  * in the overflow packets.
  */
-enum perf_counter_sample_format {
+enum perf_event_sample_format {
 	PERF_SAMPLE_IP				= 1U << 0,
 	PERF_SAMPLE_TID				= 1U << 1,
 	PERF_SAMPLE_TIME			= 1U << 2,
@@ -127,7 +127,7 @@ enum perf_counter_sample_format {
 };
 
 /*
- * The format of the data returned by read() on a perf counter fd,
+ * The format of the data returned by read() on a perf event fd,
  * as specified by attr.read_format:
  *
  * struct read_format {
@@ -146,7 +146,7 @@ enum perf_counter_sample_format {
  *	} && PERF_FORMAT_GROUP
  * };
  */
-enum perf_counter_read_format {
+enum perf_event_read_format {
 	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
 	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
 	PERF_FORMAT_ID				= 1U << 2,
@@ -158,9 +158,9 @@ enum perf_counter_read_format {
 #define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
 
 /*
- * Hardware event to monitor via a performance monitoring counter:
+ * Hardware event_id to monitor via a performance monitoring event:
  */
-struct perf_counter_attr {
+struct perf_event_attr {
 
 	/*
 	 * Major type: hardware/software/tracepoint/etc.
@@ -213,28 +213,28 @@ struct perf_counter_attr {
 };
 
 /*
- * Ioctls that can be done on a perf counter fd:
+ * Ioctls that can be done on a perf event fd:
  */
-#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
-#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
-#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
-#define PERF_COUNTER_IOC_PERIOD	_IOW('$', 4, u64)
-#define PERF_COUNTER_IOC_SET_OUTPUT	_IO ('$', 5)
-
-enum perf_counter_ioc_flags {
+#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
+#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, u64)
+#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
+
+enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
 };
 
 /*
  * Structure of the page that can be mapped via mmap
  */
-struct perf_counter_mmap_page {
+struct perf_event_mmap_page {
 	__u32	version;		/* version number of this structure */
 	__u32	compat_version;		/* lowest version this is compat with */
 
 	/*
-	 * Bits needed to read the hw counters in user-space.
+	 * Bits needed to read the hw events in user-space.
 	 *
 	 *   u32 seq;
 	 *   s64 count;
@@ -256,10 +256,10 @@ struct perf_counter_mmap_page {
 	 * processes.
 	 */
 	__u32	lock;			/* seqlock for synchronization */
-	__u32	index;			/* hardware counter identifier */
-	__s64	offset;			/* add to hardware counter value */
-	__u64	time_enabled;		/* time counter active */
-	__u64	time_running;		/* time counter on cpu */
+	__u32	index;			/* hardware event identifier */
+	__s64	offset;			/* add to hardware event value */
+	__u64	time_enabled;		/* time event active */
+	__u64	time_running;		/* time event on cpu */
 
 	/*
 	 * Hole for extension of the self monitor capabilities
@@ -272,7 +272,7 @@ struct perf_counter_mmap_page {
 	 *
 	 * User-space reading the @data_head value should issue an rmb(), on
 	 * SMP capable platforms, after reading this value -- see
-	 * perf_counter_wakeup().
+	 * perf_event_wakeup().
 	 *
 	 * When the mapping is PROT_WRITE the @data_tail value should be
 	 * written by userspace to reflect the last read data. In this case
@@ -282,11 +282,11 @@ struct perf_counter_mmap_page {
 	__u64	data_tail;		/* user-space written tail */
 };
 
-#define PERF_EVENT_MISC_CPUMODE_MASK		(3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN		(0 << 0)
-#define PERF_EVENT_MISC_KERNEL			(1 << 0)
-#define PERF_EVENT_MISC_USER			(2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_MASK		(3 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
+#define PERF_RECORD_MISC_KERNEL			(1 << 0)
+#define PERF_RECORD_MISC_USER			(2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
 
 struct perf_event_header {
 	__u32	type;
@@ -310,7 +310,7 @@ enum perf_event_type {
 	 *	char				filename[];
 	 * };
 	 */
-	PERF_EVENT_MMAP			= 1,
+	PERF_RECORD_MMAP		= 1,
 
 	/*
 	 * struct {
@@ -319,7 +319,7 @@ enum perf_event_type {
 	 *	u64				lost;
 	 * };
 	 */
-	PERF_EVENT_LOST			= 2,
+	PERF_RECORD_LOST		= 2,
 
 	/*
 	 * struct {
@@ -329,7 +329,7 @@ enum perf_event_type {
 	 *	char				comm[];
 	 * };
 	 */
-	PERF_EVENT_COMM			= 3,
+	PERF_RECORD_COMM		= 3,
 
 	/*
 	 * struct {
@@ -339,7 +339,7 @@ enum perf_event_type {
 	 *	u64				time;
 	 * };
 	 */
-	PERF_EVENT_EXIT			= 4,
+	PERF_RECORD_EXIT		= 4,
 
 	/*
 	 * struct {
@@ -349,8 +349,8 @@ enum perf_event_type {
 	 *	u64				stream_id;
 	 * };
 	 */
-	PERF_EVENT_THROTTLE		= 5,
-	PERF_EVENT_UNTHROTTLE		= 6,
+	PERF_RECORD_THROTTLE		= 5,
+	PERF_RECORD_UNTHROTTLE		= 6,
 
 	/*
 	 * struct {
@@ -360,7 +360,7 @@ enum perf_event_type {
 	 *	{ u64				time; } && PERF_SAMPLE_TIME
 	 * };
 	 */
-	PERF_EVENT_FORK			= 7,
+	PERF_RECORD_FORK		= 7,
 
 	/*
 	 * struct {
@@ -370,7 +370,7 @@ enum perf_event_type {
 	 *	struct read_format		values;
 	 * };
 	 */
-	PERF_EVENT_READ			= 8,
+	PERF_RECORD_READ		= 8,
 
 	/*
 	 * struct {
@@ -395,7 +395,7 @@ enum perf_event_type {
 	 *	#
 	 *	# That is, the ABI doesn't make any promises wrt to
 	 *	# the stability of its content, it may vary depending
-	 *	# on event, hardware, kernel version and phase of
+	 *	# on event_id, hardware, kernel version and phase of
 	 *	# the moon.
 	 *	#
 	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
@@ -405,9 +405,9 @@ enum perf_event_type {
 	 *	char			data[size];}&& PERF_SAMPLE_RAW
 	 * };
 	 */
-	PERF_EVENT_SAMPLE		= 9,
+	PERF_RECORD_SAMPLE		= 9,
 
-	PERF_EVENT_MAX,			/* non-ABI */
+	PERF_RECORD_MAX,		/* non-ABI */
 };
 
 enum perf_callchain_context {
@@ -430,8 +430,8 @@ enum perf_callchain_context {
  * Kernel-internal data types and definitions:
  */
 
-#ifdef CONFIG_PERF_COUNTERS
-# include <asm/perf_counter.h>
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
 #endif
 
 #include <linux/list.h>
@@ -459,15 +459,15 @@ struct perf_raw_record {
 struct task_struct;
 
 /**
- * struct hw_perf_counter - performance counter hardware details:
+ * struct hw_perf_event - performance event hardware details:
  */
-struct hw_perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	union {
 		struct { /* hardware */
 			u64		config;
 			unsigned long	config_base;
-			unsigned long	counter_base;
+			unsigned long	event_base;
 			int		idx;
 		};
 		union { /* software */
@@ -487,26 +487,26 @@ struct hw_perf_counter {
 #endif
 };
 
-struct perf_counter;
+struct perf_event;
 
 /**
  * struct pmu - generic performance monitoring unit
  */
 struct pmu {
-	int (*enable)			(struct perf_counter *counter);
-	void (*disable)			(struct perf_counter *counter);
-	void (*read)			(struct perf_counter *counter);
-	void (*unthrottle)		(struct perf_counter *counter);
+	int (*enable)			(struct perf_event *event);
+	void (*disable)			(struct perf_event *event);
+	void (*read)			(struct perf_event *event);
+	void (*unthrottle)		(struct perf_event *event);
 };
 
 /**
- * enum perf_counter_active_state - the states of a counter
+ * enum perf_event_active_state - the states of a event
  */
-enum perf_counter_active_state {
-	PERF_COUNTER_STATE_ERROR	= -2,
-	PERF_COUNTER_STATE_OFF		= -1,
-	PERF_COUNTER_STATE_INACTIVE	=  0,
-	PERF_COUNTER_STATE_ACTIVE	=  1,
+enum perf_event_active_state {
+	PERF_EVENT_STATE_ERROR		= -2,
+	PERF_EVENT_STATE_OFF		= -1,
+	PERF_EVENT_STATE_INACTIVE	=  0,
+	PERF_EVENT_STATE_ACTIVE		=  1,
 };
 
 struct file;
@@ -518,7 +518,7 @@ struct perf_mmap_data {
 	int				nr_locked;	/* nr pages mlocked */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
-	atomic_t			events;		/* event limit */
+	atomic_t			events;		/* event_id limit */
 
 	atomic_long_t			head;		/* write position */
 	atomic_long_t			done_head;	/* completed head */
@@ -529,7 +529,7 @@ struct perf_mmap_data {
 
 	long				watermark;	/* wakeup watermark */
 
-	struct perf_counter_mmap_page	*user_page;
+	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
 };
 
@@ -539,56 +539,56 @@ struct perf_pending_entry {
 };
 
 /**
- * struct perf_counter - performance counter kernel representation:
+ * struct perf_event - performance event kernel representation:
  */
-struct perf_counter {
-#ifdef CONFIG_PERF_COUNTERS
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
 	struct list_head		group_entry;
 	struct list_head		event_entry;
 	struct list_head		sibling_list;
 	int				nr_siblings;
-	struct perf_counter		*group_leader;
-	struct perf_counter		*output;
+	struct perf_event		*group_leader;
+	struct perf_event		*output;
 	const struct pmu		*pmu;
 
-	enum perf_counter_active_state	state;
+	enum perf_event_active_state	state;
 	atomic64_t			count;
 
 	/*
-	 * These are the total time in nanoseconds that the counter
+	 * These are the total time in nanoseconds that the event
 	 * has been enabled (i.e. eligible to run, and the task has
-	 * been scheduled in, if this is a per-task counter)
+	 * been scheduled in, if this is a per-task event)
 	 * and running (scheduled onto the CPU), respectively.
 	 *
 	 * They are computed from tstamp_enabled, tstamp_running and
-	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
+	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
 	 */
 	u64				total_time_enabled;
 	u64				total_time_running;
 
 	/*
	 * These are timestamps used for computing total_time_enabled
-	 * and total_time_running when the counter is in INACTIVE or
+	 * and total_time_running when the event is in INACTIVE or
 	 * ACTIVE state, measured in nanoseconds from an arbitrary point
 	 * in time.
-	 * tstamp_enabled: the notional time when the counter was enabled
-	 * tstamp_running: the notional time when the counter was scheduled on
+	 * tstamp_enabled: the notional time when the event was enabled
+	 * tstamp_running: the notional time when the event was scheduled on
 	 * tstamp_stopped: in INACTIVE state, the notional time when the
-	 *	counter was scheduled off.
+	 *	event was scheduled off.
 	 */
 	u64				tstamp_enabled;
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
-	struct perf_counter_attr	attr;
-	struct hw_perf_counter		hw;
+	struct perf_event_attr		attr;
+	struct hw_perf_event		hw;
 
-	struct perf_counter_context	*ctx;
+	struct perf_event_context	*ctx;
 	struct file			*filp;
 
 	/*
 	 * These accumulate total time (in nanoseconds) that children
-	 * counters have been enabled and running, respectively.
+	 * events have been enabled and running, respectively.
 	 */
 	atomic64_t			child_total_time_enabled;
 	atomic64_t			child_total_time_running;
@@ -598,7 +598,7 @@ struct perf_counter {
 	 */
 	struct mutex			child_mutex;
 	struct list_head		child_list;
-	struct perf_counter		*parent;
+	struct perf_event		*parent;
 
 	int				oncpu;
 	int				cpu;
@@ -623,7 +623,7 @@ struct perf_counter {
 
 	atomic_t			event_limit;
 
-	void (*destroy)(struct perf_counter *);
+	void (*destroy)(struct perf_event *);
 	struct rcu_head			rcu_head;
 
 	struct pid_namespace		*ns;
@@ -632,18 +632,18 @@ struct perf_counter {
 };
 
 /**
- * struct perf_counter_context - counter context structure
+ * struct perf_event_context - event context structure
  *
- * Used as a container for task counters and CPU counters as well:
+ * Used as a container for task events and CPU events as well:
  */
-struct perf_counter_context {
+struct perf_event_context {
 	/*
-	 * Protect the states of the counters in the list,
+	 * Protect the states of the events in the list,
 	 * nr_active, and the list:
 	 */
 	spinlock_t			lock;
 	/*
-	 * Protect the list of counters. Locking either mutex or lock
+	 * Protect the list of events. Locking either mutex or lock
 	 * is sufficient to ensure the list doesn't change; to change
 	 * the list you need to lock both the mutex and the spinlock.
 	 */
@@ -651,7 +651,7 @@ struct perf_counter_context {
 
 	struct list_head		group_list;
 	struct list_head		event_list;
-	int				nr_counters;
+	int				nr_events;
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
@@ -668,7 +668,7 @@ struct perf_counter_context {
 	 * These fields let us detect when two contexts have both
 	 * been cloned (inherited) from a common ancestor.
 	 */
-	struct perf_counter_context	*parent_ctx;
+	struct perf_event_context	*parent_ctx;
 	u64				parent_gen;
 	u64				generation;
 	int				pin_count;
@@ -676,11 +676,11 @@ struct perf_counter_context {
 };
 
 /**
- * struct perf_counter_cpu_context - per cpu counter context structure
+ * struct perf_event_cpu_context - per cpu event context structure
  */
 struct perf_cpu_context {
-	struct perf_counter_context	ctx;
-	struct perf_counter_context	*task_ctx;
+	struct perf_event_context	ctx;
+	struct perf_event_context	*task_ctx;
 	int				active_oncpu;
 	int				max_pertask;
 	int				exclusive;
@@ -694,7 +694,7 @@ struct perf_cpu_context {
 };
 
 struct perf_output_handle {
-	struct perf_counter		*counter;
+	struct perf_event		*event;
 	struct perf_mmap_data		*data;
 	unsigned long			head;
 	unsigned long			offset;
@@ -704,35 +704,35 @@ struct perf_output_handle {
 	unsigned long			flags;
 };
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 /*
  * Set by architecture code:
 */
-extern int perf_max_counters;
+extern int perf_max_events;
 
-extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
+extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
-extern void perf_counter_task_sched_out(struct task_struct *task,
+extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_out(struct task_struct *task,
 					struct task_struct *next, int cpu);
-extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern int perf_counter_init_task(struct task_struct *child);
-extern void perf_counter_exit_task(struct task_struct *child);
-extern void perf_counter_free_task(struct task_struct *task);
-extern void set_perf_counter_pending(void);
-extern void perf_counter_do_pending(void);
-extern void perf_counter_print_debug(void);
+extern void perf_event_task_tick(struct task_struct *task, int cpu);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void set_perf_event_pending(void);
+extern void perf_event_do_pending(void);
+extern void perf_event_print_debug(void);
 extern void __perf_disable(void);
 extern bool __perf_enable(void);
 extern void perf_disable(void);
 extern void perf_enable(void);
-extern int perf_counter_task_disable(void);
-extern int perf_counter_task_enable(void);
-extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx, int cpu);
-extern void perf_counter_update_userpage(struct perf_counter *counter);
+	       struct perf_event_context *ctx, int cpu);
+extern void perf_event_update_userpage(struct perf_event *event);
 
 struct perf_sample_data {
 	u64				type;
@@ -758,96 +758,96 @@ struct perf_sample_data {
 extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
-			       struct perf_counter *counter);
+			       struct perf_event *event);
 extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
-				struct perf_counter *counter,
+				struct perf_event *event,
				struct pt_regs *regs);
 
-extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+extern int perf_event_overflow(struct perf_event *event, int nmi,
				 struct perf_sample_data *data,
				 struct pt_regs *regs);
 
 /*
- * Return 1 for a software counter, 0 for a hardware counter
+ * Return 1 for a software event, 0 for a hardware event
 */
-static inline int is_software_counter(struct perf_counter *counter)
+static inline int is_software_event(struct perf_event *event)
 {
-	return (counter->attr.type != PERF_TYPE_RAW) &&
-		(counter->attr.type != PERF_TYPE_HARDWARE) &&
-		(counter->attr.type != PERF_TYPE_HW_CACHE);
+	return (event->attr.type != PERF_TYPE_RAW) &&
+		(event->attr.type != PERF_TYPE_HARDWARE) &&
+		(event->attr.type != PERF_TYPE_HW_CACHE);
 }
 
-extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 {
-	if (atomic_read(&perf_swcounter_enabled[event]))
-		__perf_swcounter_event(event, nr, nmi, regs, addr);
+	if (atomic_read(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
-extern void __perf_counter_mmap(struct vm_area_struct *vma);
-
-static inline void perf_counter_mmap(struct vm_area_struct *vma)
+extern void __perf_event_mmap(struct vm_area_struct *vma);
+
+static inline void perf_event_mmap(struct vm_area_struct *vma)
 {
	if (vma->vm_flags & VM_EXEC)
-		__perf_counter_mmap(vma);
+		__perf_event_mmap(vma);
 }
 
-extern void perf_counter_comm(struct task_struct *tsk);
-extern void perf_counter_fork(struct task_struct *tsk);
+extern void perf_event_comm(struct task_struct *tsk);
+extern void perf_event_fork(struct task_struct *tsk);
 
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
-extern int sysctl_perf_counter_paranoid;
-extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_sample_rate;
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;
 
-extern void perf_counter_init(void);
-extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
+extern void perf_event_init(void);
+extern void perf_tp_event(int event_id, u64 addr, u64 count,
				 void *record, int entry_size);
 
 #ifndef perf_misc_flags
-#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
-				 PERF_EVENT_MISC_KERNEL)
+#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_RECORD_MISC_USER : \
+				 PERF_RECORD_MISC_KERNEL)
 #define perf_instruction_pointer(regs)	instruction_pointer(regs)
 #endif
 
 extern int perf_output_begin(struct perf_output_handle *handle,
-			     struct perf_counter *counter, unsigned int size,
+			     struct perf_event *event, unsigned int size,
			     int nmi, int sample);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
			     const void *buf, unsigned int len);
 #else
 static inline void
-perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
+perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
 static inline void
-perf_counter_task_sched_out(struct task_struct *task,
+perf_event_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
 static inline void
-perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
-static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
-static inline void perf_counter_exit_task(struct task_struct *child)	{ }
-static inline void perf_counter_free_task(struct task_struct *task)	{ }
-static inline void perf_counter_do_pending(void)			{ }
-static inline void perf_counter_print_debug(void)			{ }
+perf_event_task_tick(struct task_struct *task, int cpu)		{ }
+static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
+static inline void perf_event_exit_task(struct task_struct *child)	{ }
+static inline void perf_event_free_task(struct task_struct *task)	{ }
+static inline void perf_event_do_pending(void)				{ }
+static inline void perf_event_print_debug(void)			{ }
 static inline void perf_disable(void)					{ }
 static inline void perf_enable(void)					{ }
-static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
-static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
+static inline int perf_event_task_disable(void)	{ return -EINVAL; }
+static inline int perf_event_task_enable(void)	{ return -EINVAL; }
 
 static inline void
-perf_swcounter_event(u32 event, u64 nr, int nmi,
+perf_sw_event(u32 event_id, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }
 
-static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
-static inline void perf_counter_comm(struct task_struct *tsk)		{ }
-static inline void perf_counter_fork(struct task_struct *tsk)		{ }
-static inline void perf_counter_init(void)				{ }
+static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }
+static inline void perf_event_comm(struct task_struct *tsk)		{ }
+static inline void perf_event_fork(struct task_struct *tsk)		{ }
+static inline void perf_event_init(void)				{ }
 
 #endif
 
@@ -855,4 +855,4 @@ static inline void perf_counter_init(void) { }
	perf_output_copy((handle), &(x), sizeof(x))
 
 #endif /* __KERNEL__ */
-#endif /* _LINUX_PERF_COUNTER_H */
+#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index b00df4c79c6..07bff666e65 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,7 +85,7 @@
 #define PR_SET_TIMERSLACK 29
 #define PR_GET_TIMERSLACK 30
 
-#define PR_TASK_PERF_COUNTERS_DISABLE		31
-#define PR_TASK_PERF_COUNTERS_ENABLE		32
+#define PR_TASK_PERF_EVENTS_DISABLE		31
+#define PR_TASK_PERF_EVENTS_ENABLE		32
 
 #endif /* _LINUX_PRCTL_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8af3d249170..8b265a8986d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,7 +100,7 @@ struct robust_list_head;
 struct bio;
 struct fs_struct;
 struct bts_context;
-struct perf_counter_context;
+struct perf_event_context;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -701,7 +701,7 @@ struct user_struct {
 #endif
 #endif
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
 #endif
 };
@@ -1449,10 +1449,10 @@ struct task_struct {
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
 #endif
-#ifdef CONFIG_PERF_COUNTERS
-	struct perf_counter_context *perf_counter_ctxp;
-	struct mutex perf_counter_mutex;
-	struct list_head perf_counter_list;
+#ifdef CONFIG_PERF_EVENTS
+	struct perf_event_context *perf_event_ctxp;
+	struct mutex perf_event_mutex;
+	struct list_head perf_event_list;
 #endif
 #ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index a8e37821cc6..02f19f9a76c 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,7 +55,7 @@ struct compat_timeval;
 struct robust_list_head;
 struct getcpu_cache;
 struct old_linux_dirent;
-struct perf_counter_attr;
+struct perf_event_attr;
 
 #include <linux/types.h>
 #include <linux/aio_abi.h>
@@ -885,7 +885,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
 int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
 
-asmlinkage long sys_perf_counter_open(
-		struct perf_counter_attr __user *attr_uptr,
+asmlinkage long sys_perf_event_open(
+		struct perf_event_attr __user *attr_uptr,
		pid_t pid, int cpu, int group_fd, unsigned long flags);
 
 #endif
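
Note (not part of the commit): a minimal user-space sketch of the renamed interface, assuming a kernel that already carries this rename, i.e. one that installs <linux/perf_event.h> and wires up __NR_perf_event_open. It counts retired instructions for the calling thread using the new perf_event_attr structure, the sys_perf_event_open() entry point and the PERF_EVENT_IOC_* ioctls introduced above; it is an illustration of the renamed ABI, not code from the patch.

/* count_instructions.c - hedged example built against the renamed ABI */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long
perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
		int group_fd, unsigned long flags)
{
	/* No glibc wrapper exists; invoke the renamed syscall directly. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;		/* generalized hardware event */
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;			/* create the event stopped */
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* The ioctls now carry the PERF_EVENT_IOC_* names. */
	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	printf("measuring this printf\n");

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}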