Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/kmap_types.h   5
-rw-r--r--  include/linux/mm.h                  6
-rw-r--r--  include/linux/perf_counter.h       79
3 files changed, 59 insertions(+), 31 deletions(-)
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 54e8b3d956b..eddbce0f9fb 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -24,7 +24,10 @@ D(12) KM_SOFTIRQ1,
D(13) KM_SYNC_ICACHE,
D(14) KM_SYNC_DCACHE,
D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16) KM_TYPE_NR
+D(16) KM_IRQ_PTE,
+D(17) KM_NMI,
+D(18) KM_NMI_PTE,
+D(19) KM_TYPE_NR
};
#undef D
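The three new slots (KM_IRQ_PTE, KM_NMI, KM_NMI_PTE) reserve atomic-kmap types for IRQ and NMI context, so an NMI that interrupts another kmap_atomic() user cannot clobber that user's mapping. A minimal sketch of the intended use, assuming the 2009-era kmap_atomic(page, type) API; the function itself is hypothetical, not part of this patch:

/* Hypothetical example: an NMI handler that must map a page uses the
 * NMI-reserved slot, so it cannot collide with the mapping of whatever
 * code it interrupted.
 */
static u64 peek_user_word_nmi(struct page *page, unsigned long offset)
{
	u64 *kaddr, val;

	kaddr = kmap_atomic(page, KM_NMI);
	val = kaddr[offset / sizeof(u64)];
	kunmap_atomic(kaddr, KM_NMI);

	return val;
}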
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d88d6fc530a..cf260d848eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -854,6 +854,12 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
unsigned long end, unsigned long newflags);
/*
+ * doesn't attempt to fault and will return short.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+
+/*
* A callback you can register to apply pressure to ageable caches.
*
* 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
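__get_user_pages_fast() is a lockless variant of get_user_pages(): it walks the page tables without taking mmap_sem, never services a fault, and may therefore pin fewer pages than requested (hence "will return short"). A caller sketch under those assumptions; the function name and buffer handling are illustrative only:

/* Sketch: pin up to 2 user pages from a context that cannot sleep.
 * The return value may be anywhere from 0 to nr_pages, and every
 * pinned page must be released with put_page().
 */
static void touch_user_buf(unsigned long uaddr)
{
	struct page *pages[2];
	int i, got;

	got = __get_user_pages_fast(uaddr, 2, 0 /* read-only */, pages);

	for (i = 0; i < got; i++) {
		/* ... inspect pages[i] ... */
		put_page(pages[i]);	/* drop the reference taken above */
	}
}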
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1b3118a1023..89698d8aba5 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -236,10 +236,16 @@ struct perf_counter_mmap_page {
/*
* Control data for the mmap() data buffer.
*
- * User-space reading this value should issue an rmb(), on SMP capable
- * platforms, after reading this value -- see perf_counter_wakeup().
+ * User-space reading the @data_head value should issue an rmb(), on
+ * SMP capable platforms, after reading this value -- see
+ * perf_counter_wakeup().
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data. In this case
+ * the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
};
#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
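The comment above describes the user-space half of the ring-buffer protocol: read data_head, issue a read barrier, consume records up to that head, and, when the buffer is mapped PROT_WRITE, publish the consumed position in data_tail so the kernel knows the space may be reused. A minimal user-space reader sketch; the barrier stand-ins and process_record() are assumptions, not part of this header:

#include <linux/perf_counter.h>

/* Stand-in barriers; a real reader would use the platform rmb()/mb(). */
#define rmb()	__sync_synchronize()
#define mb()	__sync_synchronize()

extern void process_record(struct perf_event_header *hdr);	/* hypothetical */

void drain(struct perf_counter_mmap_page *pc, char *base, __u64 size)
{
	__u64 head, tail;

	head = pc->data_head;
	rmb();			/* as documented above: order data reads after head */

	tail = pc->data_tail;
	while (tail < head) {
		/* size is a power of two; this sketch ignores records
		 * that straddle the wrap-around point. */
		struct perf_event_header *hdr =
			(struct perf_event_header *)(base + (tail & (size - 1)));

		process_record(hdr);
		tail += hdr->size;
	}

	mb();			/* finish consuming before freeing the space */
	pc->data_tail = tail;	/* PROT_WRITE mapping: publish what we read */
}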
@@ -275,6 +281,15 @@ enum perf_event_type {
/*
* struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * };
+ */
+ PERF_EVENT_LOST = 2,
+
+ /*
+ * struct {
* struct perf_event_header header;
*
* u32 pid, tid;
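PERF_EVENT_LOST, added in the hunk above, is how the kernel reports records it had to drop rather than overwrite: id names the counter and lost counts the dropped records. A sketch of the matching user-space struct and handler, mirroring the layout documented in the comment:

#include <stdio.h>
#include <linux/perf_counter.h>

/* Layout as documented above; the struct name is an assumption. */
struct perf_event_lost {
	struct perf_event_header header;	/* header.type == PERF_EVENT_LOST */
	__u64 id;				/* which counter dropped records */
	__u64 lost;				/* how many records were dropped */
};

static void handle_lost(struct perf_event_header *hdr)
{
	struct perf_event_lost *l = (struct perf_event_lost *)hdr;

	fprintf(stderr, "counter %llu: %llu records lost\n",
		(unsigned long long)l->id, (unsigned long long)l->lost);
}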
@@ -313,30 +328,39 @@ enum perf_event_type {
/*
* When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_RECORD_*
+ * will be PERF_SAMPLE_*
*
* struct {
* struct perf_event_header header;
*
- * { u64 ip; } && PERF_RECORD_IP
- * { u32 pid, tid; } && PERF_RECORD_TID
- * { u64 time; } && PERF_RECORD_TIME
- * { u64 addr; } && PERF_RECORD_ADDR
- * { u64 config; } && PERF_RECORD_CONFIG
- * { u32 cpu, res; } && PERF_RECORD_CPU
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 config; } && PERF_SAMPLE_CONFIG
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
*
* { u64 nr;
- * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
*
- * { u16 nr,
- * hv,
- * kernel,
- * user;
- * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
};
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
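The old per-entry hv/kernel/user frame counts are replaced by in-band markers: the ips[] stream of a PERF_SAMPLE_CALLCHAIN record is now a flat array in which the special PERF_CONTEXT_* values label the frames that follow them. A consumer sketch; treating pre-marker frames as kernel frames is an assumption, not something the header states:

#include <stdio.h>
#include <linux/perf_counter.h>

/* Sketch: walk a callchain whose ips[] mixes real addresses with
 * PERF_CONTEXT_* markers. Marker values compare above PERF_CONTEXT_MAX
 * as unsigned 64-bit quantities.
 */
static void walk_chain(const __u64 *ips, __u64 nr)
{
	__u64 context = (__u64)PERF_CONTEXT_KERNEL;	/* assumed default */
	__u64 i;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= (__u64)PERF_CONTEXT_MAX) {
			context = ips[i];		/* context-switch marker */
			continue;
		}
		printf("%6s ip %#llx\n",
		       context == (__u64)PERF_CONTEXT_USER ? "user" : "kernel",
		       (unsigned long long)ips[i]);
	}
}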
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
@@ -356,6 +380,13 @@ enum perf_event_type {
#include <linux/pid_namespace.h>
#include <asm/atomic.h>
+#define PERF_MAX_STACK_DEPTH 255
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
struct task_struct;
/**
@@ -414,6 +445,7 @@ struct file;
struct perf_mmap_data {
struct rcu_head rcu_head;
int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
@@ -423,8 +455,8 @@ struct perf_mmap_data {
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */
-
atomic_t wakeup; /* needs a wakeup */
+ atomic_t lost; /* nr records lost */
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
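The new fields pair up with the ABI changes above: writable records whether the buffer was mapped PROT_WRITE (so data_tail is meaningful), and lost accumulates records that could not be written without clobbering unread data, to be flushed later as a PERF_EVENT_LOST record. A hedged sketch of the check the kernel-side writer presumably performs before reserving space; the helper and its exact placement are assumptions, not part of this header:

/* Sketch: non-overwrite reservation check. 'head' is where the write
 * would end; the reader's published data_tail bounds how far we may go.
 */
static int output_would_overwrite(struct perf_mmap_data *data,
				  unsigned long head)
{
	unsigned long tail, buf_size = data->nr_pages << PAGE_SHIFT;

	if (!data->writable)
		return 0;		/* overwrite mode: always allowed */

	tail = ACCESS_ONCE(data->user_page->data_tail);
	if (head - tail > buf_size) {	/* would clobber unread data */
		atomic_inc(&data->lost);
		return 1;
	}
	return 0;
}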
@@ -604,6 +636,7 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
+extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
@@ -649,18 +682,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma)
extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);
-extern void perf_counter_task_migration(struct task_struct *task, int cpu);
-
-#define MAX_STACK_DEPTH 255
-
-struct perf_callchain_entry {
- u16 nr;
- u16 hv;
- u16 kernel;
- u16 user;
- u64 ip[MAX_STACK_DEPTH];
-};
-
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int sysctl_perf_counter_paranoid;
@@ -701,8 +722,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
static inline void perf_counter_comm(struct task_struct *tsk) { }
static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
-static inline void perf_counter_task_migration(struct task_struct *task,
- int cpu) { }
#endif
#endif /* __KERNEL__ */