Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/unistd.h | 2
-rw-r--r--  arch/arm/kernel/calls.S | 2
-rw-r--r--  arch/blackfin/include/asm/unistd.h | 2
-rw-r--r--  arch/blackfin/mach-common/entry.S | 2
-rw-r--r--  arch/frv/Kconfig | 2
-rw-r--r--  arch/frv/include/asm/perf_event.h (renamed from arch/frv/include/asm/perf_counter.h) | 10
-rw-r--r--  arch/frv/include/asm/unistd.h | 2
-rw-r--r--  arch/frv/kernel/entry.S | 2
-rw-r--r--  arch/frv/lib/Makefile | 2
-rw-r--r--  arch/frv/lib/perf_event.c (renamed from arch/frv/lib/perf_counter.c) | 8
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/kernel/entry.S | 2
-rw-r--r--  arch/m68knommu/kernel/syscalltable.S | 2
-rw-r--r--  arch/microblaze/include/asm/unistd.h | 2
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 2
-rw-r--r--  arch/mips/include/asm/unistd.h | 6
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-64.S | 2
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 2
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 2
-rw-r--r--  arch/mn10300/include/asm/unistd.h | 2
-rw-r--r--  arch/mn10300/kernel/entry.S | 2
-rw-r--r--  arch/parisc/Kconfig | 2
-rw-r--r--  arch/parisc/include/asm/perf_counter.h | 7
-rw-r--r--  arch/parisc/include/asm/perf_event.h | 7
-rw-r--r--  arch/parisc/include/asm/unistd.h | 4
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 2
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 22
-rw-r--r--  arch/powerpc/include/asm/paca.h | 2
-rw-r--r--  arch/powerpc/include/asm/perf_event.h (renamed from arch/powerpc/include/asm/perf_counter.h) | 26
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 2
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 8
-rw-r--r--  arch/powerpc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c | 2
-rw-r--r--  arch/powerpc/kernel/perf_event.c (renamed from arch/powerpc/kernel/perf_counter.c) | 594
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c | 2
-rw-r--r--  arch/powerpc/kernel/time.c | 30
-rw-r--r--  arch/powerpc/mm/fault.c | 8
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 4
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/include/asm/perf_counter.h | 10
-rw-r--r--  arch/s390/include/asm/perf_event.h | 10
-rw-r--r--  arch/s390/include/asm/unistd.h | 2
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 8
-rw-r--r--  arch/s390/kernel/syscalls.S | 2
-rw-r--r--  arch/s390/mm/fault.c | 8
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/include/asm/perf_counter.h | 9
-rw-r--r--  arch/sh/include/asm/perf_event.h | 9
-rw-r--r--  arch/sh/include/asm/unistd_32.h | 2
-rw-r--r--  arch/sh/include/asm/unistd_64.h | 2
-rw-r--r--  arch/sh/kernel/syscalls_32.S | 2
-rw-r--r--  arch/sh/kernel/syscalls_64.S | 2
-rw-r--r--  arch/sh/mm/fault_32.c | 8
-rw-r--r--  arch/sh/mm/tlbflush_64.c | 8
-rw-r--r--  arch/sparc/Kconfig | 4
-rw-r--r--  arch/sparc/include/asm/perf_counter.h | 14
-rw-r--r--  arch/sparc/include/asm/perf_event.h | 14
-rw-r--r--  arch/sparc/include/asm/unistd.h | 2
-rw-r--r--  arch/sparc/kernel/Makefile | 2
-rw-r--r--  arch/sparc/kernel/nmi.c | 4
-rw-r--r--  arch/sparc/kernel/pcr.c | 10
-rw-r--r--  arch/sparc/kernel/perf_event.c (renamed from arch/sparc/kernel/perf_counter.c) | 178
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/include/asm/entry_arch.h | 2
-rw-r--r--  arch/x86/include/asm/perf_event.h (renamed from arch/x86/include/asm/perf_counter.h) | 30
-rw-r--r--  arch/x86/include/asm/unistd_32.h | 2
-rw-r--r--  arch/x86/include/asm/unistd_64.h | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 6
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c (renamed from arch/x86/kernel/cpu/perf_counter.c) | 504
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 2
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 2
-rw-r--r--  arch/x86/mm/fault.c | 8
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 4
-rw-r--r--  arch/x86/oprofile/op_x86_model.h | 2
92 files changed, 856 insertions, 856 deletions
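
For context on the user-facing side of this rename, below is a minimal userspace sketch (not part of this commit) that exercises the renamed sys_perf_event_open entry point to count CPU cycles for a busy loop. It assumes a kernel built from this tree with <linux/perf_event.h> installed; since glibc provides no wrapper, it invokes the syscall directly through the __NR_perf_event_open number that the per-architecture unistd.h hunks below define.

/* Minimal illustration of the renamed syscall (not part of this diff). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	/* No libc wrapper exists; use the raw syscall number. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	volatile unsigned long i;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* on powerpc this maps to MMCR0 FCS, see below */
	attr.exclude_hv = 1;		/* and FCHV when running under a hypervisor */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

Only the name of the syscall and of the kernel-internal symbols changes in this series; the attribute layout and ioctl interface are untouched, so a program like the one above needs nothing beyond the renamed __NR_ constant.
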
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 9122c9ee18f..89f7eade20a 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -390,7 +390,7 @@
#define __NR_preadv (__NR_SYSCALL_BASE+361)
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index ecfa98954d1..fafce1b5c69 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -373,7 +373,7 @@
CALL(sys_preadv)
CALL(sys_pwritev)
CALL(sys_rt_tgsigqueueinfo)
- CALL(sys_perf_counter_open)
+ CALL(sys_perf_event_open)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index c8e7ee4768c..02b1529dad5 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,7 +381,7 @@
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
-#define __NR_perf_counter_open 369
+#define __NR_perf_event_open 369
#define __NR_syscall 370
#define NR_syscalls __NR_syscall
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 01af24cde36..1e7cac23e25 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1620,7 +1620,7 @@ ENTRY(_sys_call_table)
.long _sys_preadv
.long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
- .long _sys_perf_counter_open
+ .long _sys_perf_event_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index b86e19c9b5b..4b5830bcbe2 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -7,7 +7,7 @@ config FRV
default y
select HAVE_IDE
select HAVE_ARCH_TRACEHOOK
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/perf_counter.h b/arch/frv/include/asm/perf_event.h
index ccf726e61b2..a69e0155d14 100644
--- a/arch/frv/include/asm/perf_counter.h
+++ b/arch/frv/include/asm/perf_event.h
@@ -1,4 +1,4 @@
-/* FRV performance counter support
+/* FRV performance event support
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -9,9 +9,9 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index 4a8fb427ce0..be6ef0f5cd4 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -342,7 +342,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#ifdef __KERNEL__
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index fde1e446b44..189397ec012 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1525,6 +1525,6 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
syscall_table_size = (. - sys_call_table)
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 0a377210c89..f4709756d0d 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
- outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+ outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
diff --git a/arch/frv/lib/perf_counter.c b/arch/frv/lib/perf_event.c
index 2000feecd57..9ac5acfd2e9 100644
--- a/arch/frv/lib/perf_counter.c
+++ b/arch/frv/lib/perf_event.c
@@ -1,4 +1,4 @@
-/* Performance counter handling
+/* Performance event handling
*
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -9,11 +9,11 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
/*
- * mark the performance counter as pending
+ * mark the performance event as pending
*/
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
}
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 946d8691f2b..48b87f5ced5 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -335,7 +335,7 @@
#define __NR_preadv 329
#define __NR_pwritev 330
#define __NR_rt_tgsigqueueinfo 331
-#define __NR_perf_counter_open 332
+#define __NR_perf_event_open 332
#ifdef __KERNEL__
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 922f52e7ed1..c5b33634c98 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -756,5 +756,5 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 0ae123e0898..23535cc415a 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -350,7 +350,7 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 0b852327c0e..cb05a07e55e 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -381,7 +381,7 @@
#define __NR_preadv 363 /* new */
#define __NR_pwritev 364 /* new */
#define __NR_rt_tgsigqueueinfo 365 /* new */
-#define __NR_perf_counter_open 366 /* new */
+#define __NR_perf_event_open 366 /* new */
#define __NR_syscalls 367
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 457216097df..ecec1915513 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -370,4 +370,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_rt_tgsigqueueinfo /* 365 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index e753a777949..8c9dfa9e901 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -353,7 +353,7 @@
#define __NR_preadv (__NR_Linux + 330)
#define __NR_pwritev (__NR_Linux + 331)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
-#define __NR_perf_counter_open (__NR_Linux + 333)
+#define __NR_perf_event_open (__NR_Linux + 333)
#define __NR_accept4 (__NR_Linux + 334)
/*
@@ -664,7 +664,7 @@
#define __NR_preadv (__NR_Linux + 289)
#define __NR_pwritev (__NR_Linux + 290)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
-#define __NR_perf_counter_open (__NR_Linux + 292)
+#define __NR_perf_event_open (__NR_Linux + 292)
#define __NR_accept4 (__NR_Linux + 293)
/*
@@ -979,7 +979,7 @@
#define __NR_preadv (__NR_Linux + 293)
#define __NR_pwritev (__NR_Linux + 294)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 295)
-#define __NR_perf_counter_open (__NR_Linux + 296)
+#define __NR_perf_event_open (__NR_Linux + 296)
#define __NR_accept4 (__NR_Linux + 297)
/*
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 7c2de4f091c..fd2a9bb620d 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -581,7 +581,7 @@ einval: li v0, -ENOSYS
sys sys_preadv 6 /* 4330 */
sys sys_pwritev 6
sys sys_rt_tgsigqueueinfo 4
- sys sys_perf_counter_open 5
+ sys sys_perf_event_open 5
sys sys_accept4 4
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b97b993846d..18bf7f32c5e 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -418,6 +418,6 @@ sys_call_table:
PTR sys_preadv
PTR sys_pwritev /* 5390 */
PTR sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 1a6ae124635..6ebc0797669 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
PTR sys_preadv
PTR sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index cd31087a651..9bbf9775e0b 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -536,6 +536,6 @@ sys_call_table:
PTR compat_sys_preadv /* 4330 */
PTR compat_sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index fad68616af3..2a983931c11 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -347,7 +347,7 @@
#define __NR_preadv 334
#define __NR_pwritev 335
#define __NR_rt_tgsigqueueinfo 336
-#define __NR_perf_counter_open 337
+#define __NR_perf_event_open 337
#ifdef __KERNEL__
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index e0d2563af4f..a94e7ea3faa 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -723,7 +723,7 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev /* 335 */
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 06f8d5b5b0f..f388dc68f60 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -16,7 +16,7 @@ config PARISC
select RTC_DRV_GENERIC
select INIT_ALL_POSSIBLE
select BUG
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/perf_counter.h b/arch/parisc/include/asm/perf_counter.h
deleted file mode 100644
index dc9e829f701..00000000000
--- a/arch/parisc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */
diff --git a/arch/parisc/include/asm/perf_event.h b/arch/parisc/include/asm/perf_event.h
new file mode 100644
index 00000000000..cc146427d8f
--- /dev/null
+++ b/arch/parisc/include/asm/perf_event.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index f3d3b8b012c..cda158318c6 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -810,9 +810,9 @@
#define __NR_preadv (__NR_Linux + 315)
#define __NR_pwritev (__NR_Linux + 316)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
-#define __NR_perf_counter_open (__NR_Linux + 318)
+#define __NR_perf_event_open (__NR_Linux + 318)
-#define __NR_Linux_syscalls (__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls (__NR_perf_event_open + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index cf145eb026b..843f423dec6 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -416,7 +416,7 @@
ENTRY_COMP(preadv) /* 315 */
ENTRY_COMP(pwritev)
ENTRY_COMP(rt_tgsigqueueinfo)
- ENTRY_SAME(perf_counter_open)
+ ENTRY_SAME(perf_event_open)
/* Nothing yet */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8250902265c..4fd479059d6 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -129,7 +129,7 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e73d554538d..abbc2aaaced 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -135,43 +135,43 @@ static inline int irqs_disabled_flags(unsigned long flags)
*/
struct irq_chip;
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ : "i" (offsetof(struct paca_struct, perf_event_pending)));
return x;
}
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
- "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
- "i" (offsetof(struct paca_struct, perf_counter_pending)));
+ "i" (offsetof(struct paca_struct, perf_event_pending)));
}
#endif /* CONFIG_PPC64 */
-#else /* CONFIG_PERF_COUNTERS */
+#else /* CONFIG_PERF_EVENTS */
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
{
return 0;
}
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index b634456ea89..154f405b642 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -122,7 +122,7 @@ struct paca_struct {
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
- u8 perf_counter_pending; /* PM interrupt while soft-disabled */
+ u8 perf_event_pending; /* PM interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_event.h
index 0ea0639fcf7..2499aaadaeb 100644
--- a/arch/powerpc/include/asm/perf_counter.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -1,5 +1,5 @@
/*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
@@ -12,9 +12,9 @@
#include <asm/hw_irq.h>
-#define MAX_HWCOUNTERS 8
+#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
-#define MAX_LIMITED_HWCOUNTERS 2
+#define MAX_LIMITED_HWEVENTS 2
/*
* This struct provides the constants and functions needed to
@@ -22,18 +22,18 @@
*/
struct power_pmu {
const char *name;
- int n_counter;
+ int n_event;
int max_alternatives;
unsigned long add_fields;
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
unsigned int hwc[], unsigned long mmcr[]);
- int (*get_constraint)(u64 event, unsigned long *mskp,
+ int (*get_constraint)(u64 event_id, unsigned long *mskp,
unsigned long *valp);
- int (*get_alternatives)(u64 event, unsigned int flags,
+ int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
- int (*limited_pmc_event)(u64 event);
+ int (*limited_pmc_event)(u64 event_id);
u32 flags;
int n_generic;
int *generic_events;
@@ -61,10 +61,10 @@ struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-#define PERF_COUNTER_INDEX_OFFSET 1
+#define PERF_EVENT_INDEX_OFFSET 1
/*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
* if we have hardware PMU support.
*/
#ifdef CONFIG_PPC_PERF_CTRS
@@ -73,14 +73,14 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
* The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
* other events.
*
* The value and mask are divided up into (non-overlapping) bitfields
* of three different types:
*
* Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event. For a
+ * in MMCR* needs to be set to a specific value for this event_id. For a
* select field, the mask contains 1s in every bit of the field, and
* the value contains a unique value for each possible setting of the
* MMCR* bits. The constraint checking code will ensure that two events
@@ -102,9 +102,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
* possible.) For N classes, the field is N+1 bits wide, and each class
* is assigned one bit from the least-significant N bits. The mask has
* only the most-significant bit set, and the value has only the bit
- * for the event's class set. The test_adder has the least significant
+ * for the event_id's class set. The test_adder has the least significant
* bit set in the field.
*
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
* field, then it will have 0 in both the mask and value for that field.
*/
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ed24bd92fe4..c7d671a7d9a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -322,7 +322,7 @@ SYSCALL_SPU(epoll_create1)
SYSCALL_SPU(dup3)
SYSCALL_SPU(pipe2)
SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cef080bfc60..f6ca7617676 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -341,7 +341,7 @@
#define __NR_dup3 316
#define __NR_pipe2 317
#define __NR_inotify_init1 318
-#define __NR_perf_counter_open 319
+#define __NR_perf_event_open 319
#define __NR_preadv 320
#define __NR_pwritev 321
#define __NR_rt_tgsigqueueinfo 322
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 569f79ccd31..b23664a0b86 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o perf_callchain.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f0df285f0f8..0812b0f414b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -133,7 +133,7 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+ DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 66bcda34a6b..900e0eea009 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -556,14 +556,14 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
-#ifdef CONFIG_PERF_COUNTERS
- /* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+ /* check paca->perf_event_pending if we're enabling ints */
lbz r3,PACAPERFPEND(r13)
and. r3,r3,r5
beq 27f
- bl .perf_counter_do_pending
+ bl .perf_event_do_pending
27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f7f376ea7b1..e5d12117798 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,7 +53,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -138,9 +138,9 @@ notrace void raw_local_irq_restore(unsigned long en)
}
#endif /* CONFIG_PPC_STD_MMU_64 */
- if (test_perf_counter_pending()) {
- clear_perf_counter_pending();
- perf_counter_do_pending();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
}
/*
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
index cc466d039af..09d72028f31 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index f74b62c6751..0a03cf70d24 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_event.c
index 5ccf9bca96c..c98321fcb45 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1,5 +1,5 @@
/*
- * Performance counter support - powerpc architecture code
+ * Performance event support - powerpc architecture code
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
@@ -19,35 +19,35 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
-struct cpu_hw_counters {
- int n_counters;
+struct cpu_hw_events {
+ int n_events;
int n_percpu;
int disabled;
int n_added;
int n_limited;
u8 pmcs_enabled;
- struct perf_counter *counter[MAX_HWCOUNTERS];
- u64 events[MAX_HWCOUNTERS];
- unsigned int flags[MAX_HWCOUNTERS];
+ struct perf_event *event[MAX_HWEVENTS];
+ u64 events[MAX_HWEVENTS];
+ unsigned int flags[MAX_HWEVENTS];
unsigned long mmcr[3];
- struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
- u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
- u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
+ u8 limited_hwidx[MAX_LIMITED_HWEVENTS];
+ u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+ unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
};
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
/*
- * Normally, to ignore kernel events we set the FCS (freeze counters
+ * Normally, to ignore kernel events we set the FCS (freeze events
* in supervisor mode) bit in MMCR0, but if the kernel runs with the
* hypervisor bit set in the MSR, or if we are running on a processor
* where the hypervisor bit is forced to 1 (as on Apple G5 processors),
* then we need to use the FCHV bit to ignore kernel events.
*/
-static unsigned int freeze_counters_kernel = MMCR0_FCS;
+static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
@@ -122,14 +122,14 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
if (ppmu->flags & PPMU_ALT_SIPR) {
if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
+ return PERF_RECORD_MISC_HYPERVISOR;
return (mmcra & POWER6_MMCRA_SIPR) ?
- PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
}
if (mmcra & MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ return PERF_RECORD_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
+ PERF_RECORD_MISC_KERNEL;
}
/*
@@ -152,14 +152,14 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
#endif /* CONFIG_PPC64 */
-static void perf_counter_interrupt(struct pt_regs *regs);
+static void perf_event_interrupt(struct pt_regs *regs);
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
}
/*
- * Read one performance monitor counter (PMC).
+ * Read one performance monitor event (PMC).
*/
static unsigned long read_pmc(int idx)
{
@@ -240,31 +240,31 @@ static void write_pmc(int idx, unsigned long val)
* Check if a set of events can all go on the PMU at once.
* If they can't, this will look at alternative codes for the events
* and see if any combination of alternative codes is feasible.
- * The feasible set is returned in event[].
+ * The feasible set is returned in event_id[].
*/
-static int power_check_constraints(struct cpu_hw_counters *cpuhw,
- u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_events *cpuhw,
+ u64 event_id[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
- unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
- int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+ unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
+ int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
- if (n_ev > ppmu->n_counter)
+ if (n_ev > ppmu->n_event)
return -1;
/* First see if the events will go on as-is */
for (i = 0; i < n_ev; ++i) {
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
- && !ppmu->limited_pmc_event(event[i])) {
- ppmu->get_alternatives(event[i], cflags[i],
+ && !ppmu->limited_pmc_event(event_id[i])) {
+ ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
- event[i] = cpuhw->alternatives[i][0];
+ event_id[i] = cpuhw->alternatives[i][0];
}
- if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+ if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
&cpuhw->avalues[i][0]))
return -1;
}
@@ -287,7 +287,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
return -1;
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
- n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+ n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
@@ -307,7 +307,7 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
j = choice[i];
}
/*
- * See if any alternative k for event i,
+ * See if any alternative k for event_id i,
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
@@ -321,16 +321,16 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
if (j >= n_alt[i]) {
/*
* No feasible alternative, backtrack
- * to event i-1 and continue enumerating its
+ * to event_id i-1 and continue enumerating its
* alternatives from where we got up to.
*/
if (--i < 0)
return -1;
} else {
/*
- * Found a feasible alternative for event i,
- * remember where we got up to with this event,
- * go on to the next event, and start with
+ * Found a feasible alternative for event_id i,
+ * remember where we got up to with this event_id,
+ * go on to the next event_id, and start with
* the first alternative for it.
*/
choice[i] = j;
@@ -345,21 +345,21 @@ static int power_check_constraints(struct cpu_hw_counters *cpuhw,
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
- event[i] = cpuhw->alternatives[i][choice[i]];
+ event_id[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
/*
- * Check if newly-added counters have consistent settings for
+ * Check if newly-added events have consistent settings for
* exclude_{user,kernel,hv} with each other and any previously
- * added counters.
+ * added events.
*/
-static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int n_prev, int n_new)
{
int eu = 0, ek = 0, eh = 0;
int i, n, first;
- struct perf_counter *counter;
+ struct perf_event *event;
n = n_prev + n_new;
if (n <= 1)
@@ -371,15 +371,15 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
continue;
}
- counter = ctrs[i];
+ event = ctrs[i];
if (first) {
- eu = counter->attr.exclude_user;
- ek = counter->attr.exclude_kernel;
- eh = counter->attr.exclude_hv;
+ eu = event->attr.exclude_user;
+ ek = event->attr.exclude_kernel;
+ eh = event->attr.exclude_hv;
first = 0;
- } else if (counter->attr.exclude_user != eu ||
- counter->attr.exclude_kernel != ek ||
- counter->attr.exclude_hv != eh) {
+ } else if (event->attr.exclude_user != eu ||
+ event->attr.exclude_kernel != ek ||
+ event->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
@@ -392,11 +392,11 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
return 0;
}
-static void power_pmu_read(struct perf_counter *counter)
+static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- if (!counter->hw.idx)
+ if (!event->hw.idx)
return;
/*
* Performance monitor interrupts come even when interrupts
@@ -404,21 +404,21 @@ static void power_pmu_read(struct perf_counter *counter)
* Therefore we treat them like NMIs.
*/
do {
- prev = atomic64_read(&counter->hw.prev_count);
+ prev = atomic64_read(&event->hw.prev_count);
barrier();
- val = read_pmc(counter->hw.idx);
- } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+ val = read_pmc(event->hw.idx);
+ } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
- /* The counters are only 32 bits wide */
+ /* The events are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
- atomic64_sub(delta, &counter->hw.period_left);
+ atomic64_add(delta, &event->count);
+ atomic64_sub(delta, &event->hw.period_left);
}
/*
* On some machines, PMC5 and PMC6 can't be written, don't respect
* the freeze conditions, and don't generate interrupts. This tells
- * us if `counter' is using such a PMC.
+ * us if `event' is using such a PMC.
*/
static int is_limited_pmc(int pmcnum)
{
@@ -426,53 +426,53 @@ static int is_limited_pmc(int pmcnum)
&& (pmcnum == 5 || pmcnum == 6);
}
-static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+static void freeze_limited_events(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
- struct perf_counter *counter;
+ struct perf_event *event;
u64 val, prev, delta;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
- counter = cpuhw->limited_counter[i];
- if (!counter->hw.idx)
+ event = cpuhw->limited_event[i];
+ if (!event->hw.idx)
continue;
- val = (counter->hw.idx == 5) ? pmc5 : pmc6;
- prev = atomic64_read(&counter->hw.prev_count);
- counter->hw.idx = 0;
+ val = (event->hw.idx == 5) ? pmc5 : pmc6;
+ prev = atomic64_read(&event->hw.prev_count);
+ event->hw.idx = 0;
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
}
}
-static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+static void thaw_limited_events(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
- struct perf_counter *counter;
+ struct perf_event *event;
u64 val;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
- counter = cpuhw->limited_counter[i];
- counter->hw.idx = cpuhw->limited_hwidx[i];
- val = (counter->hw.idx == 5) ? pmc5 : pmc6;
- atomic64_set(&counter->hw.prev_count, val);
- perf_counter_update_userpage(counter);
+ event = cpuhw->limited_event[i];
+ event->hw.idx = cpuhw->limited_hwidx[i];
+ val = (event->hw.idx == 5) ? pmc5 : pmc6;
+ atomic64_set(&event->hw.prev_count, val);
+ perf_event_update_userpage(event);
}
}
/*
- * Since limited counters don't respect the freeze conditions, we
+ * Since limited events don't respect the freeze conditions, we
* have to read them immediately after freezing or unfreezing the
- * other counters. We try to keep the values from the limited
- * counters as consistent as possible by keeping the delay (in
+ * other events. We try to keep the values from the limited
+ * events as consistent as possible by keeping the delay (in
* cycles and instructions) between freezing/unfreezing and reading
- * the limited counters as small and consistent as possible.
- * Therefore, if any limited counters are in use, we read them
+ * the limited events as small and consistent as possible.
+ * Therefore, if any limited events are in use, we read them
* both, and always in the same order, to minimize variability,
* and do it inside the same asm that writes MMCR0.
*/
-static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
unsigned long pmc5, pmc6;
@@ -485,7 +485,7 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
* Write MMCR0, then read PMC5 and PMC6 immediately.
* To ensure we don't get a performance monitor interrupt
* between writing MMCR0 and freezing/thawing the limited
- * counters, we first write MMCR0 with the counter overflow
+ * events, we first write MMCR0 with the event overflow
* interrupt enable bits turned off.
*/
asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
@@ -495,12 +495,12 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
"i" (SPRN_PMC5), "i" (SPRN_PMC6));
if (mmcr0 & MMCR0_FC)
- freeze_limited_counters(cpuhw, pmc5, pmc6);
+ freeze_limited_events(cpuhw, pmc5, pmc6);
else
- thaw_limited_counters(cpuhw, pmc5, pmc6);
+ thaw_limited_events(cpuhw, pmc5, pmc6);
/*
- * Write the full MMCR0 including the counter overflow interrupt
+ * Write the full MMCR0 including the event overflow interrupt
* enable bits, if necessary.
*/
if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -508,18 +508,18 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
}
/*
- * Disable all counters to prevent PMU interrupts and to allow
- * counters to be added or removed.
+ * Disable all events to prevent PMU interrupts and to allow
+ * events to be added or removed.
*/
void hw_perf_disable(void)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_counters);
+ cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
@@ -543,9 +543,9 @@ void hw_perf_disable(void)
}
/*
- * Set the 'freeze counters' bit.
+ * Set the 'freeze events' bit.
* The barrier is to make sure the mtspr has been
- * executed and the PMU has frozen the counters
+ * executed and the PMU has frozen the events
* before we return.
*/
write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
@@ -555,26 +555,26 @@ void hw_perf_disable(void)
}
/*
- * Re-enable all counters if disable == 0.
- * If we were previously disabled and counters were added, then
+ * Re-enable all events if disable == 0.
+ * If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
void hw_perf_enable(void)
{
- struct perf_counter *counter;
- struct cpu_hw_counters *cpuhw;
+ struct perf_event *event;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
long i;
unsigned long val;
s64 left;
- unsigned int hwc_index[MAX_HWCOUNTERS];
+ unsigned int hwc_index[MAX_HWEVENTS];
int n_lim;
int idx;
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_counters);
+ cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
@@ -582,23 +582,23 @@ void hw_perf_enable(void)
cpuhw->disabled = 0;
/*
- * If we didn't change anything, or only removed counters,
+ * If we didn't change anything, or only removed events,
* no need to recalculate MMCR* settings and reset the PMCs.
* Just reenable the PMU with the current MMCR* settings
- * (possibly updated for removal of counters).
+ * (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
- if (cpuhw->n_counters == 0)
+ if (cpuhw->n_events == 0)
ppc_set_pmu_inuse(0);
goto out_enable;
}
/*
- * Compute MMCR* values for the new set of counters
+ * Compute MMCR* values for the new set of events
*/
- if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
cpuhw->mmcr)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
@@ -607,22 +607,22 @@ void hw_perf_enable(void)
/*
* Add in MMCR0 freeze bits corresponding to the
- * attr.exclude_* bits for the first counter.
- * We have already checked that all counters have the
- * same values for these bits as the first counter.
+ * attr.exclude_* bits for the first event.
+ * We have already checked that all events have the
+ * same values for these bits as the first event.
*/
- counter = cpuhw->counter[0];
- if (counter->attr.exclude_user)
+ event = cpuhw->event[0];
+ if (event->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
- if (counter->attr.exclude_kernel)
- cpuhw->mmcr[0] |= freeze_counters_kernel;
- if (counter->attr.exclude_hv)
+ if (event->attr.exclude_kernel)
+ cpuhw->mmcr[0] |= freeze_events_kernel;
+ if (event->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
* Write the new configuration to MMCR* with the freeze
- * bit set and set the hardware counters to their initial values.
- * Then unfreeze the counters.
+ * bit set and set the hardware events to their initial values.
+ * Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
@@ -631,43 +631,43 @@ void hw_perf_enable(void)
| MMCR0_FC);
/*
- * Read off any pre-existing counters that need to move
+ * Read off any pre-existing events that need to move
* to another PMC.
*/
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
- power_pmu_read(counter);
- write_pmc(counter->hw.idx, 0);
- counter->hw.idx = 0;
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
+ power_pmu_read(event);
+ write_pmc(event->hw.idx, 0);
+ event->hw.idx = 0;
}
}
/*
- * Initialize the PMCs for all the new and moved counters.
+ * Initialize the PMCs for all the new and moved events.
*/
cpuhw->n_limited = n_lim = 0;
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (counter->hw.idx)
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (event->hw.idx)
continue;
idx = hwc_index[i] + 1;
if (is_limited_pmc(idx)) {
- cpuhw->limited_counter[n_lim] = counter;
+ cpuhw->limited_event[n_lim] = event;
cpuhw->limited_hwidx[n_lim] = idx;
++n_lim;
continue;
}
val = 0;
- if (counter->hw.sample_period) {
- left = atomic64_read(&counter->hw.period_left);
+ if (event->hw.sample_period) {
+ left = atomic64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
- atomic64_set(&counter->hw.prev_count, val);
- counter->hw.idx = idx;
+ atomic64_set(&event->hw.prev_count, val);
+ event->hw.idx = idx;
write_pmc(idx, val);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
@@ -688,85 +688,85 @@ void hw_perf_enable(void)
local_irq_restore(flags);
}
-static int collect_events(struct perf_counter *group, int max_count,
- struct perf_counter *ctrs[], u64 *events,
+static int collect_events(struct perf_event *group, int max_count,
+ struct perf_event *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
- struct perf_counter *counter;
+ struct perf_event *event;
- if (!is_software_counter(group)) {
+ if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
- flags[n] = group->hw.counter_base;
+ flags[n] = group->hw.event_base;
events[n++] = group->hw.config;
}
- list_for_each_entry(counter, &group->sibling_list, list_entry) {
- if (!is_software_counter(counter) &&
- counter->state != PERF_COUNTER_STATE_OFF) {
+ list_for_each_entry(event, &group->sibling_list, list_entry) {
+ if (!is_software_event(event) &&
+ event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
- ctrs[n] = counter;
- flags[n] = counter->hw.counter_base;
- events[n++] = counter->hw.config;
+ ctrs[n] = event;
+ flags[n] = event->hw.event_base;
+ events[n++] = event->hw.config;
}
}
return n;
}
-static void counter_sched_in(struct perf_counter *counter, int cpu)
+static void event_sched_in(struct perf_event *event, int cpu)
{
- counter->state = PERF_COUNTER_STATE_ACTIVE;
- counter->oncpu = cpu;
- counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
- if (is_software_counter(counter))
- counter->pmu->enable(counter);
+ event->state = PERF_EVENT_STATE_ACTIVE;
+ event->oncpu = cpu;
+ event->tstamp_running += event->ctx->time - event->tstamp_stopped;
+ if (is_software_event(event))
+ event->pmu->enable(event);
}
/*
- * Called to enable a whole group of counters.
+ * Called to enable a whole group of events.
* Returns 1 if the group was enabled, or -EAGAIN if it could not be.
* Assumes the caller has disabled interrupts and has
* frozen the PMU with hw_perf_save_disable.
*/
-int hw_perf_group_sched_in(struct perf_counter *group_leader,
+int hw_perf_group_sched_in(struct perf_event *group_leader,
struct perf_cpu_context *cpuctx,
- struct perf_counter_context *ctx, int cpu)
+ struct perf_event_context *ctx, int cpu)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
long i, n, n0;
- struct perf_counter *sub;
+ struct perf_event *sub;
if (!ppmu)
return 0;
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- n0 = cpuhw->n_counters;
- n = collect_events(group_leader, ppmu->n_counter - n0,
- &cpuhw->counter[n0], &cpuhw->events[n0],
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ n0 = cpuhw->n_events;
+ n = collect_events(group_leader, ppmu->n_event - n0,
+ &cpuhw->event[n0], &cpuhw->events[n0],
&cpuhw->flags[n0]);
if (n < 0)
return -EAGAIN;
- if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+ if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
return -EAGAIN;
i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
if (i < 0)
return -EAGAIN;
- cpuhw->n_counters = n0 + n;
+ cpuhw->n_events = n0 + n;
cpuhw->n_added += n;
/*
- * OK, this group can go on; update counter states etc.,
- * and enable any software counters
+ * OK, this group can go on; update event states etc.,
+ * and enable any software events
*/
for (i = n0; i < n0 + n; ++i)
- cpuhw->counter[i]->hw.config = cpuhw->events[i];
+ cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuctx->active_oncpu += n;
n = 1;
- counter_sched_in(group_leader, cpu);
+ event_sched_in(group_leader, cpu);
list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
- if (sub->state != PERF_COUNTER_STATE_OFF) {
- counter_sched_in(sub, cpu);
+ if (sub->state != PERF_EVENT_STATE_OFF) {
+ event_sched_in(sub, cpu);
++n;
}
}
@@ -776,14 +776,14 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
}
/*
- * Add a counter to the PMU.
- * If all counters are not already frozen, then we disable and
+ * Add a event to the PMU.
+ * If all events are not already frozen, then we disable and
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
-static int power_pmu_enable(struct perf_counter *counter)
+static int power_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
unsigned long flags;
int n0;
int ret = -EAGAIN;
@@ -792,23 +792,23 @@ static int power_pmu_enable(struct perf_counter *counter)
perf_disable();
/*
- * Add the counter to the list (if there is room)
+ * Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- n0 = cpuhw->n_counters;
- if (n0 >= ppmu->n_counter)
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ n0 = cpuhw->n_events;
+ if (n0 >= ppmu->n_event)
goto out;
- cpuhw->counter[n0] = counter;
- cpuhw->events[n0] = counter->hw.config;
- cpuhw->flags[n0] = counter->hw.counter_base;
- if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+ cpuhw->event[n0] = event;
+ cpuhw->events[n0] = event->hw.config;
+ cpuhw->flags[n0] = event->hw.event_base;
+ if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
- counter->hw.config = cpuhw->events[n0];
- ++cpuhw->n_counters;
+ event->hw.config = cpuhw->events[n0];
+ ++cpuhw->n_events;
++cpuhw->n_added;
ret = 0;
@@ -819,46 +819,46 @@ static int power_pmu_enable(struct perf_counter *counter)
}
/*
- * Remove a counter from the PMU.
+ * Remove a event from the PMU.
*/
-static void power_pmu_disable(struct perf_counter *counter)
+static void power_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
perf_disable();
- power_pmu_read(counter);
-
- cpuhw = &__get_cpu_var(cpu_hw_counters);
- for (i = 0; i < cpuhw->n_counters; ++i) {
- if (counter == cpuhw->counter[i]) {
- while (++i < cpuhw->n_counters)
- cpuhw->counter[i-1] = cpuhw->counter[i];
- --cpuhw->n_counters;
- ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
- if (counter->hw.idx) {
- write_pmc(counter->hw.idx, 0);
- counter->hw.idx = 0;
+ power_pmu_read(event);
+
+ cpuhw = &__get_cpu_var(cpu_hw_events);
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ if (event == cpuhw->event[i]) {
+ while (++i < cpuhw->n_events)
+ cpuhw->event[i-1] = cpuhw->event[i];
+ --cpuhw->n_events;
+ ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
+ if (event->hw.idx) {
+ write_pmc(event->hw.idx, 0);
+ event->hw.idx = 0;
}
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
break;
}
}
for (i = 0; i < cpuhw->n_limited; ++i)
- if (counter == cpuhw->limited_counter[i])
+ if (event == cpuhw->limited_event[i])
break;
if (i < cpuhw->n_limited) {
while (++i < cpuhw->n_limited) {
- cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
+ cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
}
--cpuhw->n_limited;
}
- if (cpuhw->n_counters == 0) {
- /* disable exceptions if no counters are running */
+ if (cpuhw->n_events == 0) {
+ /* disable exceptions if no events are running */
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
@@ -867,28 +867,28 @@ static void power_pmu_disable(struct perf_counter *counter)
}
/*
- * Re-enable interrupts on a counter after they were throttled
+ * Re-enable interrupts on a event after they were throttled
* because they were coming too fast.
*/
-static void power_pmu_unthrottle(struct perf_counter *counter)
+static void power_pmu_unthrottle(struct perf_event *event)
{
s64 val, left;
unsigned long flags;
- if (!counter->hw.idx || !counter->hw.sample_period)
+ if (!event->hw.idx || !event->hw.sample_period)
return;
local_irq_save(flags);
perf_disable();
- power_pmu_read(counter);
- left = counter->hw.sample_period;
- counter->hw.last_period = left;
+ power_pmu_read(event);
+ left = event->hw.sample_period;
+ event->hw.last_period = left;
val = 0;
if (left < 0x80000000L)
val = 0x80000000L - left;
- write_pmc(counter->hw.idx, val);
- atomic64_set(&counter->hw.prev_count, val);
- atomic64_set(&counter->hw.period_left, left);
- perf_counter_update_userpage(counter);
+ write_pmc(event->hw.idx, val);
+ atomic64_set(&event->hw.prev_count, val);
+ atomic64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
perf_enable();
local_irq_restore(flags);
}
@@ -901,29 +901,29 @@ struct pmu power_pmu = {
};
/*
- * Return 1 if we might be able to put counter on a limited PMC,
+ * Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
- * A counter can only go on a limited PMC if it counts something
+ * A event can only go on a limited PMC if it counts something
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
-static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
unsigned int flags)
{
int n;
u64 alt[MAX_EVENT_ALTERNATIVES];
- if (counter->attr.exclude_user
- || counter->attr.exclude_kernel
- || counter->attr.exclude_hv
- || counter->attr.sample_period)
+ if (event->attr.exclude_user
+ || event->attr.exclude_kernel
+ || event->attr.exclude_hv
+ || event->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
return 1;
/*
- * The requested event isn't on a limited PMC already;
+ * The requested event_id isn't on a limited PMC already;
* see if any alternative code goes on a limited PMC.
*/
if (!ppmu->get_alternatives)
@@ -936,9 +936,9 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
}
/*
- * Find an alternative event that goes on a normal PMC, if possible,
- * and return the event code, or 0 if there is no such alternative.
- * (Note: event code 0 is "don't count" on all machines.)
+ * Find an alternative event_id that goes on a normal PMC, if possible,
+ * and return the event_id code, or 0 if there is no such alternative.
+ * (Note: event_id code 0 is "don't count" on all machines.)
*/
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
@@ -952,26 +952,26 @@ static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
return alt[0];
}
-/* Number of perf_counters counting hardware events */
-static atomic_t num_counters;
+/* Number of perf_events counting hardware events */
+static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
- * Release the PMU if this is the last perf_counter.
+ * Release the PMU if this is the last perf_event.
*/
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- if (!atomic_add_unless(&num_counters, -1, 1)) {
+ if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_dec_return(&num_counters) == 0)
+ if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
- * Translate a generic cache event config to a raw event code.
+ * Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
@@ -1000,39 +1000,39 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
- struct perf_counter *ctrs[MAX_HWCOUNTERS];
- u64 events[MAX_HWCOUNTERS];
- unsigned int cflags[MAX_HWCOUNTERS];
+ struct perf_event *ctrs[MAX_HWEVENTS];
+ u64 events[MAX_HWEVENTS];
+ unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
- struct cpu_hw_counters *cpuhw;
+ struct cpu_hw_events *cpuhw;
if (!ppmu)
return ERR_PTR(-ENXIO);
- switch (counter->attr.type) {
+ switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
- ev = counter->attr.config;
+ ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
- err = hw_perf_cache_event(counter->attr.config, &ev);
+ err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return ERR_PTR(err);
break;
case PERF_TYPE_RAW:
- ev = counter->attr.config;
+ ev = event->attr.config;
break;
default:
return ERR_PTR(-EINVAL);
}
- counter->hw.config_base = ev;
- counter->hw.idx = 0;
+ event->hw.config_base = ev;
+ event->hw.idx = 0;
/*
* If we are not running on a hypervisor, force the
@@ -1040,28 +1040,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
- counter->attr.exclude_hv = 0;
+ event->attr.exclude_hv = 0;
/*
- * If this is a per-task counter, then we can use
+ * If this is a per-task event, then we can use
* PM_RUN_* events interchangeably with their non RUN_*
* equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
* XXX we should check if the task is an idle task.
*/
flags = 0;
- if (counter->ctx->task)
+ if (event->ctx->task)
flags |= PPMU_ONLY_COUNT_RUN;
/*
- * If this machine has limited counters, check whether this
- * event could go on a limited counter.
+ * If this machine has limited PMCs, check whether this
+ * event could go on a limited PMC.
*/
if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
- if (can_go_on_limited_pmc(counter, ev, flags)) {
+ if (can_go_on_limited_pmc(event, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
/*
- * The requested event is on a limited PMC,
+ * The requested event_id is on a limited PMC,
* but we can't use a limited PMC; see if any
* alternative goes on a normal PMC.
*/
@@ -1073,50 +1073,50 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
/*
* If this is in a group, check if it can go on with all the
- * other hardware counters in the group. We assume the counter
+ * other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
- if (counter->group_leader != counter) {
- n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+ if (event->group_leader != event) {
+ n = collect_events(event->group_leader, ppmu->n_event - 1,
ctrs, events, cflags);
if (n < 0)
return ERR_PTR(-EINVAL);
}
events[n] = ev;
- ctrs[n] = counter;
+ ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return ERR_PTR(-EINVAL);
- cpuhw = &get_cpu_var(cpu_hw_counters);
+ cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
- put_cpu_var(cpu_hw_counters);
+ put_cpu_var(cpu_hw_events);
if (err)
return ERR_PTR(-EINVAL);
- counter->hw.config = events[n];
- counter->hw.counter_base = cflags[n];
- counter->hw.last_period = counter->hw.sample_period;
- atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+ event->hw.config = events[n];
+ event->hw.event_base = cflags[n];
+ event->hw.last_period = event->hw.sample_period;
+ atomic64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
- * If no counters are currently in use, then we have to take a
+ * If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
- if (!atomic_inc_not_zero(&num_counters)) {
+ if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_counters) == 0 &&
- reserve_pmc_hardware(perf_counter_interrupt))
+ if (atomic_read(&num_events) == 0 &&
+ reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
- atomic_inc(&num_counters);
+ atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
- counter->destroy = hw_perf_counter_destroy;
+ event->destroy = hw_perf_event_destroy;
if (err)
return ERR_PTR(err);
@@ -1124,28 +1124,28 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
}
/*
- * A counter has overflowed; update its count and record
+ * An event has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
-static void record_and_restart(struct perf_counter *counter, unsigned long val,
+static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi)
{
- u64 period = counter->hw.sample_period;
+ u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
/* we don't have to worry about interrupts here */
- prev = atomic64_read(&counter->hw.prev_count);
+ prev = atomic64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
/*
- * See if the total period for this counter has expired,
+ * See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
- left = atomic64_read(&counter->hw.period_left) - delta;
+ left = atomic64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
@@ -1163,18 +1163,18 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
if (record) {
struct perf_sample_data data = {
.addr = 0,
- .period = counter->hw.last_period,
+ .period = event->hw.last_period,
};
- if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+ if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_counter_overflow(counter, nmi, &data, regs)) {
+ if (perf_event_overflow(event, nmi, &data, regs)) {
/*
* Interrupts are coming too fast - throttle them
- * by setting the counter to 0, so it will be
+ * by setting the counter value to 0, so it will be
* at least 2^30 cycles until the next interrupt
- * (assuming each counter counts at most 2 counts
+ * (assuming each event counts at most 2 counts
* per cycle).
*/
val = 0;
@@ -1182,15 +1182,15 @@ static void record_and_restart(struct perf_counter *counter, unsigned long val,
}
}
- write_pmc(counter->hw.idx, val);
- atomic64_set(&counter->hw.prev_count, val);
- atomic64_set(&counter->hw.period_left, left);
- perf_counter_update_userpage(counter);
+ write_pmc(event->hw.idx, val);
+ atomic64_set(&event->hw.prev_count, val);
+ atomic64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
}
/*
* Called from generic code to get the misc flags (i.e. processor mode)
- * for an event.
+ * for an event_id.
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
@@ -1198,13 +1198,13 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
if (flags)
return flags;
- return user_mode(regs) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ return user_mode(regs) ? PERF_RECORD_MISC_USER :
+ PERF_RECORD_MISC_KERNEL;
}
/*
* Called from generic code to get the instruction pointer
- * for an event.
+ * for an event_id.
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
@@ -1220,17 +1220,17 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
/*
* Performance monitor interrupt stuff
*/
-static void perf_counter_interrupt(struct pt_regs *regs)
+static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
- struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
- struct perf_counter *counter;
+ struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
if (cpuhw->n_limited)
- freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
+ freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
perf_read_regs(regs);
@@ -1241,26 +1241,26 @@ static void perf_counter_interrupt(struct pt_regs *regs)
else
irq_enter();
- for (i = 0; i < cpuhw->n_counters; ++i) {
- counter = cpuhw->counter[i];
- if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
+ for (i = 0; i < cpuhw->n_events; ++i) {
+ event = cpuhw->event[i];
+ if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- val = read_pmc(counter->hw.idx);
+ val = read_pmc(event->hw.idx);
if ((int)val < 0) {
- /* counter has overflowed */
+ /* event has overflowed */
found = 1;
- record_and_restart(counter, val, regs, nmi);
+ record_and_restart(event, val, regs, nmi);
}
}
/*
- * In case we didn't find and reset the counter that caused
- * the interrupt, scan all counters and reset any that are
+ * In case we didn't find and reset the event that caused
+ * the interrupt, scan all events and reset any that are
* negative, to avoid getting continual interrupts.
* Any that we processed in the previous loop will not be negative.
*/
if (!found) {
- for (i = 0; i < ppmu->n_counter; ++i) {
+ for (i = 0; i < ppmu->n_event; ++i) {
if (is_limited_pmc(i + 1))
continue;
val = read_pmc(i + 1);
@@ -1271,9 +1271,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
/*
* Reset MMCR0 to its normal value. This will set PMXE and
- * clear FC (freeze counters) and PMAO (perf mon alert occurred)
+ * clear FC (freeze events) and PMAO (perf mon alert occurred)
* and thus allow interrupts to occur again.
- * XXX might want to use MSR.PM to keep the counters frozen until
+ * XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
write_mmcr0(cpuhw, cpuhw->mmcr[0]);
@@ -1284,9 +1284,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
irq_exit();
}
-void hw_perf_counter_setup(int cpu)
+void hw_perf_event_setup(int cpu)
{
- struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+ struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
@@ -1308,7 +1308,7 @@ int register_power_pmu(struct power_pmu *pmu)
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
- freeze_counters_kernel = MMCR0_FCHV;
+ freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */
return 0;
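For reference, the powerpc hunks above repeatedly rely on the idiom delta = (val - prev) & 0xfffffffful (see record_and_restart() and the interrupt handler): the physical PMCs are only 32 bits wide, so the distance since the last read is taken modulo 2^32 before being folded into the 64-bit software count. A minimal stand-alone sketch of that idiom (illustrative only; the struct and function names below are made up, and the kernel uses atomic64_* accessors rather than plain fields):

    #include <stdint.h>
    #include <stdio.h>

    struct hw_count {
            uint64_t count;     /* 64-bit accumulated event count      */
            uint64_t prev_raw;  /* last raw 32-bit PMC value sampled   */
    };

    /* Fold a fresh 32-bit PMC reading into the 64-bit count. */
    static void fold_pmc(struct hw_count *hc, uint32_t raw)
    {
            uint64_t delta = (raw - hc->prev_raw) & 0xfffffffful;

            hc->count += delta;
            hc->prev_raw = raw;
    }

    int main(void)
    {
            struct hw_count hc = { .count = 0, .prev_raw = 0xfffffff0 };

            fold_pmc(&hc, 0x00000010);      /* the PMC wrapped past 2^32 */
            printf("count = %llu\n", (unsigned long long)hc.count);   /* 32 */
            return 0;
    }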
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 3c90a3d9173..2a361cdda63 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 31918af3e35..0f4c1c73a6a 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 867f6f66396..c351b3a57fb 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index fa21890531d..ca399ba5034 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 018d094d92f..28a4daacdc0 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/string.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 75dccb71a04..479574413a9 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 465e498bcb3..df45a7449a6 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -527,25 +527,25 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_counter_pending);
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_event_pending);
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
- get_cpu_var(perf_counter_pending) = 1;
+ get_cpu_var(perf_event_pending) = 1;
set_dec(1);
- put_cpu_var(perf_counter_pending);
+ put_cpu_var(perf_event_pending);
}
-#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
-#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
-#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
-#define test_perf_counter_pending() 0
-#define clear_perf_counter_pending()
+#define test_perf_event_pending() 0
+#define clear_perf_event_pending()
-#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -573,9 +573,9 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
- if (test_perf_counter_pending()) {
- clear_perf_counter_pending();
- perf_counter_do_pending();
+ if (test_perf_event_pending()) {
+ clear_perf_event_pending();
+ perf_event_do_pending();
}
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 830bef0a113..e7dae82c128 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,7 +29,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -171,7 +171,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
@@ -312,7 +312,7 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -323,7 +323,7 @@ good_area:
#endif
} else {
current->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
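The perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS*, ...) hooks added to the fault handlers (here and in the s390/sh hunks below) feed the software counters exposed through the renamed sys_perf_event_open(). For reference, a minimal userspace sketch (not part of this patch) that counts its own page faults through that interface; the argument order matches the s390 wrapper above (attr, pid, cpu, group_fd, flags):

    #define _GNU_SOURCE
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count = 0;
            int fd, i;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_PAGE_FAULTS;

            /* pid = 0 (this task), cpu = -1 (any), group_fd = -1, flags = 0 */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            /* Touch a megabyte of fresh memory to generate minor faults. */
            volatile char *p = malloc(1 << 20);
            if (!p)
                    return 1;
            for (i = 0; i < (1 << 20); i += 4096)
                    p[i] = 1;

            if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                    printf("page faults: %lld\n", count);
            return 0;
    }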
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 9efc8bda01b..e382cae678b 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -280,9 +280,9 @@ config PPC_HAVE_PMU_SUPPORT
config PPC_PERF_CTRS
def_bool y
- depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+ depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
help
- This enables the powerpc-specific perf_counter back-end.
+ This enables the powerpc-specific perf_event back-end.
config SMP
depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1c866efd217..43c0acad716 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,7 +94,7 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config SCHED_OMIT_FRAME_POINTER
bool
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
deleted file mode 100644
index 7015188c2cc..00000000000
--- a/arch/s390/include/asm/perf_counter.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Performance counter support - s390 specific definitions.
- *
- * Copyright 2009 Martin Schwidefsky, IBM Corporation.
- */
-
-static inline void set_perf_counter_pending(void) {}
-static inline void clear_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
new file mode 100644
index 00000000000..3840cbe7763
--- /dev/null
+++ b/arch/s390/include/asm/perf_event.h
@@ -0,0 +1,10 @@
+/*
+ * Performance event support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_event_pending(void) {}
+static inline void clear_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index c80602d7c88..cb5232df151 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -268,7 +268,7 @@
#define __NR_preadv 328
#define __NR_pwritev 329
#define __NR_rt_tgsigqueueinfo 330
-#define __NR_perf_counter_open 331
+#define __NR_perf_event_open 331
#define NR_syscalls 332
/*
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 88a83366819..624790042d4 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1832,11 +1832,11 @@ compat_sys_rt_tgsigqueueinfo_wrapper:
llgtr %r5,%r5 # struct compat_siginfo *
jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
- .globl sys_perf_counter_open_wrapper
-sys_perf_counter_open_wrapper:
- llgtr %r2,%r2 # const struct perf_counter_attr *
+ .globl sys_perf_event_open_wrapper
+sys_perf_event_open_wrapper:
+ llgtr %r2,%r2 # const struct perf_event_attr *
lgfr %r3,%r3 # pid_t
lgfr %r4,%r4 # int
lgfr %r5,%r5 # int
llgfr %r6,%r6 # unsigned long
- jg sys_perf_counter_open # branch to system call
+ jg sys_perf_event_open # branch to system call
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index ad1acd20038..0b5083681e7 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -339,4 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
-SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1abbadd497e..6d507462967 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,7 +10,7 @@
* Copyright (C) 1995 Linus Torvalds
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -306,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
* interrupts again and then search the VMAs
*/
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
down_read(&mm->mmap_sem);
si_code = SEGV_MAPERR;
@@ -366,11 +366,11 @@ good_area:
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4df3570fe51..b940424f8cc 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -16,7 +16,7 @@ config SUPERH
select HAVE_IOREMAP_PROT if MMU
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
diff --git a/arch/sh/include/asm/perf_counter.h b/arch/sh/include/asm/perf_counter.h
deleted file mode 100644
index d8e6bb9c0cc..00000000000
--- a/arch/sh/include/asm/perf_counter.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_SH_PERF_COUNTER_H
-#define __ASM_SH_PERF_COUNTER_H
-
-/* SH only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) {}
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#endif /* __ASM_SH_PERF_COUNTER_H */
diff --git a/arch/sh/include/asm/perf_event.h b/arch/sh/include/asm/perf_event.h
new file mode 100644
index 00000000000..11a302297ab
--- /dev/null
+++ b/arch/sh/include/asm/perf_event.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_SH_PERF_EVENT_H
+#define __ASM_SH_PERF_EVENT_H
+
+/* SH only supports software events through this interface. */
+static inline void set_perf_event_pending(void) {}
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#endif /* __ASM_SH_PERF_EVENT_H */
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index 925dd40d9d5..f3fd1b9eb6b 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -344,7 +344,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#define NR_syscalls 337
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 2b84bc916bc..343ce8f073e 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -384,7 +384,7 @@
#define __NR_preadv 361
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
-#define __NR_perf_counter_open 364
+#define __NR_perf_event_open 364
#ifdef __KERNEL__
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 16ba225ede8..19fd11dd987 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -352,4 +352,4 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index af6fb7410c2..5bfde6c7749 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -390,4 +390,4 @@ sys_call_table:
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 781b413ff82..47530104e0a 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -15,7 +15,7 @@
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
@@ -157,7 +157,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -208,11 +208,11 @@ survive:
}
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 2dcc48528f7..de0b0e88182 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -20,7 +20,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */
local_irq_enable();
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt or have no user
@@ -201,11 +201,11 @@ survive:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 86b82348b97..97fca4695e0 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,7 +25,7 @@ config SPARC
select ARCH_WANT_OPTIONAL_GPIOLIB
select RTC_CLASS
select RTC_DRV_M48T59
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
@@ -47,7 +47,7 @@ config SPARC64
select RTC_DRV_BQ4802
select RTC_DRV_SUN4V
select RTC_DRV_STARFIRE
- select HAVE_PERF_COUNTERS
+ select HAVE_PERF_EVENTS
config ARCH_DEFCONFIG
string
diff --git a/arch/sparc/include/asm/perf_counter.h b/arch/sparc/include/asm/perf_counter.h
deleted file mode 100644
index 5d7a8ca0e49..00000000000
--- a/arch/sparc/include/asm/perf_counter.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_SPARC_PERF_COUNTER_H
-#define __ASM_SPARC_PERF_COUNTER_H
-
-extern void set_perf_counter_pending(void);
-
-#define PERF_COUNTER_INDEX_OFFSET 0
-
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-#else
-static inline void init_hw_perf_counters(void) { }
-#endif
-
-#endif
diff --git a/arch/sparc/include/asm/perf_event.h b/arch/sparc/include/asm/perf_event.h
new file mode 100644
index 00000000000..7e2669894ce
--- /dev/null
+++ b/arch/sparc/include/asm/perf_event.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_SPARC_PERF_EVENT_H
+#define __ASM_SPARC_PERF_EVENT_H
+
+extern void set_perf_event_pending(void);
+
+#define PERF_EVENT_INDEX_OFFSET 0
+
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+#else
+static inline void init_hw_perf_events(void) { }
+#endif
+
+#endif
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 706df669f3b..42f2316c3ea 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -395,7 +395,7 @@
#define __NR_preadv 324
#define __NR_pwritev 325
#define __NR_rt_tgsigqueueinfo 326
-#define __NR_perf_counter_open 327
+#define __NR_perf_event_open 327
#define NR_SYSCALLS 328
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 247cc620cee..3a048fad7ee 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -104,5 +104,5 @@ obj-$(CONFIG_AUDIT) += audit.o
audit--$(CONFIG_AUDIT) := compat_audit.o
obj-$(CONFIG_COMPAT) += $(audit--y)
-pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o
+pc--$(CONFIG_PERF_EVENTS) := perf_event.o
obj-$(CONFIG_SPARC64) += $(pc--y)
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 378eb53e077..b129611590a 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/smp.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/ptrace.h>
#include <asm/local.h>
#include <asm/pcr.h>
@@ -265,7 +265,7 @@ int __init nmi_init(void)
}
}
if (!err)
- init_hw_perf_counters();
+ init_hw_perf_events();
return err;
}
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 68ff0010707..2d94e7a03af 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -7,7 +7,7 @@
#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <asm/pil.h>
#include <asm/pcr.h>
@@ -15,7 +15,7 @@
/* This code is shared between various users of the performance
* counters. Users will be oprofile, pseudo-NMI watchdog, and the
- * perf_counter support layer.
+ * perf_event support layer.
*/
#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
@@ -42,14 +42,14 @@ void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
-#ifdef CONFIG_PERF_COUNTERS
- perf_counter_do_pending();
+#ifdef CONFIG_PERF_EVENTS
+ perf_event_do_pending();
#endif
irq_exit();
set_irq_regs(old_regs);
}
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
set_softint(1 << PIL_DEFERRED_PCR_WORK);
}
diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_event.c
index b1265ce8a05..2d6a1b10c81 100644
--- a/arch/sparc/kernel/perf_counter.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,8 +1,8 @@
-/* Performance counter support for sparc64.
+/* Performance event support for sparc64.
*
* Copyright (C) 2009 David S. Miller <davem@davemloft.net>
*
- * This code is based almost entirely upon the x86 perf counter
+ * This code is based almost entirely upon the x86 perf event
* code, which is:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
@@ -12,7 +12,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
@@ -46,19 +46,19 @@
* normal code.
*/
-#define MAX_HWCOUNTERS 2
+#define MAX_HWEVENTS 2
#define MAX_PERIOD ((1UL << 32) - 1)
#define PIC_UPPER_INDEX 0
#define PIC_LOWER_INDEX 1
-struct cpu_hw_counters {
- struct perf_counter *counters[MAX_HWCOUNTERS];
- unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
- unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
+struct cpu_hw_events {
+ struct perf_event *events[MAX_HWEVENTS];
+ unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
+ unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
int enabled;
};
-DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
struct perf_event_map {
u16 encoding;
@@ -87,9 +87,9 @@ static const struct perf_event_map ultra3i_perfmon_event_map[] = {
[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};
-static const struct perf_event_map *ultra3i_event_map(int event)
+static const struct perf_event_map *ultra3i_event_map(int event_id)
{
- return &ultra3i_perfmon_event_map[event];
+ return &ultra3i_perfmon_event_map[event_id];
}
static const struct sparc_pmu ultra3i_pmu = {
@@ -111,9 +111,9 @@ static const struct perf_event_map niagara2_perfmon_event_map[] = {
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};
-static const struct perf_event_map *niagara2_event_map(int event)
+static const struct perf_event_map *niagara2_event_map(int event_id)
{
- return &niagara2_perfmon_event_map[event];
+ return &niagara2_perfmon_event_map[event_id];
}
static const struct sparc_pmu niagara2_pmu = {
@@ -130,13 +130,13 @@ static const struct sparc_pmu niagara2_pmu = {
static const struct sparc_pmu *sparc_pmu __read_mostly;
-static u64 event_encoding(u64 event, int idx)
+static u64 event_encoding(u64 event_id, int idx)
{
if (idx == PIC_UPPER_INDEX)
- event <<= sparc_pmu->upper_shift;
+ event_id <<= sparc_pmu->upper_shift;
else
- event <<= sparc_pmu->lower_shift;
- return event;
+ event_id <<= sparc_pmu->lower_shift;
+ return event_id;
}
static u64 mask_for_index(int idx)
@@ -151,7 +151,7 @@ static u64 nop_for_index(int idx)
sparc_pmu->lower_nop, idx);
}
-static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
u64 val, mask = mask_for_index(idx);
@@ -160,7 +160,7 @@ static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
pcr_ops->write((val & ~mask) | hwc->config);
}
-static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
+static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
u64 mask = mask_for_index(idx);
@@ -172,7 +172,7 @@ static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
void hw_perf_enable(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
int i;
@@ -184,9 +184,9 @@ void hw_perf_enable(void)
val = pcr_ops->read();
- for (i = 0; i < MAX_HWCOUNTERS; i++) {
- struct perf_counter *cp = cpuc->counters[i];
- struct hw_perf_counter *hwc;
+ for (i = 0; i < MAX_HWEVENTS; i++) {
+ struct perf_event *cp = cpuc->events[i];
+ struct hw_perf_event *hwc;
if (!cp)
continue;
@@ -199,7 +199,7 @@ void hw_perf_enable(void)
void hw_perf_disable(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
@@ -241,8 +241,8 @@ static void write_pmc(int idx, u64 val)
write_pic(pic);
}
-static int sparc_perf_counter_set_period(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static int sparc_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
@@ -268,33 +268,33 @@ static int sparc_perf_counter_set_period(struct perf_counter *counter,
write_pmc(idx, (u64)(-left) & 0xffffffff);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return ret;
}
-static int sparc_pmu_enable(struct perf_counter *counter)
+static int sparc_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
- sparc_pmu_disable_counter(hwc, idx);
+ sparc_pmu_disable_event(hwc, idx);
- cpuc->counters[idx] = counter;
+ cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- sparc_perf_counter_set_period(counter, hwc, idx);
- sparc_pmu_enable_counter(hwc, idx);
- perf_counter_update_userpage(counter);
+ sparc_perf_event_set_period(event, hwc, idx);
+ sparc_pmu_enable_event(hwc, idx);
+ perf_event_update_userpage(event);
return 0;
}
-static u64 sparc_perf_counter_update(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+static u64 sparc_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
int shift = 64 - 32;
u64 prev_raw_count, new_raw_count;
@@ -311,79 +311,79 @@ again:
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
-static void sparc_pmu_disable(struct perf_counter *counter)
+static void sparc_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
clear_bit(idx, cpuc->active_mask);
- sparc_pmu_disable_counter(hwc, idx);
+ sparc_pmu_disable_event(hwc, idx);
barrier();
- sparc_perf_counter_update(counter, hwc, idx);
- cpuc->counters[idx] = NULL;
+ sparc_perf_event_update(event, hwc, idx);
+ cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
-static void sparc_pmu_read(struct perf_counter *counter)
+static void sparc_pmu_read(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
- sparc_perf_counter_update(counter, hwc, hwc->idx);
+ struct hw_perf_event *hwc = &event->hw;
+ sparc_perf_event_update(event, hwc, hwc->idx);
}
-static void sparc_pmu_unthrottle(struct perf_counter *counter)
+static void sparc_pmu_unthrottle(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
- sparc_pmu_enable_counter(hwc, hwc->idx);
+ struct hw_perf_event *hwc = &event->hw;
+ sparc_pmu_enable_event(hwc, hwc->idx);
}
-static atomic_t active_counters = ATOMIC_INIT(0);
+static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
-void perf_counter_grab_pmc(void)
+void perf_event_grab_pmc(void)
{
- if (atomic_inc_not_zero(&active_counters))
+ if (atomic_inc_not_zero(&active_events))
return;
mutex_lock(&pmc_grab_mutex);
- if (atomic_read(&active_counters) == 0) {
+ if (atomic_read(&active_events) == 0) {
if (atomic_read(&nmi_active) > 0) {
on_each_cpu(stop_nmi_watchdog, NULL, 1);
BUG_ON(atomic_read(&nmi_active) != 0);
}
- atomic_inc(&active_counters);
+ atomic_inc(&active_events);
}
mutex_unlock(&pmc_grab_mutex);
}
-void perf_counter_release_pmc(void)
+void perf_event_release_pmc(void)
{
- if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
if (atomic_read(&nmi_active) == 0)
on_each_cpu(start_nmi_watchdog, NULL, 1);
mutex_unlock(&pmc_grab_mutex);
}
}
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- perf_counter_release_pmc();
+ perf_event_release_pmc();
}
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
{
- struct perf_counter_attr *attr = &counter->attr;
- struct hw_perf_counter *hwc = &counter->hw;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
const struct perf_event_map *pmap;
u64 enc;
@@ -396,8 +396,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
- perf_counter_grab_pmc();
- counter->destroy = hw_perf_counter_destroy;
+ perf_event_grab_pmc();
+ event->destroy = hw_perf_event_destroy;
/* We save the enable bits in the config_base. So to
* turn off sampling just write 'config', and to enable
@@ -439,16 +439,16 @@ static const struct pmu pmu = {
.unthrottle = sparc_pmu_unthrottle,
};
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
- int err = __hw_perf_counter_init(counter);
+ int err = __hw_perf_event_init(event);
if (err)
return ERR_PTR(err);
return &pmu;
}
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
unsigned long flags;
u64 pcr, pic;
@@ -471,16 +471,16 @@ void perf_counter_print_debug(void)
local_irq_restore(flags);
}
-static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
+static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
- if (!atomic_read(&active_counters))
+ if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
@@ -495,32 +495,32 @@ static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
- for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
- struct perf_counter *counter = cpuc->counters[idx];
- struct hw_perf_counter *hwc;
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx < MAX_HWEVENTS; idx++) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- hwc = &counter->hw;
- val = sparc_perf_counter_update(counter, hwc, idx);
+ hwc = &event->hw;
+ val = sparc_perf_event_update(event, hwc, idx);
if (val & (1ULL << 31))
continue;
- data.period = counter->hw.last_period;
- if (!sparc_perf_counter_set_period(counter, hwc, idx))
+ data.period = event->hw.last_period;
+ if (!sparc_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- sparc_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ sparc_pmu_disable_event(hwc, idx);
}
return NOTIFY_STOP;
}
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+ .notifier_call = perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
@@ -536,9 +536,9 @@ static bool __init supported_pmu(void)
return false;
}
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
{
- pr_info("Performance counters: ");
+ pr_info("Performance events: ");
if (!supported_pmu()) {
pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
@@ -547,10 +547,10 @@ void __init init_hw_perf_counters(void)
pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
- /* All sparc64 PMUs currently have 2 counters. But this simple
- * driver only supports one active counter at a time.
+ /* All sparc64 PMUs currently have 2 hardware counters. But this simple
+ * driver only supports one active event at a time.
*/
- perf_max_counters = 1;
+ perf_max_events = 1;
- register_die_notifier(&perf_counter_nmi_notifier);
+ register_die_notifier(&perf_event_nmi_notifier);
}
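sparc_perf_event_set_period() above programs the 32-bit PIC with (u64)(-left) & 0xffffffff: starting the hardware counter at 2^32 - left makes it wrap, and hence raise the overflow condition, after exactly left further events, which is how the sample period is enforced. A small stand-alone illustration of the arithmetic (the values are made up, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t  left  = 1000;                        /* events until the next sample */
            uint32_t start = (uint64_t)(-left) & 0xffffffff;

            printf("program PIC with 0x%08x\n", start);   /* 0xfffffc18 = 2^32 - 1000 */
            printf("PIC after %lld events: 0x%08x\n",
                   (long long)left, (uint32_t)(start + left));   /* wraps back to 0 */
            return 0;
    }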
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 04181577cb6..0f1658d3749 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 91b06b7f7ac..009825f6e73 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -83,7 +83,7 @@ sys_call_table32:
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
- .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
+ .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open
#endif /* CONFIG_COMPAT */
@@ -158,4 +158,4 @@ sys_call_table:
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
- .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
+ .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 51c59015b28..e4ff5d1280c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,7 @@ config X86
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
- select HAVE_PERF_COUNTERS if (!M386 && !M486)
+ select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index ba331bfd111..74619c4f9fd 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -831,5 +831,5 @@ ia32_sys_call_table:
.quad compat_sys_preadv
.quad compat_sys_pwritev
.quad compat_sys_rt_tgsigqueueinfo /* 335 */
- .quad sys_perf_counter_open
+ .quad sys_perf_event_open
ia32_syscall_end:
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 5e3f2044f0d..f5693c81a1d 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
#endif
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_event.h
index e7b7c938ae2..ad7ce3fd506 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -1,8 +1,8 @@
-#ifndef _ASM_X86_PERF_COUNTER_H
-#define _ASM_X86_PERF_COUNTER_H
+#ifndef _ASM_X86_PERF_EVENT_H
+#define _ASM_X86_PERF_EVENT_H
/*
- * Performance counter hw details:
+ * Performance event hw details:
*/
#define X86_PMC_MAX_GENERIC 8
@@ -43,7 +43,7 @@
union cpuid10_eax {
struct {
unsigned int version_id:8;
- unsigned int num_counters:8;
+ unsigned int num_events:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
@@ -52,7 +52,7 @@ union cpuid10_eax {
union cpuid10_edx {
struct {
- unsigned int num_counters_fixed:4;
+ unsigned int num_events_fixed:4;
unsigned int reserved:28;
} split;
unsigned int full;
@@ -60,7 +60,7 @@ union cpuid10_edx {
/*
- * Fixed-purpose performance counters:
+ * Fixed-purpose performance events:
*/
/*
@@ -87,22 +87,22 @@ union cpuid10_edx {
/*
* We model BTS tracing as another fixed-mode PMC.
*
- * We choose a value in the middle of the fixed counter range, since lower
- * values are used by actual fixed counters and higher values are used
+ * We choose a value in the middle of the fixed event range, since lower
+ * values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
-#ifdef CONFIG_PERF_COUNTERS
-extern void init_hw_perf_counters(void);
-extern void perf_counters_lapic_init(void);
+#ifdef CONFIG_PERF_EVENTS
+extern void init_hw_perf_events(void);
+extern void perf_events_lapic_init(void);
-#define PERF_COUNTER_INDEX_OFFSET 0
+#define PERF_EVENT_INDEX_OFFSET 0
#else
-static inline void init_hw_perf_counters(void) { }
-static inline void perf_counters_lapic_init(void) { }
+static inline void init_hw_perf_events(void) { }
+static inline void perf_events_lapic_init(void) { }
#endif
-#endif /* _ASM_X86_PERF_COUNTER_H */
+#endif /* _ASM_X86_PERF_EVENT_H */
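The cpuid10_eax/cpuid10_edx unions above (num_counters* renamed to num_events*) mirror CPUID leaf 0xA, which enumerates the architectural PMU. A short userspace sketch (GCC/clang on x86; illustrative, not part of this patch) that decodes the same EAX fields:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0xA not available");
                    return 1;
            }
            printf("version_id       : %u\n", eax & 0xff);         /* architectural perfmon version */
            printf("num general PMCs : %u\n", (eax >> 8) & 0xff);  /* the union's num_events field  */
            printf("PMC bit width    : %u\n", (eax >> 16) & 0xff);
            printf("num fixed PMCs   : %u\n", edx & 0x1f);         /* low bits of EDX               */
            return 0;
    }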
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 8deaada61bc..6fb3c209a7e 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,7 +341,7 @@
#define __NR_preadv 333
#define __NR_pwritev 334
#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_counter_open 336
+#define __NR_perf_event_open 336
#ifdef __KERNEL__
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index b9f3c60de5f..8d3ad0adbc6 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,8 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
__SYSCALL(__NR_pwritev, sys_pwritev)
#define __NR_rt_tgsigqueueinfo 297
__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_counter_open 298
-__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
+#define __NR_perf_event_open 298
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a34601f5298..754174d09de 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,7 +14,7 @@
* Mikael Pettersson : PM converted to driver model.
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi_pmtmr.h>
@@ -35,7 +35,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
@@ -1189,7 +1189,7 @@ void __cpuinit setup_local_APIC(void)
apic_write(APIC_ESR, 0);
}
#endif
- perf_counters_lapic_init();
+ perf_events_lapic_init();
preempt_disable();
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 8dd30638fe4..68537e957a9 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2fea97eccf7..cc25c2b4a56 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <asm/stackprotector.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
@@ -869,7 +869,7 @@ void __init identify_boot_cpu(void)
#else
vgetcpu_set_mode();
#endif
- init_hw_perf_counters();
+ init_hw_perf_events();
}
void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_event.c
index b1f115696c8..0d03629fb1a 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1,5 +1,5 @@
/*
- * Performance counter x86 architecture code
+ * Performance events x86 architecture code
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
@@ -11,7 +11,7 @@
* For licencing details see kernel-base/COPYING
*/
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
@@ -27,10 +27,10 @@
#include <asm/stacktrace.h>
#include <asm/nmi.h>
-static u64 perf_counter_mask __read_mostly;
+static u64 perf_event_mask __read_mostly;
-/* The maximal number of PEBS counters: */
-#define MAX_PEBS_COUNTERS 4
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS 4
/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24
@@ -65,11 +65,11 @@ struct debug_store {
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
- u64 pebs_counter_reset[MAX_PEBS_COUNTERS];
+ u64 pebs_event_reset[MAX_PEBS_EVENTS];
};
-struct cpu_hw_counters {
- struct perf_counter *counters[X86_PMC_IDX_MAX];
+struct cpu_hw_events {
+ struct perf_event *events[X86_PMC_IDX_MAX];
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long interrupts;
@@ -86,17 +86,17 @@ struct x86_pmu {
int (*handle_irq)(struct pt_regs *);
void (*disable_all)(void);
void (*enable_all)(void);
- void (*enable)(struct hw_perf_counter *, int);
- void (*disable)(struct hw_perf_counter *, int);
+ void (*enable)(struct hw_perf_event *, int);
+ void (*disable)(struct hw_perf_event *, int);
unsigned eventsel;
unsigned perfctr;
u64 (*event_map)(int);
u64 (*raw_event)(u64);
int max_events;
- int num_counters;
- int num_counters_fixed;
- int counter_bits;
- u64 counter_mask;
+ int num_events;
+ int num_events_fixed;
+ int event_bits;
+ u64 event_mask;
int apic;
u64 max_period;
u64 intel_ctrl;
@@ -106,7 +106,7 @@ struct x86_pmu {
static struct x86_pmu x86_pmu __read_mostly;
-static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};
@@ -130,12 +130,12 @@ static u64 p6_pmu_event_map(int hw_event)
}
/*
- * Counter setting that is specified not to count anything.
+ * Event setting that is specified not to count anything.
* We use this to effectively disable a counter.
*
* L2_RQSTS with 0 MESI unit mask.
*/
-#define P6_NOP_COUNTER 0x0000002EULL
+#define P6_NOP_EVENT 0x0000002EULL
static u64 p6_pmu_raw_event(u64 hw_event)
{
@@ -143,14 +143,14 @@ static u64 p6_pmu_raw_event(u64 hw_event)
#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
#define P6_EVNTSEL_INV_MASK 0x00800000ULL
-#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL
+#define P6_EVNTSEL_REG_MASK 0xFF000000ULL
#define P6_EVNTSEL_MASK \
(P6_EVNTSEL_EVENT_MASK | \
P6_EVNTSEL_UNIT_MASK | \
P6_EVNTSEL_EDGE_MASK | \
P6_EVNTSEL_INV_MASK | \
- P6_EVNTSEL_COUNTER_MASK)
+ P6_EVNTSEL_REG_MASK)
return hw_event & P6_EVNTSEL_MASK;
}
@@ -469,14 +469,14 @@ static u64 intel_pmu_raw_event(u64 hw_event)
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \
CORE_EVNTSEL_UNIT_MASK | \
CORE_EVNTSEL_EDGE_MASK | \
CORE_EVNTSEL_INV_MASK | \
- CORE_EVNTSEL_COUNTER_MASK)
+ CORE_EVNTSEL_REG_MASK)
return hw_event & CORE_EVNTSEL_MASK;
}
@@ -596,28 +596,28 @@ static u64 amd_pmu_raw_event(u64 hw_event)
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
#define K7_EVNTSEL_INV_MASK 0x000800000ULL
-#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL
+#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
#define K7_EVNTSEL_MASK \
(K7_EVNTSEL_EVENT_MASK | \
K7_EVNTSEL_UNIT_MASK | \
K7_EVNTSEL_EDGE_MASK | \
K7_EVNTSEL_INV_MASK | \
- K7_EVNTSEL_COUNTER_MASK)
+ K7_EVNTSEL_REG_MASK)
return hw_event & K7_EVNTSEL_MASK;
}
/*
- * Propagate counter elapsed time into the generic counter.
- * Can only be executed on the CPU where the counter is active.
+ * Propagate event elapsed time into the generic event.
+ * Can only be executed on the CPU where the event is active.
* Returns the delta events processed.
*/
static u64
-x86_perf_counter_update(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+x86_perf_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
- int shift = 64 - x86_pmu.counter_bits;
+ int shift = 64 - x86_pmu.event_bits;
u64 prev_raw_count, new_raw_count;
s64 delta;
@@ -625,15 +625,15 @@ x86_perf_counter_update(struct perf_counter *counter,
return 0;
/*
- * Careful: an NMI might modify the previous counter value.
+ * Careful: an NMI might modify the previous event value.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
- * count to the generic counter atomically:
+ * count to the generic event atomically:
*/
again:
prev_raw_count = atomic64_read(&hwc->prev_count);
- rdmsrl(hwc->counter_base + idx, new_raw_count);
+ rdmsrl(hwc->event_base + idx, new_raw_count);
if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -642,7 +642,7 @@ again:
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
- * (counter-)time and add that to the generic counter.
+ * (event-)time and add that to the generic event.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
@@ -650,13 +650,13 @@ again:
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
- atomic64_add(delta, &counter->count);
+ atomic64_add(delta, &event->count);
atomic64_sub(delta, &hwc->period_left);
return new_raw_count;
}
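
The comments in x86_perf_event_update() describe a lock-free scheme: snapshot prev_count, read the hardware counter, cmpxchg the new raw value into prev_count (retrying if an NMI raced in), then sign-extend the delta through a shift pair because the PMC is narrower than 64 bits. Here is a rough user-space model of that loop with C11 atomics; read_hw_counter(), fake_pmc and the 32-bit width are stand-ins, not kernel APIs, and the arithmetic right shift mirrors the kernel's reliance on sign-propagating shifts.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_BITS 32                       /* pretend the PMC is 32 bits wide */

    static _Atomic uint64_t prev_count;         /* hwc->prev_count                  */
    static _Atomic int64_t  event_count;        /* event->count                     */
    static _Atomic uint64_t fake_pmc;           /* stand-in for rdmsrl() of the PMC */

    static uint64_t read_hw_counter(void)       /* hypothetical helper */
    {
            return atomic_load(&fake_pmc);
    }

    static uint64_t event_update(void)
    {
            int shift = 64 - EVENT_BITS;
            uint64_t prev_raw, new_raw;
            int64_t delta;

    again:
            prev_raw = atomic_load(&prev_count);
            new_raw  = read_hw_counter();

            /* An NMI may have updated prev_count between the two loads: retry. */
            if (!atomic_compare_exchange_strong(&prev_count, &prev_raw, new_raw))
                    goto again;

            /*
             * Shift up and (arithmetically) back down so a counter narrower
             * than 64 bits is sign-extended before the delta is accumulated.
             */
            delta = (int64_t)((new_raw << shift) - (prev_raw << shift)) >> shift;

            atomic_fetch_add(&event_count, delta);
            return new_raw;
    }

    int main(void)
    {
            atomic_store(&fake_pmc, 100);
            atomic_store(&prev_count, 100);      /* as if the event was just programmed   */

            atomic_store(&fake_pmc, 100 + 1234); /* the hardware counted 1234 more events */
            event_update();

            printf("event->count = %lld\n", (long long)atomic_load(&event_count));
            return 0;
    }
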
-static atomic_t active_counters;
+static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);
static bool reserve_pmc_hardware(void)
@@ -667,12 +667,12 @@ static bool reserve_pmc_hardware(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
disable_lapic_nmi_watchdog();
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
goto perfctr_fail;
}
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
@@ -685,7 +685,7 @@ eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);
- i = x86_pmu.num_counters;
+ i = x86_pmu.num_events;
perfctr_fail:
for (i--; i >= 0; i--)
@@ -703,7 +703,7 @@ static void release_pmc_hardware(void)
#ifdef CONFIG_X86_LOCAL_APIC
int i;
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < x86_pmu.num_events; i++) {
release_perfctr_nmi(x86_pmu.perfctr + i);
release_evntsel_nmi(x86_pmu.eventsel + i);
}
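
reserve_pmc_hardware() claims every perfctr MSR and then every eventsel MSR, and on failure walks backwards through whatever it already took; that is what the eventsel_fail/perfctr_fail labels in the hunk above implement. A compact sketch of that acquire-all-or-roll-back shape follows; try_reserve() and release() are invented placeholders, with one reservation forced to fail so the unwind path runs.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_EVENTS 4

    static bool reserved[2][NUM_EVENTS];        /* [0] = perfctr MSRs, [1] = eventsel MSRs */

    static bool try_reserve(int bank, int i)    /* placeholder: pretend one MSR is taken */
    {
            if (bank == 1 && i == 2)
                    return false;
            reserved[bank][i] = true;
            return true;
    }

    static void release(int bank, int i)
    {
            reserved[bank][i] = false;
    }

    static bool reserve_pmc_hardware(void)
    {
            int i;

            for (i = 0; i < NUM_EVENTS; i++)
                    if (!try_reserve(0, i))
                            goto perfctr_fail;

            for (i = 0; i < NUM_EVENTS; i++)
                    if (!try_reserve(1, i))
                            goto eventsel_fail;

            return true;

    eventsel_fail:
            /* Undo the eventsel reservations made so far ... */
            for (i--; i >= 0; i--)
                    release(1, i);
            i = NUM_EVENTS;

    perfctr_fail:
            /* ... and then every perfctr reservation. */
            for (i--; i >= 0; i--)
                    release(0, i);
            return false;
    }

    int main(void)
    {
            printf("reserved: %s\n", reserve_pmc_hardware() ? "yes" : "no");
            return 0;
    }
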
@@ -720,7 +720,7 @@ static inline bool bts_available(void)
static inline void init_debug_store_on_cpu(int cpu)
{
- struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
return;
@@ -732,7 +732,7 @@ static inline void init_debug_store_on_cpu(int cpu)
static inline void fini_debug_store_on_cpu(int cpu)
{
- if (!per_cpu(cpu_hw_counters, cpu).ds)
+ if (!per_cpu(cpu_hw_events, cpu).ds)
return;
wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
@@ -751,12 +751,12 @@ static void release_bts_hardware(void)
fini_debug_store_on_cpu(cpu);
for_each_possible_cpu(cpu) {
- struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+ struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
if (!ds)
continue;
- per_cpu(cpu_hw_counters, cpu).ds = NULL;
+ per_cpu(cpu_hw_events, cpu).ds = NULL;
kfree((void *)(unsigned long)ds->bts_buffer_base);
kfree(ds);
@@ -796,7 +796,7 @@ static int reserve_bts_hardware(void)
ds->bts_interrupt_threshold =
ds->bts_absolute_maximum - BTS_OVFL_TH;
- per_cpu(cpu_hw_counters, cpu).ds = ds;
+ per_cpu(cpu_hw_events, cpu).ds = ds;
err = 0;
}
@@ -812,9 +812,9 @@ static int reserve_bts_hardware(void)
return err;
}
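
reserve_bts_hardware() gives each CPU a debug store whose bts_absolute_maximum and bts_interrupt_threshold are derived from the buffer and record sizes; only the threshold assignment is visible in the hunk, so the sizes below are illustrative values, not the kernel's constants. A user-space approximation of the setup:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative sizes -- the kernel's real constants are not shown in the hunk. */
    #define BTS_RECORD_SIZE  24                      /* one from/to/flags record */
    #define BTS_BUFFER_SIZE  (4096 * 4)
    #define BTS_OVFL_TH      (64 * BTS_RECORD_SIZE)  /* headroom before the irq  */

    struct debug_store {
            uint64_t bts_buffer_base;
            uint64_t bts_index;
            uint64_t bts_absolute_maximum;
            uint64_t bts_interrupt_threshold;
    };

    static int setup_debug_store(struct debug_store *ds)
    {
            void *buf = calloc(1, BTS_BUFFER_SIZE);

            if (!buf)
                    return -1;

            ds->bts_buffer_base = (uint64_t)(unsigned long)buf;
            ds->bts_index = ds->bts_buffer_base;
            ds->bts_absolute_maximum = ds->bts_buffer_base +
                    BTS_BUFFER_SIZE / BTS_RECORD_SIZE * BTS_RECORD_SIZE;
            ds->bts_interrupt_threshold =
                    ds->bts_absolute_maximum - BTS_OVFL_TH;
            return 0;
    }

    int main(void)
    {
            struct debug_store ds;

            if (setup_debug_store(&ds) == 0) {
                    printf("BTS buffer holds %llu records\n",
                           (unsigned long long)((ds.bts_absolute_maximum -
                                                 ds.bts_buffer_base) / BTS_RECORD_SIZE));
                    free((void *)(unsigned long)ds.bts_buffer_base);
            }
            return 0;
    }
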
-static void hw_perf_counter_destroy(struct perf_counter *counter)
+static void hw_perf_event_destroy(struct perf_event *event)
{
- if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
+ if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
release_pmc_hardware();
release_bts_hardware();
mutex_unlock(&pmc_reserve_mutex);
@@ -827,7 +827,7 @@ static inline int x86_pmu_initialized(void)
}
static inline int
-set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
+set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
unsigned int cache_type, cache_op, cache_result;
u64 config, val;
@@ -880,7 +880,7 @@ static void intel_pmu_enable_bts(u64 config)
static void intel_pmu_disable_bts(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;
if (!cpuc->ds)
@@ -898,10 +898,10 @@ static void intel_pmu_disable_bts(void)
/*
* Setup the hardware configuration for a given attr_type
*/
-static int __hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_event_init(struct perf_event *event)
{
- struct perf_counter_attr *attr = &counter->attr;
- struct hw_perf_counter *hwc = &counter->hw;
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
u64 config;
int err;
@@ -909,22 +909,22 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
return -ENODEV;
err = 0;
- if (!atomic_inc_not_zero(&active_counters)) {
+ if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&active_counters) == 0) {
+ if (atomic_read(&active_events) == 0) {
if (!reserve_pmc_hardware())
err = -EBUSY;
else
err = reserve_bts_hardware();
}
if (!err)
- atomic_inc(&active_counters);
+ atomic_inc(&active_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
- counter->destroy = hw_perf_counter_destroy;
+ event->destroy = hw_perf_event_destroy;
/*
* Generate PMC IRQs:
@@ -948,8 +948,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
/*
* If we have a PMU initialized but no APIC
* interrupts, we cannot sample hardware
- * counters (user-space has to fall back and
- * sample via a hrtimer based software counter):
+ * events (user-space has to fall back and
+ * sample via a hrtimer based software event):
*/
if (!x86_pmu.apic)
return -EOPNOTSUPP;
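
__hw_perf_event_init() above keeps the PMC/BTS hardware reserved for as long as any event exists: the fast path is atomic_inc_not_zero(active_events), and only the first event takes pmc_reserve_mutex and performs the reservation, with hw_perf_event_destroy() undoing it on the last put. A simplified pthread sketch of that refcount-plus-mutex pattern; reserve_hw()/release_hw() are placeholders, and the destroy path here takes the mutex unconditionally rather than using atomic_dec_and_mutex_lock().

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int active_events;
    static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

    static bool reserve_hw(void) { puts("hardware reserved"); return true; }   /* placeholder */
    static void release_hw(void) { puts("hardware released"); }                /* placeholder */

    static int event_init(void)
    {
            int err = 0;
            int expected = atomic_load(&active_events);

            /* Fast path: "inc_not_zero" -- bump the count only if it is already nonzero. */
            while (expected &&
                   !atomic_compare_exchange_weak(&active_events, &expected, expected + 1))
                    ;
            if (expected)
                    return 0;

            /* Slow path: the first event reserves the hardware under the mutex. */
            pthread_mutex_lock(&reserve_mutex);
            if (atomic_load(&active_events) == 0 && !reserve_hw())
                    err = -1;
            if (!err)
                    atomic_fetch_add(&active_events, 1);
            pthread_mutex_unlock(&reserve_mutex);
            return err;
    }

    static void event_destroy(void)
    {
            /* The last event out releases the hardware. */
            pthread_mutex_lock(&reserve_mutex);
            if (atomic_fetch_sub(&active_events, 1) == 1)
                    release_hw();
            pthread_mutex_unlock(&reserve_mutex);
    }

    int main(void)
    {
            event_init();
            event_init();
            event_destroy();
            event_destroy();
            return 0;
    }
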
@@ -1001,7 +1001,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
static void p6_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
if (!cpuc->enabled)
@@ -1018,7 +1018,7 @@ static void p6_pmu_disable_all(void)
static void intel_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->enabled)
return;
@@ -1034,7 +1034,7 @@ static void intel_pmu_disable_all(void)
static void amd_pmu_disable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
if (!cpuc->enabled)
@@ -1043,12 +1043,12 @@ static void amd_pmu_disable_all(void)
cpuc->enabled = 0;
/*
* ensure we write the disable before we start disabling the
- * counters proper, so that amd_pmu_enable_counter() does the
+ * events proper, so that amd_pmu_enable_event() does the
* right thing.
*/
barrier();
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
u64 val;
if (!test_bit(idx, cpuc->active_mask))
@@ -1070,7 +1070,7 @@ void hw_perf_disable(void)
static void p6_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long val;
if (cpuc->enabled)
@@ -1087,7 +1087,7 @@ static void p6_pmu_enable_all(void)
static void intel_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
return;
@@ -1098,19 +1098,19 @@ static void intel_pmu_enable_all(void)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
- struct perf_counter *counter =
- cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event =
+ cpuc->events[X86_PMC_IDX_FIXED_BTS];
- if (WARN_ON_ONCE(!counter))
+ if (WARN_ON_ONCE(!event))
return;
- intel_pmu_enable_bts(counter->hw.config);
+ intel_pmu_enable_bts(event->hw.config);
}
}
static void amd_pmu_enable_all(void)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int idx;
if (cpuc->enabled)
@@ -1119,14 +1119,14 @@ static void amd_pmu_enable_all(void)
cpuc->enabled = 1;
barrier();
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- struct perf_counter *counter = cpuc->counters[idx];
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
+ struct perf_event *event = cpuc->events[idx];
u64 val;
if (!test_bit(idx, cpuc->active_mask))
continue;
- val = counter->hw.config;
+ val = event->hw.config;
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}
@@ -1153,19 +1153,19 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
-static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
(void)checking_wrmsrl(hwc->config_base + idx,
hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}
-static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}
static inline void
-intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@@ -1178,10 +1178,10 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
}
static inline void
-p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- u64 val = P6_NOP_COUNTER;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ u64 val = P6_NOP_EVENT;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1190,7 +1190,7 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
}
static inline void
-intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@@ -1202,24 +1202,24 @@ intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
return;
}
- x86_pmu_disable_counter(hwc, idx);
+ x86_pmu_disable_event(hwc, idx);
}
static inline void
-amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- x86_pmu_disable_counter(hwc, idx);
+ x86_pmu_disable_event(hwc, idx);
}
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
/*
* Set the next IRQ period, based on the hwc->period_left value.
- * To be called with the counter disabled in hw:
+ * To be called with the event disabled in hw:
*/
static int
-x86_perf_counter_set_period(struct perf_counter *counter,
- struct hw_perf_counter *hwc, int idx)
+x86_perf_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc, int idx)
{
s64 left = atomic64_read(&hwc->period_left);
s64 period = hwc->sample_period;
@@ -1256,21 +1256,21 @@ x86_perf_counter_set_period(struct perf_counter *counter,
per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
/*
- * The hw counter starts counting from this counter offset,
+ * The hw event starts counting from this event offset,
* mark it to be able to extract future deltas:
*/
atomic64_set(&hwc->prev_count, (u64)-left);
- err = checking_wrmsrl(hwc->counter_base + idx,
- (u64)(-left) & x86_pmu.counter_mask);
+ err = checking_wrmsrl(hwc->event_base + idx,
+ (u64)(-left) & x86_pmu.event_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return ret;
}
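
x86_perf_event_set_period() arms the counter with the two's complement of the remaining period, masked to the counter width, so the PMC overflows and raises the PMI after exactly "left" increments. A quick illustration of that arithmetic, assuming a 32-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_BITS  32
    #define EVENT_MASK  ((1ULL << EVENT_BITS) - 1)

    int main(void)
    {
            int64_t  left  = 10000;                      /* events until the next PMI */
            uint64_t start = (uint64_t)(-left) & EVENT_MASK;

            printf("counter programmed to %#llx\n", (unsigned long long)start);

            /* The hardware bumps the counter once per event ... */
            uint64_t counter = start, ticks = 0;
            while (((counter + 1) & EVENT_MASK) != 0) {  /* ... until it wraps to zero */
                    counter = (counter + 1) & EVENT_MASK;
                    ticks++;
            }
            printf("overflow after %llu events\n", (unsigned long long)(ticks + 1));
            return 0;
    }
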
static inline void
-intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
@@ -1295,9 +1295,9 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}
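
The fixed-purpose counters have no EVNTSEL MSR of their own; each owns a 4-bit field in the shared IA32 FIXED_CTR_CTRL register, which is why intel_pmu_{enable,disable}_fixed() build a per-index mask (0xf << idx*4) plus a bits value and read-modify-write the control word. A sketch of that field update on a plain integer, using the architectural bit meanings (bit 0 ring-0 enable, bit 1 ring-3 enable, bit 3 PMI enable); the MSR access itself is left out.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Per-counter field layout in IA32_FIXED_CTR_CTRL, 4 bits per fixed counter:
     * bit 0 = count in ring 0, bit 1 = count in ring 3, bit 3 = enable the PMI.
     */
    static uint64_t fixed_ctr_ctrl;

    static void enable_fixed(int idx, int usr, int os, int pmi)
    {
            uint64_t bits = 0, mask = 0xfULL << (idx * 4);

            if (os)
                    bits |= 0x1;
            if (usr)
                    bits |= 0x2;
            if (pmi)
                    bits |= 0x8;
            bits <<= idx * 4;

            /* Read-modify-write: clear this counter's nibble, then set it. */
            fixed_ctr_ctrl = (fixed_ctr_ctrl & ~mask) | bits;
    }

    static void disable_fixed(int idx)
    {
            fixed_ctr_ctrl &= ~(0xfULL << (idx * 4));
    }

    int main(void)
    {
            enable_fixed(0, 1, 1, 1);        /* fixed counter 0, user+kernel, with PMI */
            enable_fixed(2, 1, 0, 1);        /* fixed counter 2, user only, with PMI   */
            printf("FIXED_CTR_CTRL = %#llx\n", (unsigned long long)fixed_ctr_ctrl);

            disable_fixed(0);
            printf("FIXED_CTR_CTRL = %#llx\n", (unsigned long long)fixed_ctr_ctrl);
            return 0;
    }
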
-static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
u64 val;
val = hwc->config;
@@ -1308,10 +1308,10 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
}
-static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
- if (!__get_cpu_var(cpu_hw_counters).enabled)
+ if (!__get_cpu_var(cpu_hw_events).enabled)
return;
intel_pmu_enable_bts(hwc->config);
@@ -1323,19 +1323,19 @@ static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
return;
}
- x86_pmu_enable_counter(hwc, idx);
+ x86_pmu_enable_event(hwc, idx);
}
-static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
- x86_pmu_enable_counter(hwc, idx);
+ x86_pmu_enable_event(hwc, idx);
}
static int
-fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
+fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
{
unsigned int hw_event;
@@ -1346,7 +1346,7 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
(hwc->sample_period == 1)))
return X86_PMC_IDX_FIXED_BTS;
- if (!x86_pmu.num_counters_fixed)
+ if (!x86_pmu.num_events_fixed)
return -1;
if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1360,97 +1360,97 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
}
/*
- * Find a PMC slot for the freshly enabled / scheduled in counter:
+ * Find a PMC slot for the freshly enabled / scheduled in event:
*/
-static int x86_pmu_enable(struct perf_counter *counter)
+static int x86_pmu_enable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx;
- idx = fixed_mode_idx(counter, hwc);
+ idx = fixed_mode_idx(event, hwc);
if (idx == X86_PMC_IDX_FIXED_BTS) {
/* BTS is already occupied. */
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;
hwc->config_base = 0;
- hwc->counter_base = 0;
+ hwc->event_base = 0;
hwc->idx = idx;
} else if (idx >= 0) {
/*
- * Try to get the fixed counter, if that is already taken
- * then try to get a generic counter:
+ * Try to get the fixed event, if that is already taken
+ * then try to get a generic event:
*/
if (test_and_set_bit(idx, cpuc->used_mask))
goto try_generic;
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
/*
- * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+ * We set it so that event_base + idx in wrmsr/rdmsr maps to
* MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
*/
- hwc->counter_base =
+ hwc->event_base =
MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
hwc->idx = idx;
} else {
idx = hwc->idx;
- /* Try to get the previous generic counter again */
+ /* Try to get the previous generic event again */
if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
idx = find_first_zero_bit(cpuc->used_mask,
- x86_pmu.num_counters);
- if (idx == x86_pmu.num_counters)
+ x86_pmu.num_events);
+ if (idx == x86_pmu.num_events)
return -EAGAIN;
set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
hwc->config_base = x86_pmu.eventsel;
- hwc->counter_base = x86_pmu.perfctr;
+ hwc->event_base = x86_pmu.perfctr;
}
- perf_counters_lapic_init();
+ perf_events_lapic_init();
x86_pmu.disable(hwc, idx);
- cpuc->counters[idx] = counter;
+ cpuc->events[idx] = event;
set_bit(idx, cpuc->active_mask);
- x86_perf_counter_set_period(counter, hwc, idx);
+ x86_perf_event_set_period(event, hwc, idx);
x86_pmu.enable(hwc, idx);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
return 0;
}
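
x86_pmu_enable() hands out hardware slots through cpuc->used_mask: try the fixed or previously used index with test_and_set_bit(), and if it is taken fall back to find_first_zero_bit() over the generic counters. A tiny non-atomic model of that allocation; the helpers below imitate, but are not, the kernel bitops.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_EVENTS 4                     /* generic PMC slots */

    static unsigned long used_mask;

    static bool test_and_set_bit(int bit, unsigned long *mask)   /* imitation, not atomic */
    {
            bool was_set = *mask & (1UL << bit);

            *mask |= 1UL << bit;
            return was_set;
    }

    static int find_first_zero_bit(unsigned long mask, int size) /* imitation */
    {
            for (int i = 0; i < size; i++)
                    if (!(mask & (1UL << i)))
                            return i;
            return size;                     /* nothing free */
    }

    /* Prefer the slot the event used last time; otherwise grab any free one. */
    static int alloc_slot(int preferred)
    {
            if (!test_and_set_bit(preferred, &used_mask))
                    return preferred;

            int idx = find_first_zero_bit(used_mask, NUM_EVENTS);

            if (idx == NUM_EVENTS)
                    return -1;               /* the kernel returns -EAGAIN here */
            test_and_set_bit(idx, &used_mask);
            return idx;
    }

    int main(void)
    {
            printf("slot %d\n", alloc_slot(0));   /* -> 0            */
            printf("slot %d\n", alloc_slot(0));   /* 0 is taken -> 1 */
            printf("slot %d\n", alloc_slot(3));   /* -> 3            */
            return 0;
    }
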
-static void x86_pmu_unthrottle(struct perf_counter *counter)
+static void x86_pmu_unthrottle(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
- cpuc->counters[hwc->idx] != counter))
+ cpuc->events[hwc->idx] != event))
return;
x86_pmu.enable(hwc, hwc->idx);
}
-void perf_counter_print_debug(void)
+void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
unsigned long flags;
int cpu, idx;
- if (!x86_pmu.num_counters)
+ if (!x86_pmu.num_events)
return;
local_irq_save(flags);
cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_counters, cpu);
+ cpuc = &per_cpu(cpu_hw_events, cpu);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
@@ -1466,7 +1466,7 @@ void perf_counter_print_debug(void)
}
pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
rdmsrl(x86_pmu.perfctr + idx, pmc_count);
@@ -1479,7 +1479,7 @@ void perf_counter_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -1488,7 +1488,7 @@ void perf_counter_print_debug(void)
local_irq_restore(flags);
}
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
+static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
struct bts_record {
@@ -1496,14 +1496,14 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
u64 to;
u64 flags;
};
- struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;
- if (!counter)
+ if (!event)
return;
if (!ds)
@@ -1518,7 +1518,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
ds->bts_index = ds->bts_buffer_base;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
data.addr = 0;
regs.ip = 0;
@@ -1527,9 +1527,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
* We will overwrite the from and to address before we output
* the sample.
*/
- perf_prepare_sample(&header, &data, counter, &regs);
+ perf_prepare_sample(&header, &data, event, &regs);
- if (perf_output_begin(&handle, counter,
+ if (perf_output_begin(&handle, event,
header.size * (top - at), 1, 1))
return;
@@ -1537,20 +1537,20 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc)
data.ip = at->from;
data.addr = at->to;
- perf_output_sample(&handle, &header, &data, counter);
+ perf_output_sample(&handle, &header, &data, event);
}
perf_output_end(&handle);
/* There's new data available. */
- counter->hw.interrupts++;
- counter->pending_kill = POLL_IN;
+ event->hw.interrupts++;
+ event->pending_kill = POLL_IN;
}
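
intel_pmu_drain_bts_buffer() walks the debug store from bts_buffer_base up to bts_index as an array of {from, to, flags} branch records, emits one sample per record, and rewinds the index for the next fill. The walk itself, modelled in user space with emit_sample() standing in for the perf output path:

    #include <stdint.h>
    #include <stdio.h>

    struct bts_record {
            uint64_t from;
            uint64_t to;
            uint64_t flags;
    };

    struct debug_store {
            uint64_t bts_buffer_base;
            uint64_t bts_index;
    };

    static void emit_sample(uint64_t from, uint64_t to)       /* placeholder */
    {
            printf("branch %#llx -> %#llx\n",
                   (unsigned long long)from, (unsigned long long)to);
    }

    static void drain_bts(struct debug_store *ds)
    {
            struct bts_record *at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
            struct bts_record *top = (struct bts_record *)(unsigned long)ds->bts_index;

            if (top <= at)
                    return;

            ds->bts_index = ds->bts_buffer_base;              /* rewind for the next fill */

            for (; at < top; at++)
                    emit_sample(at->from, at->to);
    }

    int main(void)
    {
            struct bts_record buf[3] = {
                    { 0x1000, 0x2000, 0 },
                    { 0x2004, 0x3000, 0 },
            };
            struct debug_store ds = {
                    .bts_buffer_base = (uint64_t)(unsigned long)buf,
                    .bts_index       = (uint64_t)(unsigned long)&buf[2],  /* two records used */
            };

            drain_bts(&ds);
            return 0;
    }
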
-static void x86_pmu_disable(struct perf_counter *counter)
+static void x86_pmu_disable(struct perf_event *event)
{
- struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
- struct hw_perf_counter *hwc = &counter->hw;
+ struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
/*
@@ -1562,63 +1562,63 @@ static void x86_pmu_disable(struct perf_counter *counter)
/*
* Make sure the cleared pointer becomes visible before we
- * (potentially) free the counter:
+ * (potentially) free the event:
*/
barrier();
/*
- * Drain the remaining delta count out of a counter
+ * Drain the remaining delta count out of an event
* that we are disabling:
*/
- x86_perf_counter_update(counter, hwc, idx);
+ x86_perf_event_update(event, hwc, idx);
/* Drain the remaining BTS records. */
if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
intel_pmu_drain_bts_buffer(cpuc);
- cpuc->counters[idx] = NULL;
+ cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
- perf_counter_update_userpage(counter);
+ perf_event_update_userpage(event);
}
/*
- * Save and restart an expired counter. Called by NMI contexts,
- * so it has to be careful about preempting normal counter ops:
+ * Save and restart an expired event. Called by NMI contexts,
+ * so it has to be careful about preempting normal event ops:
*/
-static int intel_pmu_save_and_restart(struct perf_counter *counter)
+static int intel_pmu_save_and_restart(struct perf_event *event)
{
- struct hw_perf_counter *hwc = &counter->hw;
+ struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret;
- x86_perf_counter_update(counter, hwc, idx);
- ret = x86_perf_counter_set_period(counter, hwc, idx);
+ x86_perf_event_update(event, hwc, idx);
+ ret = x86_perf_event_set_period(event, hwc, idx);
- if (counter->state == PERF_COUNTER_STATE_ACTIVE)
- intel_pmu_enable_counter(hwc, idx);
+ if (event->state == PERF_EVENT_STATE_ACTIVE)
+ intel_pmu_enable_event(hwc, idx);
return ret;
}
static void intel_pmu_reset(void)
{
- struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
+ struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
unsigned long flags;
int idx;
- if (!x86_pmu.num_counters)
+ if (!x86_pmu.num_events)
return;
local_irq_save(flags);
printk("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
}
if (ds)
@@ -1630,38 +1630,38 @@ static void intel_pmu_reset(void)
static int p6_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
- counter = cpuc->counters[idx];
- hwc = &counter->hw;
+ event = cpuc->events[idx];
+ hwc = &event->hw;
- val = x86_perf_counter_update(counter, hwc, idx);
- if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ val = x86_perf_event_update(event, hwc, idx);
+ if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;
/*
- * counter overflow
+ * event overflow
*/
handled = 1;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (!x86_perf_counter_set_period(counter, hwc, idx))
+ if (!x86_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- p6_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ p6_pmu_disable_event(hwc, idx);
}
if (handled)
@@ -1677,13 +1677,13 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
+ struct cpu_hw_events *cpuc;
int bit, loops;
u64 ack, status;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
perf_disable();
intel_pmu_drain_bts_buffer(cpuc);
@@ -1696,8 +1696,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
loops = 0;
again:
if (++loops > 100) {
- WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
- perf_counter_print_debug();
+ WARN_ONCE(1, "perfevents: irq loop stuck!\n");
+ perf_event_print_debug();
intel_pmu_reset();
perf_enable();
return 1;
@@ -1706,19 +1706,19 @@ again:
inc_irq_stat(apic_perf_irqs);
ack = status;
for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
- struct perf_counter *counter = cpuc->counters[bit];
+ struct perf_event *event = cpuc->events[bit];
clear_bit(bit, (unsigned long *) &status);
if (!test_bit(bit, cpuc->active_mask))
continue;
- if (!intel_pmu_save_and_restart(counter))
+ if (!intel_pmu_save_and_restart(event))
continue;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (perf_counter_overflow(counter, 1, &data, regs))
- intel_pmu_disable_counter(&counter->hw, bit);
+ if (perf_event_overflow(event, 1, &data, regs))
+ intel_pmu_disable_event(&event->hw, bit);
}
intel_pmu_ack_status(ack);
@@ -1738,38 +1738,38 @@ again:
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
- struct cpu_hw_counters *cpuc;
- struct perf_counter *counter;
- struct hw_perf_counter *hwc;
+ struct cpu_hw_events *cpuc;
+ struct perf_event *event;
+ struct hw_perf_event *hwc;
int idx, handled = 0;
u64 val;
data.addr = 0;
- cpuc = &__get_cpu_var(cpu_hw_counters);
+ cpuc = &__get_cpu_var(cpu_hw_events);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < x86_pmu.num_events; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
- counter = cpuc->counters[idx];
- hwc = &counter->hw;
+ event = cpuc->events[idx];
+ hwc = &event->hw;
- val = x86_perf_counter_update(counter, hwc, idx);
- if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+ val = x86_perf_event_update(event, hwc, idx);
+ if (val & (1ULL << (x86_pmu.event_bits - 1)))
continue;
/*
- * counter overflow
+ * event overflow
*/
handled = 1;
- data.period = counter->hw.last_period;
+ data.period = event->hw.last_period;
- if (!x86_perf_counter_set_period(counter, hwc, idx))
+ if (!x86_perf_event_set_period(event, hwc, idx))
continue;
- if (perf_counter_overflow(counter, 1, &data, regs))
- amd_pmu_disable_counter(hwc, idx);
+ if (perf_event_overflow(event, 1, &data, regs))
+ amd_pmu_disable_event(hwc, idx);
}
if (handled)
@@ -1783,18 +1783,18 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
irq_enter();
ack_APIC_irq();
inc_irq_stat(apic_pending_irqs);
- perf_counter_do_pending();
+ perf_event_do_pending();
irq_exit();
}
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
apic->send_IPI_self(LOCAL_PENDING_VECTOR);
#endif
}
-void perf_counters_lapic_init(void)
+void perf_events_lapic_init(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
if (!x86_pmu.apic || !x86_pmu_initialized())
@@ -1808,13 +1808,13 @@ void perf_counters_lapic_init(void)
}
static int __kprobes
-perf_counter_nmi_handler(struct notifier_block *self,
+perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
struct pt_regs *regs;
- if (!atomic_read(&active_counters))
+ if (!atomic_read(&active_events))
return NOTIFY_DONE;
switch (cmd) {
@@ -1833,7 +1833,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
#endif
/*
* Can't rely on the handled return value to say it was our NMI, two
- * counters could trigger 'simultaneously' raising two back-to-back NMIs.
+ * events could trigger 'simultaneously' raising two back-to-back NMIs.
*
* If the first NMI handles both, the latter will be empty and daze
* the CPU.
@@ -1843,8 +1843,8 @@ perf_counter_nmi_handler(struct notifier_block *self,
return NOTIFY_STOP;
}
-static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
- .notifier_call = perf_counter_nmi_handler,
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+ .notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = 1
};
@@ -1854,8 +1854,8 @@ static struct x86_pmu p6_pmu = {
.handle_irq = p6_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
- .enable = p6_pmu_enable_counter,
- .disable = p6_pmu_disable_counter,
+ .enable = p6_pmu_enable_event,
+ .disable = p6_pmu_disable_event,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
@@ -1864,16 +1864,16 @@ static struct x86_pmu p6_pmu = {
.apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
- .num_counters = 2,
+ .num_events = 2,
/*
- * Counters have 40 bits implemented. However they are designed such
+ * Events have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
- * effective width of a counter for P6-like PMU is 32 bits only.
+ * effective width of an event for P6-like PMU is 32 bits only.
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
- .counter_bits = 32,
- .counter_mask = (1ULL << 32) - 1,
+ .event_bits = 32,
+ .event_mask = (1ULL << 32) - 1,
};
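
The comment in the p6_pmu definition is the reason event_bits stays at 32 even though the counter physically implements 40 bits: bits 39..32 only mirror bit 31, so they carry no information, and the shift pair in the update path discards them. A small demonstration, assuming the mirrored layout the comment describes:

    #include <stdint.h>
    #include <stdio.h>

    /* Model a P6 PMC read: 40 implemented bits, with bits 39..32 copying bit 31. */
    static uint64_t p6_read(uint32_t low32)
    {
            uint64_t val = low32;

            if (low32 & 0x80000000u)
                    val |= 0xffULL << 32;
            return val;
    }

    int main(void)
    {
            int shift = 64 - 32;                     /* event_bits = 32 */
            uint64_t prev = p6_read(0xfffffff0u);    /* counter armed at -16 */
            uint64_t now  = p6_read(0x00000004u);    /* read again 20 events later */

            /* The same shift pair as x86_perf_event_update(): only the low 32 bits count. */
            int64_t delta = (int64_t)((now << shift) - (prev << shift)) >> shift;

            printf("raw prev=%#llx now=%#llx delta=%lld\n",
                   (unsigned long long)prev, (unsigned long long)now, (long long)delta);
            return 0;
    }
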
static struct x86_pmu intel_pmu = {
@@ -1881,8 +1881,8 @@ static struct x86_pmu intel_pmu = {
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
- .enable = intel_pmu_enable_counter,
- .disable = intel_pmu_disable_counter,
+ .enable = intel_pmu_enable_event,
+ .disable = intel_pmu_disable_event,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
@@ -1892,7 +1892,7 @@ static struct x86_pmu intel_pmu = {
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
- * the generic counter period:
+ * the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
@@ -1904,16 +1904,16 @@ static struct x86_pmu amd_pmu = {
.handle_irq = amd_pmu_handle_irq,
.disable_all = amd_pmu_disable_all,
.enable_all = amd_pmu_enable_all,
- .enable = amd_pmu_enable_counter,
- .disable = amd_pmu_disable_counter,
+ .enable = amd_pmu_enable_event,
+ .disable = amd_pmu_disable_event,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = 4,
- .counter_bits = 48,
- .counter_mask = (1ULL << 48) - 1,
+ .num_events = 4,
+ .event_bits = 48,
+ .event_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
@@ -1982,15 +1982,15 @@ static int intel_pmu_init(void)
x86_pmu = intel_pmu;
x86_pmu.version = version;
- x86_pmu.num_counters = eax.split.num_counters;
- x86_pmu.counter_bits = eax.split.bit_width;
- x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
+ x86_pmu.num_events = eax.split.num_events;
+ x86_pmu.event_bits = eax.split.bit_width;
+ x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
/*
- * Quirk: v2 perfmon does not report fixed-purpose counters, so
- * assume at least 3 counters:
+ * Quirk: v2 perfmon does not report fixed-purpose events, so
+ * assume at least 3 events:
*/
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
/*
* Install the hw-cache-events table:
@@ -2037,11 +2037,11 @@ static int amd_pmu_init(void)
return 0;
}
-void __init init_hw_perf_counters(void)
+void __init init_hw_perf_events(void)
{
int err;
- pr_info("Performance Counters: ");
+ pr_info("Performance Events: ");
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
@@ -2054,45 +2054,45 @@ void __init init_hw_perf_counters(void)
return;
}
if (err != 0) {
- pr_cont("no PMU driver, software counters only.\n");
+ pr_cont("no PMU driver, software events only.\n");
return;
}
pr_cont("%s PMU driver.\n", x86_pmu.name);
- if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
- x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+ if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ x86_pmu.num_events, X86_PMC_MAX_GENERIC);
+ x86_pmu.num_events = X86_PMC_MAX_GENERIC;
}
- perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
- perf_max_counters = x86_pmu.num_counters;
+ perf_event_mask = (1 << x86_pmu.num_events) - 1;
+ perf_max_events = x86_pmu.num_events;
- if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+ if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
+ x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
}
- perf_counter_mask |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
- x86_pmu.intel_ctrl = perf_counter_mask;
+ perf_event_mask |=
+ ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
+ x86_pmu.intel_ctrl = perf_event_mask;
- perf_counters_lapic_init();
- register_die_notifier(&perf_counter_nmi_notifier);
+ perf_events_lapic_init();
+ register_die_notifier(&perf_event_nmi_notifier);
pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.counter_bits);
- pr_info("... generic counters: %d\n", x86_pmu.num_counters);
- pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
+ pr_info("... bit width: %d\n", x86_pmu.event_bits);
+ pr_info("... generic events: %d\n", x86_pmu.num_events);
+ pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
- pr_info("... counter mask: %016Lx\n", perf_counter_mask);
+ pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
+ pr_info("... event mask: %016Lx\n", perf_event_mask);
}
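
init_hw_perf_events() lays out perf_event_mask with the generic counters in the low bits and the fixed counters starting at bit X86_PMC_IDX_FIXED (32), and the same value seeds intel_ctrl for the global-enable MSR. The mask arithmetic, reproduced for a hypothetical CPU with 4 generic and 3 fixed counters:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_PMC_IDX_FIXED 32

    int main(void)
    {
            int num_events       = 4;        /* generic counters */
            int num_events_fixed = 3;        /* fixed counters   */

            uint64_t mask = (1ULL << num_events) - 1;

            mask |= ((1ULL << num_events_fixed) - 1) << X86_PMC_IDX_FIXED;

            /* 0x70000000f: bits 0-3 for the generic PMCs, bits 32-34 for the fixed ones. */
            printf("perf_event_mask = %#llx\n", (unsigned long long)mask);
            return 0;
    }
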
-static inline void x86_pmu_read(struct perf_counter *counter)
+static inline void x86_pmu_read(struct perf_event *event)
{
- x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+ x86_perf_event_update(event, &event->hw, event->hw.idx);
}
static const struct pmu pmu = {
@@ -2102,14 +2102,14 @@ static const struct pmu pmu = {
.unthrottle = x86_pmu_unthrottle,
};
-const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err;
- err = __hw_perf_counter_init(counter);
+ err = __hw_perf_event_init(event);
if (err) {
- if (counter->destroy)
- counter->destroy(counter);
+ if (event->destroy)
+ event->destroy(event);
return ERR_PTR(err);
}
@@ -2292,7 +2292,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}
-void hw_perf_counter_setup_online(int cpu)
+void hw_perf_event_setup_online(int cpu)
{
init_debug_store_on_cpu(cpu);
}
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 392bea43b89..fab786f60ed 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -20,7 +20,7 @@
#include <linux/kprobes.h>
#include <asm/apic.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
struct nmi_watchdog_ctlblk {
unsigned int cccr_msr;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index d59fe323807..681c3fda739 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1021,7 +1021,7 @@ apicinterrupt ERROR_APIC_VECTOR \
apicinterrupt SPURIOUS_APIC_VECTOR \
spurious_interrupt smp_spurious_interrupt
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
apicinterrupt LOCAL_PENDING_VECTOR \
perf_pending_interrupt smp_perf_pending_interrupt
#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 300883112e3..40f30773fb2 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -208,7 +208,7 @@ static void __init apic_intr_init(void)
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
/* Performance monitoring interrupts: */
-# ifdef CONFIG_PERF_COUNTERS
+# ifdef CONFIG_PERF_EVENTS
alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d51321ddafd..0157cd26d7c 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -335,4 +335,4 @@ ENTRY(sys_call_table)
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_counter_open
+ .long sys_perf_event_open
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 775a020990a..82728f2c6d5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -10,7 +10,7 @@
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/kprobes.h> /* __kprobes, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
-#include <linux/perf_counter.h> /* perf_swcounter_event */
+#include <linux/perf_event.h> /* perf_sw_event */
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
@@ -1017,7 +1017,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
@@ -1114,11 +1114,11 @@ good_area:
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
- perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4899215999d..8eb05878554 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -234,11 +234,11 @@ static void arch_perfmon_setup_counters(void)
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
current_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
- eax.split.num_counters = 2;
+ eax.split.num_events = 2;
eax.split.bit_width = 40;
}
- num_counters = eax.split.num_counters;
+ num_counters = eax.split.num_events;
op_arch_perfmon_spec.num_counters = num_counters;
op_arch_perfmon_spec.num_controls = num_counters;
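
Both intel_pmu_init() above and this oprofile code discover the counter geometry from CPUID leaf 0xA: EAX packs the architectural-perfmon version, the generic-counter count and their bit width, and from version 2 on EDX reports the fixed counters. A user-space probe using GCC's cpuid.h, with the field positions as documented in the Intel SDM (run on the machine itself to see its PMU geometry):

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0xA not supported");
                    return 1;
            }

            unsigned int version      = eax & 0xff;          /* EAX[7:0]   */
            unsigned int num_counters = (eax >> 8) & 0xff;   /* EAX[15:8]  */
            unsigned int bit_width    = (eax >> 16) & 0xff;  /* EAX[23:16] */
            unsigned int num_fixed    = version >= 2 ? (edx & 0x1f) : 0;   /* EDX[4:0] */

            printf("arch perfmon v%u: %u generic counters, %u bits wide, %u fixed\n",
                   version, num_counters, bit_width, num_fixed);
            return 0;
    }
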
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index b83776180c7..7b8e75d1608 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -13,7 +13,7 @@
#define OP_X86_MODEL_H
#include <asm/types.h>
-#include <asm/perf_counter.h>
+#include <asm/perf_event.h>
struct op_msr {
unsigned long addr;