From a6c93afed38c242ccf4ec5bcb5ff26ff2521cf36 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 15 Apr 2011 11:14:38 +0100
Subject: ARM: perf: de-const struct arm_pmu

This patch removes const qualifiers from instances of struct arm_pmu,
and from the functions initialising them, in preparation for
generalising arm_pmu usage to system (AKA uncore) PMUs.

This will allow dynamically modifiable structures (locks, struct pmu)
to be added as members of struct arm_pmu.

Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68..74f9119906d 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1188,7 +1188,7 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
@@ -1198,7 +1198,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
@@ -1208,7 +1208,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
@@ -1218,7 +1218,7 @@ static const struct arm_pmu *__init armv7_a5_pmu_init(void)
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
@@ -1228,22 +1228,22 @@ static const struct arm_pmu *__init armv7_a15_pmu_init(void)
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }

From 6330aae7dcd54df893813392e310141be7aa5323 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 14:55:57 +0100
Subject: ARM: perf: use u32 instead of unsigned long for PMNC register

The ARMv7 perf backend mixes up u32 and unsigned long, which is rather
ugly. This patch makes the ARMv7 PMU code consistently use the u32 type
instead.
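On 32-bit ARM the PMNC register is architecturally 32 bits wide, so u32 states
that contract directly, whereas mixing in unsigned long forces needless
implicit conversions. A minimal standalone sketch of the idea (hypothetical
helper names, not the kernel's):

	#include <stdint.h>
	typedef uint32_t u32;

	#define OVERFLOWED_MASK 0xffffffffu

	/* The register value stays u32 from read to test; nothing
	 * widens or narrows along the way. */
	static inline int pmnc_has_overflowed(u32 pmnc)
	{
		return (pmnc & OVERFLOWED_MASK) != 0;
	}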
Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 74f9119906d..f4f260dabee 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -758,26 +758,26 @@ enum armv7_counters {
 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
-static inline unsigned long armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
 					enum armv7_counters counter)
 {
 	int ret = 0;
@@ -812,7 +812,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
 	if (idx == ARMV7_CYCLE_COUNTER)
 		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
@@ -1044,7 +1044,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
-	unsigned long pmnc;
+	u32 pmnc;
 	struct perf_sample_data data;
 	struct cpu_hw_events *cpuc;
 	struct pt_regs *regs;

From 25e29c7c0f4fcbe911b77a69f015bd6424cedcd0 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 22:17:48 +0100
Subject: ARM: perf: use integers for ARMv7 event indices

This patch ensures that integers are used to represent event indices
in the ARMv7 PMU backend, keeping the indexing consistent between
functions and with the arm_pmu structure.
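The signedness matters because several of these helpers return either a
counter index or a negative errno; an unsigned return type would make checks
like "< 0" silently impossible. A small sketch of the pattern (hypothetical
names, assuming a four-counter PMU):

	#include <errno.h>

	#define NUM_COUNTERS 4

	/* Returns idx on success, -EINVAL on a bad index: this only
	 * works if the index is a signed int throughout. */
	static int select_counter(int idx)
	{
		if (idx < 0 || idx >= NUM_COUNTERS)
			return -EINVAL;
		return idx;
	}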
Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4f260dabee..e39bc8935cb 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -793,7 +793,7 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
 	u32 val;
 
@@ -840,7 +840,7 @@ static inline void armv7pmu_write_counter(int idx, u32 value)
 			smp_processor_id(), idx);
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
 		val &= ARMV7_EVTSEL_MASK;
@@ -848,7 +848,7 @@ static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
 	}
 }
 
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
 	u32 val;
 
@@ -869,7 +869,7 @@ static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
 	u32 val;
 
@@ -891,7 +891,7 @@ static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
 	u32 val;
 
@@ -912,7 +912,7 @@ static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
 	u32 val;

From c691bb6249b25104fcb6dad31bd772c139ce4a50 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 22:25:55 +0100
Subject: ARM: perf: index ARMv7 event counters starting from zero

The current ARMv7 PMU backend indexes event counters from two, with
index zero being reserved and index one being used to represent the
cycle counter. This patch tidies up the code by indexing from one
instead (with zero for the cycle counter). This allows us to remove
many of the accessor macros along with the counter enumeration and
makes the code much more readable.
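Under the new scheme, software index 0 is the cycle counter and software index
N (N >= 1) maps to hardware event counter N - 1, which is also the bit
position used in the enable and overflow registers. A self-contained sketch of
the arithmetic (macro names shortened from the patch's):

	#include <assert.h>

	#define IDX_CYCLE_COUNTER	0
	#define IDX_COUNTER0		1
	#define MAX_COUNTERS		32
	#define COUNTER_MASK		(MAX_COUNTERS - 1)
	#define IDX_TO_COUNTER(x)	(((x) - IDX_COUNTER0) & COUNTER_MASK)

	int main(void)
	{
		assert(IDX_TO_COUNTER(1) == 0);	/* first event counter */
		assert(IDX_TO_COUNTER(4) == 3);	/* bit 3 in the enable regs */
		return 0;
	}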
Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 239 +++++++++++++++-------------------------
 1 file changed, 88 insertions(+), 151 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index e39bc8935cb..0934c821430 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -676,23 +676,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
  */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 
 /*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
  */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
 
 /*
  * Per-CPU PMNC: config reg
@@ -707,54 +708,14 @@ enum armv7_counters {
 #define	ARMV7_PMNC_N_MASK	0x1f
 #define	ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */
 
-/*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
 /*
  * EVTSEL: Event selection reg
  */
 #define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
 
-/*
- * SELECT: Counter selection reg
- */
-#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
-#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
@@ -777,34 +738,39 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
 static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -814,30 +780,26 @@ static inline int armv7_pmnc_select_counter(int idx)
 static inline u32 armv7pmu_read_counter(int idx)
 {
 	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
@@ -850,86 +812,61 @@ static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
 static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
@@ -973,14 +910,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 
-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 		armv7_pmnc_select_counter(cnt);
 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 	}
 }
 #endif
@@ -1004,7 +941,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 * Set event (if destined for PMNx counters)
 	 * We don't need to set the event if it's a cycle count
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1069,7 +1006,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < armpmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -1132,23 +1069,23 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 
 	/* Always place a cycle counter into the cycle counter. */
 	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}
 
-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
 }
 
 static void armv7pmu_reset(void *info)
@@ -1156,7 +1093,7 @@ static void armv7pmu_reset(void *info)
 	u32 idx, nb_cnt = armpmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);
 
 	/* Initialize & Reset PMNC: C and P bits */

From a505addc366525cd8d9358298be0dc8655be5953 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 19 Jul 2011 13:53:36 +0100
Subject: ARM: perf: add mode exclusion for Cortex-A15 PMU

The Cortex-A15 PMU implements the PMUv2 specification and therefore
has support for some mode exclusion. This patch adds support for
excluding user, kernel and hypervisor counts from a given event.
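From user space these filters are driven by the standard exclude_* bits in
struct perf_event_attr; the driver translates them into the PMXEVTYPER filter
bits this patch defines. A minimal illustration of requesting an excluded
count (ordinary perf_event_open attribute setup, not specific to this driver):

	#include <linux/perf_event.h>
	#include <string.h>

	static void init_cycle_attr(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = PERF_TYPE_HARDWARE;
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		attr->exclude_kernel = 1;	/* maps to ARMV7_EXCLUDE_PL1 */
		attr->exclude_hv = 1;		/* leaves ARMV7_INCLUDE_HYP clear */
	}

Note the inverted sense of the hypervisor bit: the hardware has an include
flag while perf exposes an exclude flag, hence the !attr->exclude_hv test in
armv7pmu_set_event_filter below.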
Acked-by: Jamie Iles
Reviewed-by: Jean Pihet
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event_v7.c | 58 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 49 insertions(+), 9 deletions(-)

diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 0934c821430..fe6c931d2c4 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
  */
 
 #ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
 /*
  * Common ARMv7 event types
  *
@@ -708,17 +711,25 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 #define	ARMV7_PMNC_N_MASK	0x1f
 #define	ARMV7_PMNC_MASK		0x3f	/* Mask for writable bits */
 
-/*
- * EVTSEL: Event selection reg
- */
-#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
-
 /*
  * FLAG: counters overflow flag status reg
  */
 #define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 #define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
+
 static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
@@ -805,7 +816,7 @@ static inline void armv7pmu_write_counter(int idx, u32 value)
 static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
@@ -939,9 +950,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	/*
 	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
 	 */
-	if (idx != ARMV7_IDX_CYCLE_COUNTER)
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1066,9 +1078,10 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
 		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -1088,6 +1101,32 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	return -EAGAIN;
 }
 
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
+}
+
 static void armv7pmu_reset(void *info)
 {
 	u32 idx, nb_cnt = armpmu->num_events;
@@ -1162,6 +1201,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
 	armv7pmu.event_map	= &armv7_a15_perf_map;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else

From c47f8684baefa2bf52c4320f894e73db08dc8a0a Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 19 Jul 2011 09:37:10 +0100
Subject: ARM: perf: remove active_mask

Currently, pmu_hw_events::active_mask is used to keep track of which
events are active in hardware. As we can stop counters and their
interrupts, this is unnecessary.
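The observation behind the change is that whether a counter is in use can be
recovered from state the driver already programs into the hardware, for
example the per-counter interrupt-enable bits. A rough standalone sketch of
that derivation (hypothetical bit layout, not any real PMU's):

	typedef unsigned int u32;

	#define CCNT_IEN	(1u << 0)	/* cycle counter IRQ enable */
	#define CNT0_IEN	(1u << 1)	/* event counter 0 IRQ enable */

	/* A counter is "active" exactly when its IRQ-enable bit is set
	 * in the control register, so no shadow bitmap is needed. */
	static int counter_is_active(u32 ctrl, int idx)
	{
		u32 mask = (idx == 0) ? CCNT_IEN : (CNT0_IEN << (idx - 1));
		return (ctrl & mask) != 0;
	}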
Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event.c        |  8 --------
 arch/arm/kernel/perf_event_v6.c     | 19 ++++++++++++++++++-
 arch/arm/kernel/perf_event_v7.c     |  3 ---
 arch/arm/kernel/perf_event_xscale.c |  6 ------
 4 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index dfde9283aec..438482ff749 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -57,12 +57,6 @@ struct cpu_hw_events {
 	 * an event. A 0 means that the counter can be used.
 	 */
 	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is actively being
-	 * used.
-	 */
-	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
 };
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
@@ -295,7 +289,6 @@ armpmu_del(struct perf_event *event, int flags)
 
 	WARN_ON(idx < 0);
 
-	clear_bit(idx, cpuc->active_mask);
 	armpmu_stop(event, PERF_EF_UPDATE);
 	cpuc->events[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
@@ -327,7 +320,6 @@ armpmu_add(struct perf_event *event, int flags)
 	event->hw.idx = idx;
 	armpmu->disable(hwc, idx);
 	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
 
 	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	if (flags & PERF_EF_START)
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 87f29b553b8..83901286226 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -462,6 +462,23 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
+static int counter_is_active(unsigned long pmcr, int idx)
+{
+	unsigned long mask = 0;
+	if (idx == ARMV6_CYCLE_COUNTER)
+		mask = ARMV6_PMCR_CCOUNT_IEN;
+	else if (idx == ARMV6_COUNTER0)
+		mask = ARMV6_PMCR_COUNT0_IEN;
+	else if (idx == ARMV6_COUNTER1)
+		mask = ARMV6_PMCR_COUNT1_IEN;
+
+	if (mask)
+		return pmcr & mask;
+
+	WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+	return 0;
+}
+
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
 		    void *dev)
@@ -491,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!counter_is_active(pmcr, idx))
 			continue;
 
 		/*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index fe6c931d2c4..f4170fc228b 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1022,9 +1022,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 54312fc45ca..ca89a06c8e9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -253,9 +253,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
 
@@ -585,9 +582,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;

From 0f78d2d5ccf72ec834da6901886a40fd8e3b7615 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 28 Apr 2011 10:17:04 +0100
Subject: ARM: perf: lock PMU registers per-CPU

Currently, a single lock serialises access to CPU PMU registers. This
global locking is unnecessary as PMU registers are local to the CPU
they monitor.

This patch replaces the global lock with a per-CPU lock. As the lock is
in struct cpu_hw_events, PMUs providing a single cpu_hw_events instance
can be locked globally.
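The pattern is the usual one for splitting a global lock over per-CPU state:
embed the lock in the per-CPU structure and initialise it once per possible
CPU. A condensed sketch of what the patch does (kernel context assumed, names
shortened):

	struct pmu_cpu_state {
		raw_spinlock_t pmu_lock;	/* guards this CPU's PMU registers */
	};

	static DEFINE_PER_CPU(struct pmu_cpu_state, pmu_cpu_state);

	static void __init pmu_locks_init(void)
	{
		int cpu;
		for_each_possible_cpu(cpu)
			raw_spin_lock_init(&per_cpu(pmu_cpu_state, cpu).pmu_lock);
	}

Each code path then takes only the lock for the register set it is touching;
a PMU backed by a single shared register file can still hand back one instance
and be serialised globally.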
Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event.c        | 17 ++++++++++------
 arch/arm/kernel/perf_event_v6.c     | 25 +++++++++++++----------
 arch/arm/kernel/perf_event_v7.c     | 20 +++++++++++--------
 arch/arm/kernel/perf_event_xscale.c | 40 ++++++++++++++++++++++---------------
 4 files changed, 62 insertions(+), 40 deletions(-)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5ce6c333291..9331d573144 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -26,12 +26,6 @@
 #include
 #include
 
-/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
-static DEFINE_RAW_SPINLOCK(pmu_lock);
-
 /*
  * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
  * another platform that supports more, we need to increase this to be the
@@ -55,6 +49,12 @@ struct cpu_hw_events {
 	 * an event. A 0 means that the counter can be used.
 	 */
 	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
 };
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
@@ -685,6 +685,11 @@ static struct cpu_hw_events *armpmu_get_cpu_events(void)
 
 static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 {
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+	}
 	armpmu->get_hw_events = armpmu_get_cpu_events;
 }
 
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 83901286226..68cf70425f2 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -454,12 +455,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 	 * Mask out the current event and set the counter to count the event
 	 * that we're interested in.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int counter_is_active(unsigned long pmcr, int idx)
@@ -544,24 +545,26 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val |= ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 armv6pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~ARMV6_PMCR_ENABLE;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -595,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -615,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 	 * of ETM bus signal assertion cycles. The external reporting should
 	 * be disabled and so this should never increment.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
@@ -628,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -644,12 +649,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
 	 * simply disable the interrupt reporting.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
 	val &= ~mask;
 	val |= evt;
 	armv6_pmcr_write(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static struct arm_pmu armv6pmu = {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f4170fc228b..68ac522fd94 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -936,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -966,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -988,7 +990,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
@@ -1054,21 +1056,23 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index ca89a06c8e9..18e4823a0a6 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -281,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -302,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -333,12 +335,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -365,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -610,6 +614,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -643,16 +648,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -686,10 +692,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -712,24 +718,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32

From e1f431b57ef9e4a68281540933fa74865cbb7a74 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 28 Apr 2011 15:47:10 +0100
Subject: ARM: perf: refactor event mapping

Currently mapping an event type to a hardware configuration value
depends on the data being pointed to from struct arm_pmu. These fields
(cache_map, event_map, raw_event_mask) are currently specific to CPU
PMUs, and do not serve the general case well.

This patch replaces the event map pointers on struct arm_pmu with a new
'map_event' function pointer. Small shim functions are used to reuse
the existing common code.
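The shape of the refactoring is replacing three data pointers with one
behaviour pointer, so the generic layer no longer assumes how a PMU maps
events. A stripped-down sketch (hypothetical types, not the kernel's):

	struct event;

	struct pmu_ops {
		/* returns a hardware config value, or a negative errno */
		int (*map_event)(struct event *ev);
	};

	/* A CPU PMU supplies a shim over its static lookup tables... */
	static int cpu_map_event(struct event *ev) { return 0; }

	/* ...while a system PMU can plug in different logic entirely. */
	static struct pmu_ops cpu_pmu_ops = { .map_event = cpu_map_event };

The shims added below (armv6_map_event and friends) simply curry each PMU's
tables into the shared map_cpu_event helper.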
Signed-off-by: Mark Rutland
Reviewed-by: Will Deacon
Reviewed-by: Jamie Iles
Reviewed-by: Ashwin Chaugule
Signed-off-by: Will Deacon
---
 arch/arm/kernel/perf_event.c        | 67 ++++++++++++++++++++-----------------
 arch/arm/kernel/perf_event_v6.c     | 21 ++++++++----
 arch/arm/kernel/perf_event_v7.c     | 37 +++++++++++++++-----
 arch/arm/kernel/perf_event_xscale.c | 14 ++++----
 4 files changed, 87 insertions(+), 52 deletions(-)

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 1a2ebbf07fb..b13bf23ceba 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -75,11 +75,7 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
+	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
@@ -129,7 +125,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
 
@@ -145,7 +145,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -154,16 +154,38 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
+}
+
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
 }
 
 static int
@@ -484,17 +506,7 @@ __hw_perf_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -550,15 +562,8 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
-	}
 
 	event->destroy = hw_perf_event_destroy;
 
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 68cf70425f2..a4c5aa9baa4 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -657,6 +657,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
@@ -668,9 +674,7 @@ static struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
@@ -687,6 +691,13 @@ static struct arm_pmu *__init armv6pmu_init(void)
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
@@ -698,9 +709,7 @@ static struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 68ac522fd94..be7b58a2cc6 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1140,6 +1140,30 @@ static void armv7pmu_reset(void *info)
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -1150,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -1169,8 +1192,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1179,8 +1201,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1189,8 +1210,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a5_perf_map;
+	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1199,8 +1219,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a15_perf_map;
+	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 18e4823a0a6..d4c7610d25b 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -425,6 +425,12 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }
 
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu xscale1pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
@@ -436,9 +442,7 @@ static struct arm_pmu xscale1pmu = {
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
@@ -799,9 +803,7 @@ static struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };

From 8be3f9a2385f91f7bf5c58f351e24b9247898e8f Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 17 May 2011 11:20:11 +0100
Subject: ARM: perf: remove cpu-related misnomers

Currently struct cpu_hw_events stores data on events running on a
PMU associated with a CPU. As this data is general enough to be used
for system PMUs, this name is a misnomer, and may cause confusion when
it is used for system PMUs.

Additionally, 'armpmu' is commonly used as a parameter name for an
instance of struct arm_pmu. The name is also used for a global instance
which represents the CPU's PMU.

As cpu_hw_events is now not tied to CPU PMUs, it is renamed to
pmu_hw_events, with instances of it renamed similarly. As the global
'armpmu' is CPU-specific, it is renamed to cpu_pmu. This should make it
clearer which code is generic, and which is coupled with the CPU.
*/ struct perf_event **events; @@ -59,7 +59,7 @@ struct cpu_hw_events { static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); -static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); +static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); struct arm_pmu { struct pmu pmu; @@ -70,7 +70,7 @@ struct arm_pmu { irqreturn_t (*handle_irq)(int irq_num, void *dev); void (*enable)(struct hw_perf_event *evt, int idx); void (*disable)(struct hw_perf_event *evt, int idx); - int (*get_event_idx)(struct cpu_hw_events *cpuc, + int (*get_event_idx)(struct pmu_hw_events *hw_events, struct hw_perf_event *hwc); int (*set_event_filter)(struct hw_perf_event *evt, struct perf_event_attr *attr); @@ -85,21 +85,21 @@ struct arm_pmu { struct mutex reserve_mutex; u64 max_period; struct platform_device *plat_device; - struct cpu_hw_events *(*get_hw_events)(void); + struct pmu_hw_events *(*get_hw_events)(void); }; #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) /* Set at runtime when we know what CPU type we are. */ -static struct arm_pmu *armpmu; +static struct arm_pmu *cpu_pmu; enum arm_perf_pmu_ids armpmu_get_pmu_id(void) { int id = -ENODEV; - if (armpmu != NULL) - id = armpmu->id; + if (cpu_pmu != NULL) + id = cpu_pmu->id; return id; } @@ -110,8 +110,8 @@ armpmu_get_max_events(void) { int max_events = 0; - if (armpmu != NULL) - max_events = armpmu->num_events; + if (cpu_pmu != NULL) + max_events = cpu_pmu->num_events; return max_events; } @@ -319,15 +319,15 @@ static void armpmu_del(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct cpu_hw_events *cpuc = armpmu->get_hw_events(); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; WARN_ON(idx < 0); armpmu_stop(event, PERF_EF_UPDATE); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); perf_event_update_userpage(event); } @@ -336,7 +336,7 @@ static int armpmu_add(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct cpu_hw_events *cpuc = armpmu->get_hw_events(); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; @@ -344,7 +344,7 @@ armpmu_add(struct perf_event *event, int flags) perf_pmu_disable(event->pmu); /* If we don't have a space for the counter then finish early. 
*/ - idx = armpmu->get_event_idx(cpuc, hwc); + idx = armpmu->get_event_idx(hw_events, hwc); if (idx < 0) { err = idx; goto out; @@ -356,7 +356,7 @@ armpmu_add(struct perf_event *event, int flags) */ event->hw.idx = idx; armpmu->disable(hwc, idx); - cpuc->events[idx] = event; + hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) @@ -371,7 +371,7 @@ out: } static int -validate_event(struct cpu_hw_events *cpuc, +validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); @@ -381,14 +381,14 @@ validate_event(struct cpu_hw_events *cpuc, if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; - return armpmu->get_event_idx(cpuc, &fake_event) >= 0; + return armpmu->get_event_idx(hw_events, &fake_event) >= 0; } static int validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; - struct cpu_hw_events fake_pmu; + struct pmu_hw_events fake_pmu; memset(&fake_pmu, 0, sizeof(fake_pmu)); @@ -604,13 +604,13 @@ static int armpmu_event_init(struct perf_event *event) static void armpmu_enable(struct pmu *pmu) { - struct arm_pmu *armpmu = to_arm_pmu(pmu); /* Enable all of the perf events on hardware. */ + struct arm_pmu *armpmu = to_arm_pmu(pmu); int idx, enabled = 0; - struct cpu_hw_events *cpuc = armpmu->get_hw_events(); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); for (idx = 0; idx < armpmu->num_events; ++idx) { - struct perf_event *event = cpuc->events[idx]; + struct perf_event *event = hw_events->events[idx]; if (!event) continue; @@ -662,13 +662,13 @@ static int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) * This requires SMP to be available, so exists as a separate initcall. */ static int __init -armpmu_reset(void) +cpu_pmu_reset(void) { - if (armpmu && armpmu->reset) - return on_each_cpu(armpmu->reset, NULL, 1); + if (cpu_pmu && cpu_pmu->reset) + return on_each_cpu(cpu_pmu->reset, NULL, 1); return 0; } -arch_initcall(armpmu_reset); +arch_initcall(cpu_pmu_reset); /* * PMU platform driver and devicetree bindings. 
@@ -688,7 +688,7 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
-	armpmu->plat_device = pdev;
+	cpu_pmu->plat_device = pdev;
 	return 0;
 }
 
@@ -707,7 +707,7 @@ static int __init register_pmu_driver(void)
 }
 device_initcall(register_pmu_driver);
 
-static struct cpu_hw_events *armpmu_get_cpu_events(void)
+static struct pmu_hw_events *armpmu_get_cpu_events(void)
 {
 	return &__get_cpu_var(cpu_hw_events);
 }
@@ -716,7 +716,7 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 {
 	int cpu;
 	for_each_possible_cpu(cpu) {
-		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -741,22 +741,22 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = armv6pmu_init();
+			cpu_pmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = armv6mpcore_pmu_init();
+			cpu_pmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armpmu = armv7_a8_pmu_init();
+			cpu_pmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armpmu = armv7_a9_pmu_init();
+			cpu_pmu = armv7_a9_pmu_init();
 			break;
 		case 0xC050:	/* Cortex-A5 */
-			armpmu = armv7_a5_pmu_init();
+			cpu_pmu = armv7_a5_pmu_init();
 			break;
 		case 0xC0F0:	/* Cortex-A15 */
-			armpmu = armv7_a15_pmu_init();
+			cpu_pmu = armv7_a15_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -764,19 +764,19 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = xscale1pmu_init();
+			cpu_pmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = xscale2pmu_init();
+			cpu_pmu = xscale2pmu_init();
 			break;
 		}
 	}
 
-	if (armpmu) {
+	if (cpu_pmu) {
 		pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-		cpu_pmu_init(armpmu);
-		armpmu_register(armpmu, "cpu", PERF_TYPE_RAW);
+			cpu_pmu->name, cpu_pmu->num_events);
+		cpu_pmu_init(cpu_pmu);
+		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");
 	}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index a4c5aa9baa4..e63d8115c01 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -433,7 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
 		      int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= 0;
@@ -486,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
 {
 	unsigned long pmcr = armv6_pmcr_read();
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -505,7 +505,7 @@ armv6pmu_handle_irq(int irq_num,
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -526,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -545,7 +545,7 @@ static void
 armv6pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
@@ -558,7 +558,7 @@ armv6pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = armv6_pmcr_read();
@@ -568,7 +568,7 @@ armv6pmu_stop(void)
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
 		       struct hw_perf_event *event)
 {
 	/* Always place a cycle counter into the cycle counter. */
@@ -598,7 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
 		       int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
@@ -632,7 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 			      int idx)
 {
 	unsigned long val, mask, flags, evt = 0;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	if (ARMV6_CYCLE_COUNTER == idx) {
 		mask	= ARMV6_PMCR_CCOUNT_IEN;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index be7b58a2cc6..98b75738345 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -683,7 +683,7 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
 #define ARMV7_IDX_COUNTER0	1
-#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + armpmu->num_events - 1)
+#define ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
 #define ARMV7_MAX_COUNTERS	32
 #define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
@@ -936,7 +936,7 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
@@ -973,7 +973,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
@@ -997,7 +997,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
 	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -1020,7 +1020,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -1038,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -1056,7 +1056,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
@@ -1067,7 +1067,7 @@ static void armv7pmu_start(void)
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
@@ -1075,7 +1075,7 @@ static void armv7pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
@@ -1093,7 +1093,7 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 	 * For anything other than a cycle counter, try and use
 	 * the events counters
 	 */
-	for (idx = ARMV7_IDX_COUNTER0; idx < armpmu->num_events; ++idx) {
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
 		if (!test_and_set_bit(idx, cpuc->used_mask))
 			return idx;
 	}
@@ -1130,7 +1130,7 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index d4c7610d25b..e0cca10a841 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -249,7 +249,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -263,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -281,7 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -315,7 +315,7 @@ static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -344,7 +344,7 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -367,7 +367,7 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
@@ -380,7 +380,7 @@ static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
@@ -565,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -586,7 +586,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx < armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
@@ -600,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	irq_work_run();
@@ -618,7 +618,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -662,7 +662,7 @@ static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -703,7 +703,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 }
 
 static int
-xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
 			struct hw_perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -722,7 +722,7 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -735,7 +735,7 @@ static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
-	struct cpu_hw_events *events = armpmu->get_hw_events();
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
-- 
cgit v1.2.3-70-g09d2
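
A note on the shape of the change above: after the rename, backend code never reaches counter state directly; it asks the PMU for its hardware state through the get_hw_events() callback, so the same enable/disable/IRQ paths can later serve system (uncore) PMUs whose state is not per-CPU. Below is a minimal standalone sketch of that indirection, assuming simplified struct layouts and hypothetical toy_* names; it is illustrative only, not the kernel's definitions (those live in the files patched above).

/*
 * Sketch of the get_hw_events() indirection. The struct layouts here are
 * simplified assumptions: the kernel's pmu_hw_events also embeds pmu_lock,
 * and a real CPU PMU returns &__get_cpu_var(cpu_hw_events) rather than a
 * single static instance.
 */
#include <stdio.h>

struct perf_event;			/* opaque for this sketch */

struct pmu_hw_events {
	struct perf_event **events;	/* events[i] owns counter i */
	unsigned long *used_mask;	/* bitmap of counters in use */
};

struct arm_pmu {
	const char *name;
	int num_events;
	struct pmu_hw_events *(*get_hw_events)(void);
};

/* Hypothetical stand-in for per-CPU counter state. */
static struct pmu_hw_events toy_cpu_hw_events;

static struct pmu_hw_events *toy_get_cpu_events(void)
{
	return &toy_cpu_hw_events;
}

static struct arm_pmu toy_cpu_pmu = {
	.name		= "toy-cpu-pmu",
	.num_events	= 4,
	.get_hw_events	= toy_get_cpu_events,
};

int main(void)
{
	/* Backend code only ever reaches the state through the accessor. */
	struct pmu_hw_events *events = toy_cpu_pmu.get_hw_events();

	printf("%s: %d counters, hw state at %p\n",
	       toy_cpu_pmu.name, toy_cpu_pmu.num_events, (void *)events);
	return 0;
}

The design point is that pmu_hw_events carries no assumption about where it lives: a CPU PMU hands back per-CPU data, while a system PMU could hand back one shared, lock-protected instance, and functions such as armv7pmu_enable_event() work unchanged either way.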