From 79a4cb28a07c4e24103d00741a3dc2618532efe6 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 23 Jul 2014 21:12:36 +1000
Subject: powerpc/perf: Clear all MMCR settings before calling compute_mmcr()

Because we reuse cpuhw->mmcr on each call to compute_mmcr() there's a
risk that we could forget to set one of the values and use whatever
value was in there previously.

Currently all the implementations are careful to set all the values,
but it's safer to clear them all before we call compute_mmcr().

Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/perf/core-book3s.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/perf')

diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fe52db2eea6..f82c0973866 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1219,8 +1219,10 @@ static void power_pmu_enable(struct pmu *pmu)
 	}
 
 	/*
-	 * Compute MMCR* values for the new set of events
+	 * Clear all MMCR settings and recompute them for the new set of events.
 	 */
+	memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
+
 	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
 			       cpuhw->mmcr)) {
 		/* shouldn't ever get here */
--
cgit v1.2.3-70-g09d2
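
The failure mode this memset guards against is easy to demonstrate in
miniature. Below is a minimal standalone C sketch, not kernel code:
NUM_MMCR and recompute_mmcr() are hypothetical stand-ins for cpuhw->mmcr
and a ppmu->compute_mmcr() backend that forgets to write every slot.

/*
 * Sketch of the "clear before recompute" pattern, assuming a 4-entry
 * MMCR array. recompute_mmcr() deliberately models a backend that
 * forgets two slots.
 */
#include <stdio.h>
#include <string.h>

#define NUM_MMCR 4

static void recompute_mmcr(unsigned long mmcr[NUM_MMCR])
{
	mmcr[0] = 0x80000000UL;
	mmcr[1] = 0x1234UL;
	/* mmcr[2] and mmcr[3] are "forgotten" and never written. */
}

int main(void)
{
	/* Pretend these are leftovers from the previous set of events. */
	unsigned long mmcr[NUM_MMCR] = { 0xdead, 0xdead, 0xdead, 0xdead };

	memset(mmcr, 0, sizeof(mmcr));	/* the patch's safety net */
	recompute_mmcr(mmcr);

	for (int i = 0; i < NUM_MMCR; i++)
		printf("mmcr[%d] = %#lx\n", i, mmcr[i]);

	return 0;
}

With the memset in place the forgotten slots read back as 0; without it
they would silently keep the previous events' stale values.
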
From 8abd818fc76705065f3699a753ad2df594dafe86 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 23 Jul 2014 21:12:37 +1000
Subject: powerpc/perf: Pass the struct perf_events down to compute_mmcr()

To support per-event exclude settings on Power8 we need access to the
struct perf_events in compute_mmcr().

Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/perf_event_server.h | 5 ++++-
 arch/powerpc/perf/core-book3s.c              | 2 +-
 arch/powerpc/perf/mpc7450-pmu.c              | 5 +++--
 arch/powerpc/perf/power4-pmu.c               | 2 +-
 arch/powerpc/perf/power5+-pmu.c              | 2 +-
 arch/powerpc/perf/power5-pmu.c               | 2 +-
 arch/powerpc/perf/power6-pmu.c               | 2 +-
 arch/powerpc/perf/power7-pmu.c               | 2 +-
 arch/powerpc/perf/power8-pmu.c               | 3 ++-
 arch/powerpc/perf/ppc970-pmu.c               | 2 +-
 10 files changed, 16 insertions(+), 11 deletions(-)

(limited to 'arch/powerpc/perf')

diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index b3e936027b2..814622146d5 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -19,6 +19,8 @@
 #define MAX_EVENT_ALTERNATIVES	8
 #define MAX_LIMITED_HWCOUNTERS	2
 
+struct perf_event;
+
 /*
  * This struct provides the constants and functions needed to
  * describe the PMU on a particular POWER-family CPU.
@@ -30,7 +32,8 @@ struct power_pmu {
 	unsigned long	add_fields;
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
-				unsigned int hwc[], unsigned long mmcr[]);
+				unsigned int hwc[], unsigned long mmcr[],
+				struct perf_event *pevents[]);
 	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
 	int		(*get_alternatives)(u64 event_id, unsigned int flags,
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index f82c0973866..01b30238d7d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1224,7 +1224,7 @@ static void power_pmu_enable(struct pmu *pmu)
 	memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
 
 	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
-			       cpuhw->mmcr)) {
+			       cpuhw->mmcr, cpuhw->event)) {
 		/* shouldn't ever get here */
 		printk(KERN_ERR "oops compute_mmcr failed\n");
 		goto out;
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index fe21b515ca4..d115c5635bf 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -260,8 +260,9 @@ static const u32 pmcsel_mask[N_COUNTER] = {
 /*
  * Compute MMCR0/1/2 values for a set of events.
  */
-static int mpc7450_compute_mmcr(u64 event[], int n_ev,
-				unsigned int hwc[], unsigned long mmcr[])
+static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
+				unsigned long mmcr[],
+				struct perf_event *pevents[])
 {
 	u8 event_index[N_CLASSES][N_COUNTER];
 	int n_classevent[N_CLASSES];
diff --git a/arch/powerpc/perf/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c
index 9103a1de864..ce6072fa481 100644
--- a/arch/powerpc/perf/power4-pmu.c
+++ b/arch/powerpc/perf/power4-pmu.c
@@ -356,7 +356,7 @@ static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 }
 
 static int p4_compute_mmcr(u64 event[], int n_ev,
-			   unsigned int hwc[], unsigned long mmcr[])
+			   unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
 	unsigned int pmc, unit, byte, psel, lower;
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index b03b6dc0172..0526dac6600 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -452,7 +452,7 @@ static int power5p_marked_instr_event(u64 event)
 }
 
 static int power5p_compute_mmcr(u64 event[], int n_ev,
-				unsigned int hwc[], unsigned long mmcr[])
+				unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr1 = 0;
 	unsigned long mmcra = 0;
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index 1e8ce423c3a..4dc99f9f796 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -383,7 +383,7 @@ static int power5_marked_instr_event(u64 event)
 }
 
 static int power5_compute_mmcr(u64 event[], int n_ev,
-			       unsigned int hwc[], unsigned long mmcr[])
+			       unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr1 = 0;
 	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 31128e086fe..9c9d646b68a 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -175,7 +175,7 @@ static int power6_marked_instr_event(u64 event)
  * Assign PMC numbers and compute MMCR1 value for a set of events
  */
 static int p6_compute_mmcr(u64 event[], int n_ev,
-			   unsigned int hwc[], unsigned long mmcr[])
+			   unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr1 = 0;
 	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 56c67bca2f7..5b62f238929 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -245,7 +245,7 @@ static int power7_marked_instr_event(u64 event)
 }
 
 static int power7_compute_mmcr(u64 event[], int n_ev,
-			       unsigned int hwc[], unsigned long mmcr[])
+			       unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr1 = 0;
 	unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 639cd915658..19bbddf495d 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -393,7 +393,8 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
 }
 
 static int power8_compute_mmcr(u64 event[], int n_ev,
-			       unsigned int hwc[], unsigned long mmcr[])
+			       unsigned int hwc[], unsigned long mmcr[],
+			       struct perf_event *pevents[])
 {
 	unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
 	unsigned int pmc, pmc_inuse;
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 20139ceeacf..8b6a8a36fa3 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -257,7 +257,7 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 }
 
 static int p970_compute_mmcr(u64 event[], int n_ev,
-			     unsigned int hwc[], unsigned long mmcr[])
+			     unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
 {
 	unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
 	unsigned int pmc, unit, byte, psel;
--
cgit v1.2.3-70-g09d2
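
For readers following along outside the tree, the shape of the widened
hook can be exercised in isolation. In the sketch below only the
argument list mirrors the patch; the pared-down struct perf_event,
demo_compute_mmcr() and the harness are mocks invented for illustration,
not the kernel's definitions.

#include <stdio.h>

/* Minimal mock of the two attribute bits the Power8 backend will read. */
struct perf_event_attr {
	unsigned int exclude_user : 1;
	unsigned int exclude_kernel : 1;
};

struct perf_event {
	struct perf_event_attr attr;
};

/* Same argument list as the new compute_mmcr() prototype. */
static int demo_compute_mmcr(unsigned long long events[], int n_ev,
			     unsigned int hwc[], unsigned long mmcr[],
			     struct perf_event *pevents[])
{
	(void)events;	/* raw event codes, unused in this demo */

	for (int i = 0; i < n_ev; i++) {
		hwc[i] = i;	/* trivial PMC assignment */
		/* The new argument makes per-event attributes visible: */
		printf("event %d: exclude_user=%u exclude_kernel=%u\n", i,
		       pevents[i]->attr.exclude_user,
		       pevents[i]->attr.exclude_kernel);
	}

	mmcr[0] = mmcr[1] = mmcr[2] = mmcr[3] = 0;
	return 0;
}

int main(void)
{
	struct perf_event e0 = { { .exclude_user = 1 } };
	struct perf_event e1 = { { .exclude_kernel = 1 } };
	struct perf_event *pevents[] = { &e0, &e1 };
	unsigned long long events[] = { 0x100, 0x200 };
	unsigned int hwc[2];
	unsigned long mmcr[4];

	return demo_compute_mmcr(events, 2, hwc, mmcr, pevents);
}

The point of the plumbing is visible in the loop: a backend can now make
per-event decisions, which the next patch uses on Power8.
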
From 9de5cb0f6df83243c58b2d1e3754a3c237d954ff Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Wed, 23 Jul 2014 21:12:38 +1000
Subject: powerpc/perf: Add per-event excludes on Power8

Power8 has a new register (MMCR2), which contains individual freeze bits
for each counter. This is an improvement on previous chips as it means
we can have multiple events on the PMU at the same time with different
exclude_{user,kernel,hv} settings. Previously we had to ensure all
events on the PMU had the same exclude settings.

The core of the patch is fairly simple. We use the 207S feature flag to
indicate that the PMU backend supports per-event excludes; if it's set
we skip the generic logic that enforces the equality of excludes between
events. We also use that flag to skip setting the freeze bits in MMCR0;
the PMU backend is expected to have handled setting them in MMCR2.

The complication arises with EBB. The FCxP bits in MMCR2 are accessible
R/W to a task using EBB, which means such a task will be able to see
that we are using MMCR2 for freezing, whereas the old logic that used
MMCR0 is not user visible.

The task cannot see or affect exclude_kernel & exclude_hv, so we only
need to consider exclude_user.

The table below summarises the behaviour both before and after this
commit is applied:

   exclude_user            true   false
   ------------------------------------
          | User visible |   N      N
   Before | Can freeze   |   Y      Y
          | Can unfreeze |   N      Y
   ------------------------------------
          | User visible |   Y      Y
   After  | Can freeze   |   Y      Y
          | Can unfreeze |   Y/N    Y
   ------------------------------------

So firstly I assert that the simple visibility of the exclude_user
setting in MMCR2 is a non-issue. The event belongs to the task, and was
most likely created by the task, so the exclude_user setting is not
privileged information in any way.

Secondly, the behaviour in the exclude_user = false case is unchanged.
This is important, as it is the case that is actually useful, ie. the
event is created with no exclude setting and the task uses MMCR2 to
implement exclusion manually.

For exclude_user = true there is no meaningful change to freezing the
event. Previously the task could use MMCR2 to freeze the event, though
it was already frozen with MMCR0. With the new code the task can use
MMCR2 to freeze the event, though it was already frozen with MMCR2.

The only real change is when exclude_user = true and the task tries to
use MMCR2 to unfreeze the event. Previously this had no effect, because
the event was already frozen in MMCR0. With the new code the task can
unfreeze the event in MMCR2, but at some indeterminate time in the
future the kernel will overwrite its setting and refreeze the event.

Therefore my final assertion is that any task using exclude_user = true
and also fiddling with MMCR2 was deeply confused before this change, and
remains so after it.

Signed-off-by: Michael Ellerman
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/perf/core-book3s.c | 67 +++++++++++++++++++++++++++--------------
 arch/powerpc/perf/power8-pmu.c  | 24 +++++++++++++--
 2 files changed, 67 insertions(+), 24 deletions(-)

(limited to 'arch/powerpc/perf')
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 01b30238d7d..b7cd00b0171 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -36,7 +36,12 @@ struct cpu_hw_events {
 	struct perf_event *event[MAX_HWEVENTS];
 	u64 events[MAX_HWEVENTS];
 	unsigned int flags[MAX_HWEVENTS];
-	unsigned long mmcr[3];
+	/*
+	 * The order of the MMCR array is:
+	 *  - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
+	 *  - 32-bit, MMCR0, MMCR1, MMCR2
+	 */
+	unsigned long mmcr[4];
 	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
 	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -112,9 +117,9 @@ static bool is_ebb_event(struct perf_event *event) { return false; }
 static int ebb_event_check(struct perf_event *event) { return 0; }
 static void ebb_event_add(struct perf_event *event) { }
 static void ebb_switch_out(unsigned long mmcr0) { }
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 {
-	return mmcr0;
+	return cpuhw->mmcr[0];
 }
 
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
@@ -542,8 +547,10 @@ static void ebb_switch_out(unsigned long mmcr0)
 	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
 }
 
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 {
+	unsigned long mmcr0 = cpuhw->mmcr[0];
+
 	if (!ebb)
 		goto out;
 
@@ -568,7 +575,15 @@ static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
 	mtspr(SPRN_SIAR, current->thread.siar);
 	mtspr(SPRN_SIER, current->thread.sier);
 	mtspr(SPRN_SDAR, current->thread.sdar);
-	mtspr(SPRN_MMCR2, current->thread.mmcr2);
+
+	/*
+	 * Merge the kernel & user values of MMCR2. The semantics we implement
+	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
+	 * but not clear bits. If a task wants to be able to clear bits, ie.
+	 * unfreeze counters, it should not set exclude_xxx in its events and
+	 * instead manage the MMCR2 entirely by itself.
+	 */
+	mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
 out:
 	return mmcr0;
 }
@@ -915,6 +930,14 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 	int i, n, first;
 	struct perf_event *event;
 
+	/*
+	 * If the PMU we're on supports per event exclude settings then we
+	 * don't need to do any of this logic. NB. This assumes no PMU has both
+	 * per event exclude and limited PMCs.
+	 */
+	if (ppmu->flags & PPMU_ARCH_207S)
+		return 0;
+
 	n = n_prev + n_new;
 	if (n <= 1)
 		return 0;
@@ -1230,19 +1253,20 @@ static void power_pmu_enable(struct pmu *pmu)
 		goto out;
 	}
 
-	/*
-	 * Add in MMCR0 freeze bits corresponding to the
-	 * attr.exclude_* bits for the first event.
-	 * We have already checked that all events have the
-	 * same values for these bits as the first event.
-	 */
-	event = cpuhw->event[0];
-	if (event->attr.exclude_user)
-		cpuhw->mmcr[0] |= MMCR0_FCP;
-	if (event->attr.exclude_kernel)
-		cpuhw->mmcr[0] |= freeze_events_kernel;
-	if (event->attr.exclude_hv)
-		cpuhw->mmcr[0] |= MMCR0_FCHV;
+	if (!(ppmu->flags & PPMU_ARCH_207S)) {
+		/*
+		 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
+		 * bits for the first event. We have already checked that all
+		 * events have the same value for these bits as the first event.
+		 */
+		event = cpuhw->event[0];
+		if (event->attr.exclude_user)
+			cpuhw->mmcr[0] |= MMCR0_FCP;
+		if (event->attr.exclude_kernel)
+			cpuhw->mmcr[0] |= freeze_events_kernel;
+		if (event->attr.exclude_hv)
+			cpuhw->mmcr[0] |= MMCR0_FCHV;
+	}
 
 	/*
 	 * Write the new configuration to MMCR* with the freeze
@@ -1254,6 +1278,8 @@ static void power_pmu_enable(struct pmu *pmu)
 	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
 	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
 				| MMCR0_FC);
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
 
 	/*
 	 * Read off any pre-existing events that need to move
@@ -1309,10 +1335,7 @@ static void power_pmu_enable(struct pmu *pmu)
 out_enable:
 	pmao_restore_workaround(ebb);
 
-	if (ppmu->flags & PPMU_ARCH_207S)
-		mtspr(SPRN_MMCR2, 0);
-
-	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+	mmcr0 = ebb_switch_in(ebb, cpuhw);
 
 	mb();
 	if (cpuhw->bhrb_users)
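
The OR in ebb_switch_in() above is the entire mechanism behind the
freeze/unfreeze table in the commit message. A minimal userspace sketch
of just that merge, using constants that happen to equal MMCR2_FCP(1)
and MMCR2_FCP(2) from the power8-pmu.c hunk below, shows why a task can
add freeze bits but never clear kernel-owned ones:

#include <stdio.h>

int main(void)
{
	/* Kernel image of MMCR2: PMC1 frozen for problem state (FCP). */
	unsigned long long kernel_mmcr2 = 1ULL << 62;
	unsigned long long user_mmcr2;

	/* The task clears everything, trying to unfreeze PMC1 ... */
	user_mmcr2 = 0;
	printf("unfreeze attempt -> %#llx\n", kernel_mmcr2 | user_mmcr2);

	/* ... but setting an extra bit (PMC2's FCP, 9 bits lower) sticks. */
	user_mmcr2 = 1ULL << 53;
	printf("extra freeze     -> %#llx\n", kernel_mmcr2 | user_mmcr2);

	return 0;
}

Because the kernel rewrites SPRN_MMCR2 with this OR on every switch-in,
a user clear is eventually undone, which is exactly the "Can unfreeze =
Y/N" cell in the table above.
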
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 19bbddf495d..396351db601 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
 #include <asm/firmware.h>
+#include <asm/cputable.h>
 
 
 /*
@@ -266,6 +267,11 @@
 #define MMCRA_SDAR_MODE_TLB	(1ull << 42)
 #define MMCRA_IFM_SHIFT		30
 
+/* Bits in MMCR2 for POWER8 */
+#define MMCR2_FCS(pmc)		(1ull << (63 - (((pmc) - 1) * 9)))
+#define MMCR2_FCP(pmc)		(1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCH(pmc)		(1ull << (57 - (((pmc) - 1) * 9)))
+
 
 static inline bool event_is_fab_match(u64 event)
 {
@@ -396,7 +402,7 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
 			       unsigned int hwc[], unsigned long mmcr[],
 			       struct perf_event *pevents[])
 {
-	unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
+	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
 	unsigned int pmc, pmc_inuse;
 	int i;
@@ -411,7 +417,7 @@
 	/* In continous sampling mode, update SDAR on TLB miss */
 	mmcra = MMCRA_SDAR_MODE_TLB;
 
-	mmcr1 = 0;
+	mmcr1 = mmcr2 = 0;
 
 	/* Second pass: assign PMCs, set all MMCR1 fields */
 	for (i = 0; i < n_ev; ++i) {
@@ -473,6 +479,19 @@
 			mmcra |= val << MMCRA_IFM_SHIFT;
 		}
 
+		if (pevents[i]->attr.exclude_user)
+			mmcr2 |= MMCR2_FCP(pmc);
+
+		if (pevents[i]->attr.exclude_hv)
+			mmcr2 |= MMCR2_FCH(pmc);
+
+		if (pevents[i]->attr.exclude_kernel) {
+			if (cpu_has_feature(CPU_FTR_HVMODE))
+				mmcr2 |= MMCR2_FCH(pmc);
+			else
+				mmcr2 |= MMCR2_FCS(pmc);
+		}
+
 		hwc[i] = pmc - 1;
 	}
 
@@ -492,6 +511,7 @@
 
 	mmcr[1] = mmcr1;
 	mmcr[2] = mmcra;
+	mmcr[3] = mmcr2;
 
 	return 0;
 }
--
cgit v1.2.3-70-g09d2
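
As a final sanity check, the 9-bit-per-PMC stride encoded by the new
macros can be printed from a standalone program. The three #defines are
copied from the patch; the harness around them is illustrative only.

#include <stdio.h>

#define MMCR2_FCS(pmc)	(1ull << (63 - (((pmc) - 1) * 9)))	/* freeze in supervisor state */
#define MMCR2_FCP(pmc)	(1ull << (62 - (((pmc) - 1) * 9)))	/* freeze in problem state */
#define MMCR2_FCH(pmc)	(1ull << (57 - (((pmc) - 1) * 9)))	/* freeze in hypervisor state */

int main(void)
{
	/* MMCR2 carries one 9-bit field per counter, PMC1..PMC6. */
	for (int pmc = 1; pmc <= 6; pmc++)
		printf("PMC%d: FCS=%016llx FCP=%016llx FCH=%016llx\n",
		       pmc, MMCR2_FCS(pmc), MMCR2_FCP(pmc), MMCR2_FCH(pmc));

	return 0;
}

The output makes the mapping in power8_compute_mmcr() concrete:
exclude_user sets FCP, exclude_hv sets FCH, and exclude_kernel sets FCH
or FCS depending on whether the kernel runs in hypervisor mode.
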