From aa2bc1ade59003a379ffc485d6da2d92ea3370a6 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 9 Nov 2011 17:56:37 +0100
Subject: perf: Don't use -ENOSPC for out of PMU resources

People (Linus) objected to using -ENOSPC to signal not having enough
resources on the PMU to satisfy the request. Use -EINVAL.

Requested-by: Linus Torvalds
Cc: Stephane Eranian
Cc: Will Deacon
Cc: Deng-Cheng Zhu
Cc: David Daney
Cc: Ralf Baechle
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/n/tip-xv8geaz2zpbjhlx0svmpp28n@git.kernel.org
[ merged to newer kernel, fixed up MIPS impact ]
Signed-off-by: Ingo Molnar
---
 arch/mips/kernel/perf_event_mipsxx.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/mips/kernel/perf_event_mipsxx.c')

diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2971bcf8e..315fc0b250f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
 	if (!atomic_inc_not_zero(&active_events)) {
 		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
 			atomic_dec(&active_events);
-			return -ENOSPC;
+			return -EINVAL;
 		}
 
 		mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
 	if (!validate_event(&fake_cpuc, leader))
-		return -ENOSPC;
+		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 		if (!validate_event(&fake_cpuc, sibling))
-			return -ENOSPC;
+			return -EINVAL;
 	}
 
 	if (!validate_event(&fake_cpuc, event))
-		return -ENOSPC;
+		return -EINVAL;
 
 	return 0;
 }
--
cgit v1.2.3-70-g09d2
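For context, validate_group() rejects a group by emulating counter allocation
against a zeroed fake cpu_hw_events; the change above only alters which error
code the perf_event_open() caller sees. The following minimal user-space sketch
models that emulation. fake_cpuc, alloc_counter(), NUM_COUNTERS and the group
sizes are illustrative stand-ins, not the kernel's real structures or limits.

	#include <errno.h>
	#include <stdio.h>

	#define NUM_COUNTERS 4

	struct fake_cpuc {
		int used[NUM_COUNTERS];	/* 1 if that counter slot is taken */
	};

	/* Grab the first free counter, or fail if none is left. */
	static int alloc_counter(struct fake_cpuc *cpuc)
	{
		for (int i = 0; i < NUM_COUNTERS; i++) {
			if (!cpuc->used[i]) {
				cpuc->used[i] = 1;
				return i;
			}
		}
		return -1;
	}

	/* A group of n events is valid only if every member gets a counter. */
	static int validate_group_sketch(int n_events)
	{
		struct fake_cpuc fake = { { 0 } };

		for (int i = 0; i < n_events; i++)
			if (alloc_counter(&fake) < 0)
				return -EINVAL;	/* was -ENOSPC before this patch */
		return 0;
	}

	int main(void)
	{
		printf("group of 3 -> %d\n", validate_group_sketch(3));	/* 0 */
		printf("group of 5 -> %d\n", validate_group_sketch(5));	/* -EINVAL */
		return 0;
	}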
From 2c1b54d331bde7afbf8da24789cce2402e155495 Mon Sep 17 00:00:00 2001
From: Deng-Cheng Zhu
Date: Tue, 22 Nov 2011 03:28:45 +0800
Subject: MIPS/Perf-events: Don't do validation on raw events

MIPS licensees may want to modify the performance counters to count
extra events. Also, a user who is working with raw events is certainly
consulting the processor manual already, and feeding in unsupported
event numbers should not cause hardware failures or the like.

[ralf@linux-mips.org: Performance events are also being used in internal
performance evaluation and have a tendency to change as the
microarchitecture evolves, even for minor revisions that may not be
distinguishable by PrID. It is not very practicable to maintain a list
of all events, and there is no real benefit.]

Signed-off-by: Deng-Cheng Zhu
Cc: Peter Zijlstra
Cc: Paul Mackerras
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Cc: David Daney
Cc: Eyal Barzilay
Cc: Zenon Fortuna
Patchwork: https://patchwork.linux-mips.org/patch/3107/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/perf_event_mipsxx.c | 34 ++--------------------------------
 1 file changed, 2 insertions(+), 32 deletions(-)

(limited to 'arch/mips/kernel/perf_event_mipsxx.c')

diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 4f2971bcf8e..ab4c761cfed 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1380,20 +1380,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 }
 
 /* 24K */
-#define IS_UNSUPPORTED_24K_EVENT(r, b) \
-	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
-	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
-	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
-	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
-	 ((b) >= 68 && (b) <= 127))
 #define IS_BOTH_COUNTERS_24K_EVENT(b) \
 	((b) == 0 || (b) == 1 || (b) == 11)
 
 /* 34K */
-#define IS_UNSUPPORTED_34K_EVENT(r, b) \
-	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
-	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
-	 ((b) >= 68 && (b) <= 127))
 #define IS_BOTH_COUNTERS_34K_EVENT(b) \
 	((b) == 0 || (b) == 1 || (b) == 11)
 #ifdef CONFIG_MIPS_MT_SMP
@@ -1406,20 +1396,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 #endif
 
 /* 74K */
-#define IS_UNSUPPORTED_74K_EVENT(r, b) \
-	((r) == 5 || ((r) >= 135 && (r) <= 137) || \
-	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
-	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
-	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
-	 (b) == 61 || (r) == 62 || (r) == 191 || \
-	 ((b) >= 64 && (b) <= 127))
 #define IS_BOTH_COUNTERS_74K_EVENT(b) \
 	((b) == 0 || (b) == 1)
 
 /* 1004K */
-#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
-	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
-	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
 #define IS_BOTH_COUNTERS_1004K_EVENT(b) \
 	((b) == 0 || (b) == 1 || (b) == 11)
 #ifdef CONFIG_MIPS_MT_SMP
@@ -1445,11 +1425,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 	unsigned int raw_id = config & 0xff;
 	unsigned int base_id = raw_id & 0x7f;
 
+	raw_event.event_id = base_id;
+
 	switch (current_cpu_type()) {
 	case CPU_24K:
-		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
-			return ERR_PTR(-EOPNOTSUPP);
-		raw_event.event_id = base_id;
 		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 		else
@@ -1464,9 +1443,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 #endif
 		break;
 	case CPU_34K:
-		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
-			return ERR_PTR(-EOPNOTSUPP);
-		raw_event.event_id = base_id;
 		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 		else
@@ -1482,9 +1458,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 #endif
 		break;
 	case CPU_74K:
-		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
-			return ERR_PTR(-EOPNOTSUPP);
-		raw_event.event_id = base_id;
 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 		else
@@ -1495,9 +1468,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 #endif
 		break;
 	case CPU_1004K:
-		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
-			return ERR_PTR(-EOPNOTSUPP);
-		raw_event.event_id = base_id;
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 		else
--
cgit v1.2.3-70-g09d2
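With the unsupported-event filters gone, mipsxx_pmu_map_raw_event() simply takes
the low seven bits of the raw config as the event id and only decides which
hardware counters may count it. The sketch below models that decode for the 24K
case in plain user-space C; the CNTR_EVEN/CNTR_ODD values and the example raw id
are illustrative, and the kernel additionally handles the CONFIG_MIPS_MT_SMP
counter ranges, which are omitted here.

	#include <stdint.h>
	#include <stdio.h>

	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa

	/* 24K events 0, 1 and 11 may be counted on either counter of a pair. */
	#define IS_BOTH_COUNTERS_24K_EVENT(b)	((b) == 0 || (b) == 1 || (b) == 11)

	struct raw_event {
		unsigned int event_id;
		unsigned int cntr_mask;
	};

	static struct raw_event map_raw_event_24k(uint64_t config)
	{
		unsigned int raw_id = config & 0xff;
		unsigned int base_id = raw_id & 0x7f;
		struct raw_event ev;

		ev.event_id = base_id;	/* no "unsupported event" filter any more */
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			ev.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			ev.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
		return ev;
	}

	int main(void)
	{
		/* raw id 0x86: base id 6, bit 7 set -> restricted to odd counters */
		struct raw_event ev = map_raw_event_24k(0x86);

		printf("event %u, counter mask %#x\n", ev.event_id, ev.cntr_mask);
		return 0;
	}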
From 74653ccf231a3100dd03e16e7a4178868a37332e Mon Sep 17 00:00:00 2001
From: Deng-Cheng Zhu
Date: Tue, 22 Nov 2011 03:28:46 +0800
Subject: MIPS/Perf-events: Remove erroneous check on active_events

Port the following patch for ARM by Mark Rutland:

- 57ce9bb39b476accf8fba6e16aea67ed76ea523d
  ARM: 6902/1: perf: Remove erroneous check on active_events

  When initialising a PMU, there is a check to protect against races with
  other CPUs filling all of the available event slots. Since armpmu_add
  checks that an event can be scheduled, we do not need to do this at
  initialisation time. Furthermore the current code is broken because it
  assumes that atomic_inc_not_zero will unconditionally increment
  active_counts and then tries to decrement it again on failure.

  This patch removes the broken, redundant code.

Signed-off-by: Deng-Cheng Zhu
Cc: Peter Zijlstra
Cc: Paul Mackerras
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Cc: David Daney
Cc: Eyal Barzilay
Cc: Zenon Fortuna
Patchwork: https://patchwork.linux-mips.org/patch/3106/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/perf_event_mipsxx.c | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/mips/kernel/perf_event_mipsxx.c')

diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ab4c761cfed..b5d6b3fa5a4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -621,11 +621,6 @@ static int mipspmu_event_init(struct perf_event *event)
 		return -ENODEV;
 
 	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return -ENOSPC;
-		}
-
 		mutex_lock(&pmu_reserve_mutex);
 		if (atomic_read(&active_events) == 0)
 			err = mipspmu_get_irq();
--
cgit v1.2.3-70-g09d2
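The broken pattern is easy to reproduce outside the kernel: atomic_inc_not_zero()
reports failure precisely when it did not increment, so decrementing in that
branch corrupts the count. Below is a small C11 model of the removed branch; it
is not the kernel's atomic implementation, and the MIPS_MAX_HWEVENTS comparison
is dropped for brevity.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Increment *v only if it is non-zero; return 1 if incremented, 0 otherwise. */
	static int inc_not_zero(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old != 0) {
			if (atomic_compare_exchange_weak(v, &old, old + 1))
				return 1;	/* was non-zero, now incremented */
		}
		return 0;	/* was zero, left untouched */
	}

	int main(void)
	{
		atomic_int active_events = 0;

		if (!inc_not_zero(&active_events)) {
			/*
			 * The removed branch decremented the counter here even
			 * though inc_not_zero() had not incremented it, so the
			 * count could be driven below zero.
			 */
			atomic_fetch_sub(&active_events, 1);
		}
		printf("active_events = %d\n", atomic_load(&active_events));	/* -1 */
		return 0;
	}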
From 266623b7597c97e6ff987b45719540b227751420 Mon Sep 17 00:00:00 2001
From: Deng-Cheng Zhu
Date: Tue, 22 Nov 2011 03:28:47 +0800
Subject: MIPS/Perf-events: Remove pmu and event state checking in validate_event()

Why the pmu check is removed: since 3.2-rc1, by the time the arch-level
event init is called, the event is already connected to its PMU. Also,
validate_event() is _only_ called by validate_group() during event init,
so there is no need to check, or temporarily assign, the event's pmu in
validate_group().

Why the event state check is removed: events may be created in
PERF_EVENT_STATE_OFF (attr->disabled == 1). When such events go through
this check, validate_group() does only dummy work, yet we do need to do
group scheduling emulation for them at event init. Again,
validate_event() is _only_ called by validate_group().

Reference: http://www.spinics.net/lists/mips/msg42190.html

Signed-off-by: Deng-Cheng Zhu
Cc: Peter Zijlstra
Cc: Paul Mackerras
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Cc: David Daney
Cc: Eyal Barzilay
Cc: Zenon Fortuna
Patchwork: https://patchwork.linux-mips.org/patch/3108/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/perf_event_mipsxx.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

(limited to 'arch/mips/kernel/perf_event_mipsxx.c')

diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index b5d6b3fa5a4..b22cc5fd596 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -707,18 +707,6 @@ static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 
 }
 
-static int validate_event(struct cpu_hw_events *cpuc,
-			  struct perf_event *event)
-{
-	struct hw_perf_event fake_hwc = event->hw;
-
-	/* Allow mixed event group. So return 1 to pass validation. */
-	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
-		return 1;
-
-	return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
-}
-
 static int validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
@@ -726,15 +714,15 @@ static int validate_group(struct perf_event *event)
 
 	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
-	if (!validate_event(&fake_cpuc, leader))
+	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 		return -ENOSPC;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_cpuc, sibling))
+		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 			return -ENOSPC;
 	}
 
-	if (!validate_event(&fake_cpuc, event))
+	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 		return -ENOSPC;
 
 	return 0;
--
cgit v1.2.3-70-g09d2

From ff5d7265cfb88e8f8943a55afde90255fc5deacb Mon Sep 17 00:00:00 2001
From: Deng-Cheng Zhu
Date: Tue, 22 Nov 2011 03:28:48 +0800
Subject: MIPS/Perf-events: Cleanup event->destroy at event init

Simplify the code by changing the place where event->destroy() is
assigned.

Signed-off-by: Deng-Cheng Zhu
Cc: Peter Zijlstra
Cc: Paul Mackerras
Cc: Ingo Molnar
Cc: Arnaldo Carvalho de Melo
Cc: David Daney
Cc: Eyal Barzilay
Cc: Zenon Fortuna
Patchwork: https://patchwork.linux-mips.org/patch/3109/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/perf_event_mipsxx.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

(limited to 'arch/mips/kernel/perf_event_mipsxx.c')

diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index b22cc5fd596..bda4bc9e698 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -633,11 +633,7 @@ static int mipspmu_event_init(struct perf_event *event)
 	if (err)
 		return err;
 
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err;
+	return __hw_perf_event_init(event);
 }
 
 static struct pmu pmu = {
@@ -1262,13 +1258,14 @@ static int __hw_perf_event_init(struct perf_event *event)
 	}
 
 	err = 0;
-	if (event->group_leader != event) {
+	if (event->group_leader != event)
 		err = validate_group(event);
-		if (err)
-			return -EINVAL;
-	}
 
 	event->destroy = hw_perf_event_destroy;
+
+	if (err)
+		event->destroy(event);
+
 	return err;
 }
--
cgit v1.2.3-70-g09d2
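The net effect of the last patch is that event->destroy is installed before the
result of validate_group() is examined, so a failed group validation is unwound
through the same destroy path used for normal teardown, and mipspmu_event_init()
can simply return the result of __hw_perf_event_init(). The stand-alone model
below illustrates only that ordering; struct event, its fields and the helpers
are simplified stand-ins, not the kernel's perf_event API.

	#include <errno.h>
	#include <stdio.h>

	struct event {
		int in_group;	/* models event->group_leader != event */
		void (*destroy)(struct event *ev);
	};

	static void hw_event_destroy(struct event *ev)
	{
		printf("releasing PMU resources for event %p\n", (void *)ev);
	}

	/* Pretend the group does not fit on the available counters. */
	static int validate_group(struct event *ev)
	{
		(void)ev;
		return -EINVAL;
	}

	static int hw_event_init(struct event *ev)
	{
		int err = 0;

		if (ev->in_group)
			err = validate_group(ev);

		ev->destroy = hw_event_destroy;	/* installed before the error check */

		if (err)
			ev->destroy(ev);	/* failure is unwound through destroy() */

		return err;
	}

	int main(void)
	{
		struct event ev = { .in_group = 1, .destroy = NULL };

		printf("init returned %d\n", hw_event_init(&ev));
		return 0;
	}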