author		Will Deacon <will.deacon@arm.com>	2012-03-06 17:34:50 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-03-07 09:40:49 +0000
commit		f6f5a30c834135c9f2fa10400c59ebbdd9188567 (patch)
tree		208358216772eedab5998070878db55df5d4e772 /arch/arm/kernel/perf_event_xscale.c
parent		99c1745b9c76910e195889044f914b4898b7c9a5 (diff)
ARM: 7356/1: perf: check that we have an event in the PMU IRQ handlers
The PMU IRQ handlers in perf assume that if a counter has overflowed then
perf must be responsible. In the paranoid world of crazy hardware, this
could be false, so check that we do have a valid event before attempting
to dereference NULL in the interrupt path.

Cc: <stable@vger.kernel.org>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
-rw-r--r--	arch/arm/kernel/perf_event_xscale.c	| 6
1 file changed, 6 insertions, 0 deletions
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 831e019b017..a5bbd360cc4 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,6 +255,9 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
@@ -592,6 +595,9 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
+		if (!event)
+			continue;
+
 		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
 			continue;
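
For context, a simplified sketch of how the per-counter loop in
xscale1pmu_handle_irq() reads once the check is applied. The event and
overflow checks are taken from the hunks above; the loop bound
(num_counters) and the trailing bookkeeping are paraphrased stand-ins for
illustration, not quoted from the file.

	for (idx = 0; idx < num_counters; ++idx) {	/* bound is a stand-in */
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/*
		 * The counter may have overflowed without perf owning it;
		 * skip it rather than dereference a NULL event below.
		 */
		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		/*
		 * ... update the count from hardware, re-arm the sample
		 * period and hand the overflow to the perf core ...
		 */
	}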