author		Will Deacon <will.deacon@arm.com>	2014-02-11 18:08:41 +0000
committer	Will Deacon <will.deacon@arm.com>	2014-02-21 11:11:21 +0000
commit		5f5092e72cc25a6a5785308270e0085b2b2772cc (patch)
tree		a3f5d15b978418cd29200a95d7abd33440a16577 /arch/arm/kernel/perf_event.c
parent		d84c47837d800c0db1c9f2015a1565504417f812 (diff)
ARM: perf: hook up perf_sample_event_took around pmu irq handling
Since we indirect all of our PMU IRQ handling through a dispatcher, it's trivial to hook up perf_sample_event_took to prevent applications such as oprofile from generating interrupt storms due to an unrealistically low sample period.

Reported-by: Robert Richter <rric@kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
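Background: perf_sample_event_took() is the perf core's self-throttling hook. It takes the measured duration of a sampling interrupt handler and, when handlers consistently exceed the CPU-time budget implied by the kernel.perf_cpu_time_max_percent sysctl, lowers the maximum allowed sample rate so a too-low sample period cannot monopolise the CPU. A simplified, standalone sketch of that feedback idea follows (illustrative only; the names, the moving-average weights, and the rate-halving policy are assumptions, not the perf core's actual code):

	/*
	 * Illustrative sketch of rate-throttling feedback, NOT the
	 * kernel's implementation of perf_sample_event_took().
	 */
	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL

	static uint64_t avg_sample_ns;			/* running average of handler cost */
	static unsigned int max_samples_per_sec = 100000;

	static void sample_event_took(uint64_t sample_ns)
	{
		uint64_t allowed_ns;

		/* exponential moving average of recent handler durations */
		avg_sample_ns = (avg_sample_ns * 7 + sample_ns) / 8;

		/* CPU-time budget per sample at the current maximum rate */
		allowed_ns = NSEC_PER_SEC / max_samples_per_sec;

		/* handlers cost more than their budget: halve the rate */
		if (avg_sample_ns > allowed_ns && max_samples_per_sec > 1)
			max_samples_per_sec /= 2;
	}

The patch below simply brackets the PMU IRQ dispatch with sched_clock() reads so this hook is fed an accurate per-interrupt cost.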
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--	arch/arm/kernel/perf_event.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 361a1aaee7c..a6bc431cde7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -302,6 +302,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 	struct arm_pmu *armpmu;
 	struct platform_device *plat_device;
 	struct arm_pmu_platdata *plat;
+	int ret;
+	u64 start_clock, finish_clock;
 
 	if (irq_is_percpu(irq))
 		dev = *(void **)dev;
@@ -309,10 +311,15 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 	plat_device = armpmu->plat_device;
 	plat = dev_get_platdata(&plat_device->dev);
 
+	start_clock = sched_clock();
 	if (plat && plat->handle_irq)
-		return plat->handle_irq(irq, dev, armpmu->handle_irq);
+		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
 	else
-		return armpmu->handle_irq(irq, dev);
+	 	ret = armpmu->handle_irq(irq, dev);
+	finish_clock = sched_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
 }
 
 static void
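Read straight through, the dispatcher after this patch looks as follows. This is reconstructed from the two hunks above; the lines between the hunks are not shown in this diff and are left elided:

	static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
	{
		struct arm_pmu *armpmu;
		struct platform_device *plat_device;
		struct arm_pmu_platdata *plat;
		int ret;
		u64 start_clock, finish_clock;

		if (irq_is_percpu(irq))
			dev = *(void **)dev;
		/* ... lines between the two hunks elided ... */
		plat_device = armpmu->plat_device;
		plat = dev_get_platdata(&plat_device->dev);

		/* time the whole dispatch, whichever handler runs */
		start_clock = sched_clock();
		if (plat && plat->handle_irq)
			ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
		else
			ret = armpmu->handle_irq(irq, dev);
		finish_clock = sched_clock();

		/* report handler cost to the perf core's rate throttle */
		perf_sample_event_took(finish_clock - start_clock);
		return ret;
	}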