author		Olof Johansson <olof@lixom.net>	2012-03-13 17:38:09 -0700
committer	Olof Johansson <olof@lixom.net>	2012-03-13 17:38:09 -0700
commit		ae0b82504e515fdb9bc23c0b770d2b30efd49dc9 (patch)
tree		665d1f26b32f9e68346a9529388fbadec519e865 /arch/arm/kernel/perf_event.c
parent		f7c8faedf98aa5ec372e0191078ac7fe1e7fb067 (diff)
parent		a6e24019468009a21b674e392d74283a90f415dd (diff)
Merge branch 'soc' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/renesas into next/soc
* 'soc' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/renesas: (234 commits)
  ARM: shmobile: remove additional __io() macro use
  ARM: mach-shmobile: default to no earlytimer
  ARM: mach-shmobile: r8a7779 and Marzen timer rework
  ARM: mach-shmobile: r8a7740 and Bonito timer rework
  ARM: mach-shmobile: sh73a0, AG5EVM and Kota2 timer rework
  ARM: mach-shmobile: sh7372, AP4EVB and Mackerel timer rework
  ARM: mach-shmobile: sh7377 and G4EVM timer rework
  ARM: mach-shmobile: sh7367 and G3EVM timer rework
  ARM: mach-shmobile: add shmobile_earlytimer_init()
  ARM: mach-shmobile: Move sh7372 AP4EVB external clk setup
  ARM: mach-shmobile: Move sh7372 Mackerel external clk setup
  ARM: mach-shmobile: rename clk_init() to shmobile_clk_init()
  ARM: mach-shmobile: r8a7779 L2 cache support
  ARM: mach-shmobile: r8a7779 map_io and init_early update
  ARM: mach-shmobile: r8a7740 map_io and init_early update
  ARM: mach-shmobile: sh73a0 map_io and init_early update
  ARM: mach-shmobile: sh7372 map_io and init_early update
  ARM: mach-shmobile: sh7377 map_io and init_early update
  ARM: mach-shmobile: sh7367 map_io and init_early update
  sh: remove clk_ops
  ...

(includes an update to v3.3-rc7)

Conflicts:
	arch/arm/mach-omap2/id.c
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--	arch/arm/kernel/perf_event.c	45
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47..b2abfa18f13 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			    new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
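
The single masked subtraction that replaces the overflow/non-overflow branch above relies on max_period being 2^N - 1 for an N-bit counter: the difference modulo the counter width is the event count whether or not the counter wrapped, provided it wrapped at most once between reads. A minimal standalone sketch of the idiom (plain C with hypothetical values, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t max_period = (1ULL << 32) - 1;	/* 32-bit counter */
		uint64_t prev, new, delta;

		/* No wrap: 250 - 100 = 150 events. */
		prev = 100; new = 250;
		delta = (new - prev) & max_period;
		printf("no wrap: delta = %llu\n", (unsigned long long)delta);

		/* One wrap: the counter overflowed between the two reads.
		 * 0x10 events before the wrap plus 0x10 after = 0x20 (32). */
		prev = 0xfffffff0ULL; new = 0x00000010ULL;
		delta = (new - prev) & max_period;
		printf("wrapped: delta = %llu\n", (unsigned long long)delta);

		return 0;
	}

The single-wrap assumption is exactly what the sample_period change further down protects.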
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
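
To make the comment concrete: the counter is programmed with the negated period (armpmu_event_set_period() writes -left to the counter), so halving sample_period leaves roughly half the counter range as headroom after an overflow before new_raw_count could lap prev_raw_count. A rough standalone sketch of the arithmetic, assuming a 32-bit counter:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t max_period = (1ULL << 32) - 1;
		uint64_t sample_period = max_period >> 1;	/* 0x7fffffff */

		/* The counter is loaded with -period so it overflows
		 * (and raises the IRQ) after sample_period events. */
		uint32_t start = (uint32_t)(0 - sample_period);	/* 0x80000001 */

		printf("counter starts at 0x%08x\n", start);
		printf("events until overflow IRQ: %llu\n",
		       (unsigned long long)sample_period);
		printf("headroom before prev is lapped: ~%llu\n",
		       (unsigned long long)(max_period - sample_period));
		return 0;
	}

With the old full-width period the counter restarted essentially at zero, so a delayed overflow IRQ let the fresh count overtake prev_raw_count almost immediately and the masked delta collapsed to a small bogus value.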
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
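
One subtlety in pmu_cpu_notify() above: hotplug notifications carry the CPU_TASKS_FROZEN bit when they originate from the suspend/resume path, so masking that bit off makes the test accept both CPU_STARTING and CPU_STARTING_FROZEN. A userland sketch of the check (constant values copied from <linux/cpu.h> of that era; illustrative only):

	#include <stdio.h>

	/* Values as in <linux/cpu.h> around v3.3 (illustrative). */
	#define CPU_ONLINE		0x0002
	#define CPU_STARTING		0x000A
	#define CPU_TASKS_FROZEN	0x0010
	#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

	int main(void)
	{
		unsigned long actions[] = {
			CPU_STARTING,		/* ordinary hotplug */
			CPU_STARTING_FROZEN,	/* resume from suspend */
			CPU_ONLINE,		/* must not match */
		};

		for (unsigned i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
			printf("action 0x%04lx -> reset PMU: %s\n", actions[i],
			       (actions[i] & ~CPU_TASKS_FROZEN) == CPU_STARTING
					? "yes" : "no");
		return 0;
	}

CPU_STARTING notifiers run on the incoming CPU itself with interrupts disabled, which is why cpu_pmu->reset(NULL) can simply poke the local PMU registers.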
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");