path: root/arch/x86
author	Venkatesh Srinivas <venkateshs@google.com>	2014-03-13 12:36:26 -0700
committer	Ingo Molnar <mingo@kernel.org>	2014-04-18 12:14:26 +0200
commit	24223657806a0ebd0ae5c9caaf7b021091889cf2 (patch)
tree	46f5e36797458f19f0fd1f53f6e0ac00de82f00a /arch/x86
parent	6381c24cd6d5d6373620426ab0a96c80ed953e20 (diff)
perf/x86/intel: Use rdmsrl_safe() when initializing RAPL PMU
CPUs which should support the RAPL counters according to
Family/Model/Stepping may still issue #GP when attempting to access
the RAPL MSRs. This may happen when Linux is running under KVM and
we are passing-through host F/M/S data, for example. Use rdmsrl_safe
to first access the RAPL_POWER_UNIT MSR; if this fails, do not
attempt to use this PMU.

Signed-off-by: Venkatesh Srinivas <venkateshs@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394739386-22260-1-git-send-email-venkateshs@google.com
Cc: zheng.z.yan@intel.com
Cc: eranian@google.com
Cc: ak@linux.intel.com
Cc: linux-kernel@vger.kernel.org
[ The patch also silently fixes another bug: rapl_pmu_init() didn't
  handle the memory alloc failure case previously. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
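For reference, a minimal sketch of the probing pattern the patch relies on. rdmsrl_safe() and MSR_RAPL_POWER_UNIT are the kernel identifiers used in the diff below; the helper name rapl_check_power_unit() is illustrative only and not part of the patch:

	/*
	 * Probe MSR_RAPL_POWER_UNIT before trusting Family/Model/Stepping.
	 * rdmsrl_safe() traps a potential #GP and returns non-zero on
	 * failure, so an unreadable MSR means the RAPL PMU must not be used.
	 * (Hypothetical helper, shown only to illustrate the pattern.)
	 */
	static int rapl_check_power_unit(u64 *units)
	{
		u64 msr_val;

		if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_val))
			return -1;	/* MSR read faulted: no RAPL here */

		/* Energy status units live in bits 12:8 of this MSR. */
		*units = (msr_val >> 8) & 0x1FULL;
		return 0;
	}

Because the faulting read is trapped rather than allowed to oops, rapl_cpu_prepare() can bail out cleanly on hardware (or KVM guests) whose F/M/S advertises RAPL but whose MSRs are not actually accessible.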
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_rapl.c	12
1 files changed, 9 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 4b9a9e9466b..7c87424d414 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -535,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
int phys_id = topology_physical_package_id(cpu);
u64 ms;
+ u64 msr_rapl_power_unit_bits;
if (pmu)
return 0;
@@ -542,6 +543,9 @@ static int rapl_cpu_prepare(int cpu)
if (phys_id < 0)
return -1;
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+ return -1;
+
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
if (!pmu)
return -1;
@@ -555,8 +559,7 @@ static int rapl_cpu_prepare(int cpu)
*
* we cache in local PMU instance
*/
- rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
- pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+ pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
pmu->pmu = &rapl_pmu_class;
/*
@@ -677,7 +680,9 @@ static int __init rapl_pmu_init(void)
cpu_notifier_register_begin();
for_each_online_cpu(cpu) {
- rapl_cpu_prepare(cpu);
+ ret = rapl_cpu_prepare(cpu);
+ if (ret)
+ goto out;
rapl_cpu_init(cpu);
}
@@ -700,6 +705,7 @@ static int __init rapl_pmu_init(void)
hweight32(rapl_cntr_mask),
ktime_to_ms(pmu->timer_interval));
+out:
cpu_notifier_register_done();
return 0;