-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	 1 +
-rw-r--r--	kernel/perf_counter.c	12 ++++++------
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c109819c2cb..6cc1660db8d 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -740,6 +740,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 again:
 	if (++loops > 100) {
 		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+		perf_counter_print_debug();
 		return 1;
 	}
 
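The hunk above already bounds the handler's ack-and-retry loop at 100 iterations; the new perf_counter_print_debug() call additionally dumps the PMU state once before the handler gives up, so a stuck-IRQ warning arrives with something to debug. A minimal userspace sketch of the same guard pattern, assuming hypothetical stubs check_overflow() and dump_debug_state() in place of the PMU specifics:

#include <stdio.h>

/* Hypothetical stand-ins for the PMU-specific helpers. */
static int check_overflow(void) { return 0; }   /* 0: nothing pending */
static void dump_debug_state(void) { puts("...pmu state..."); }

static int handle_irq(void)
{
	int loops = 0;

again:
	if (++loops > 100) {
		/* Warn, dump state for post-mortem, then give up. */
		fprintf(stderr, "irq loop stuck!\n");
		dump_debug_state();
		return 1;
	}

	if (check_overflow())
		goto again;	/* another counter overflowed meanwhile */

	return 0;
}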
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 69d4de81596..08584c16049 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -208,18 +208,17 @@ static void __perf_counter_remove_from_context(void *info)
 		return;
 
 	spin_lock_irqsave(&ctx->lock, flags);
+	/*
+	 * Protect the list operation against NMI by disabling the
+	 * counters on a global level.
+	 */
+	perf_disable();
 
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
 
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * counters on a global level. NOP for non NMI based counters.
-	 */
-	perf_disable();
 	list_del_counter(counter, ctx);
-	perf_enable();
 
 	if (!ctx->task) {
 		/*
@@ -231,6 +230,7 @@ static void __perf_counter_remove_from_context(void *info)
 			perf_max_counters - perf_reserved_percpu);
 	}
 
+	perf_enable();
 	spin_unlock_irqrestore(&ctx->lock, flags);
 }
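Taken together, the two kernel/perf_counter.c hunks move the perf_disable()/perf_enable() pair outward: the PMU is now quiesced for the whole locked section, so counter_sched_out(), the list deletion and the per-CPU reservation update all run with counters disabled, instead of only list_del_counter() as before. A compilable sketch of the resulting shape, with illustrative stubs standing in for the kernel locking and PMU primitives:

#include <stdio.h>

struct counter { struct counter *prev, *next; };

/* Illustrative stubs for the kernel primitives used by the patch. */
static void lock_irqsave(void)           { }
static void unlock_irqrestore(void)      { }
static void pmu_disable(void)            { puts("pmu: disabled"); }
static void pmu_enable(void)             { puts("pmu: enabled"); }
static void sched_out(struct counter *c) { (void)c; }
static void update_reservation(void)     { }

static void remove_from_context(struct counter *c)
{
	lock_irqsave();
	/*
	 * Disable the PMU across the whole teardown, not just the
	 * list operation: nothing NMI-driven may observe the counter
	 * while it is being scheduled out and unlinked.
	 */
	pmu_disable();

	sched_out(c);
	c->prev->next = c->next;	/* the list_del_counter() step */
	c->next->prev = c->prev;
	update_reservation();

	pmu_enable();
	unlock_irqrestore();
}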