path: root/kernel/hw_breakpoint.c
author		Jason Wessel <jason.wessel@windriver.com>	2010-01-28 17:04:43 -0600
committer	Ingo Molnar <mingo@elte.hu>	2010-01-30 08:42:21 +0100
commit		5352ae638e2d7d5c9b2e4d528676bbf2af6fd6f3 (patch)
tree		95bab4d28f7c91bc5b7e79b3e1c879dfe96c52b9 /kernel/hw_breakpoint.c
parent		cc0967490c1c3824bc5b75718b6ca8a51d9f2617 (diff)
perf, hw_breakpoint, kgdb: Do not take mutex for kernel debugger
This patch fixes a regression where the kernel debugger and the perf API do not nicely share hw breakpoint reservations. The kernel debugger cannot use any mutex_lock() calls because it can start the kernel running from an invalid context. A mutex-free version of the reservation API had to be created so the kernel debugger can safely update hw breakpoint reservations.

It is improbable that a breakpoint reservation will be concurrently processed at the moment kgdb interrupts the system. Should this corner case occur, the end user is warned and the kernel debugger will refuse to update the hardware breakpoint reservations.

Any time the kernel debugger reserves a hardware breakpoint, it will be a system-wide reservation.

Signed-off-by: Jason Wessel <jason.wessel@windriver.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: kgdb-bugreport@lists.sourceforge.net
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: torvalds@linux-foundation.org
LKML-Reference: <1264719883-7285-3-git-send-email-jason.wessel@windriver.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hw_breakpoint.c')
-rw-r--r--	kernel/hw_breakpoint.c	52
1 file changed, 42 insertions(+), 10 deletions(-)
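
The dbg_* entry points introduced in this patch are meant to be driven from debugger context, where sleeping on nr_bp_mutex is not an option and a breakpoint is always a system-wide (every-CPU) reservation. The sketch below shows how a debugger-side caller could use them; the function name and the per-cpu event array are illustrative assumptions (the events would come from something like register_wide_hw_breakpoint()), not part of this patch.

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>	/* dbg_reserve_bp_slot()/dbg_release_bp_slot() */

/*
 * Illustrative debugger-side caller (the function name and the per-cpu
 * event array are assumptions, not part of this patch).  A kgdb hardware
 * breakpoint is a system-wide reservation, so a slot is claimed for the
 * per-cpu event on every online CPU.  dbg_reserve_bp_slot() never takes
 * nr_bp_mutex; if the bookkeeping happens to be busy when the debugger
 * stops the machine, the partial reservation is rolled back and the user
 * is warned instead of blocking.
 */
static int kgdb_reserve_hw_break_slots(struct perf_event **pev)
{
	int cpu, cnt = 0;

	for_each_online_cpu(cpu) {
		cnt++;
		if (dbg_reserve_bp_slot(*per_cpu_ptr(pev, cpu)))
			goto fail;
	}
	return 0;

fail:
	/* Release only the slots that were reserved before the failure. */
	for_each_online_cpu(cpu) {
		if (!--cnt)
			break;
		dbg_release_bp_slot(*per_cpu_ptr(pev, cpu));
	}
	printk(KERN_ERR "kgdb: hw breakpoint bookkeeping busy, breakpoint not set\n");
	return -1;
}

Note that mutex_is_locked() is only a best-effort guard: as the changelog says, the window in which perf could be mid-update when kgdb takes over is improbable, and that corner case is reported to the user rather than prevented.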
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index c030ae657f2..8a5c7d55ac9 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *   + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock using the dbg_* variant of for the reserve and
+ * release breakpoint slots.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
+
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
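
For completeness, the removal path would mirror the reservation sketch shown above the diff, using dbg_release_bp_slot() and simply reporting failure when the bookkeeping is busy. Again a sketch only; the function name is hypothetical.

static int kgdb_release_hw_break_slots(struct perf_event **pev)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* Non-blocking: a busy nr_bp_mutex means "try again later". */
		if (dbg_release_bp_slot(*per_cpu_ptr(pev, cpu)))
			return -1;
	}
	return 0;
}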