author	Steven Rostedt <rostedt@goodmis.org>	2008-05-12 21:21:00 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 21:50:21 +0200
commit	bb065afb8ebd07a03155502dba29ebf0f6fe67e8 (patch)
tree	aa32a9461cd38f336797cfba4d07c58789a6f1be /kernel/lockdep.c
parent	a98a3c3fde3ae7614f19758a043691b6f59dac53 (diff)
lockdep: update lockdep_recursion on graph_lock
With the introduction of ftrace, it is possible to recurse into the lockdep functions via the mcount call. Updating the lockdep_recursion counter when grabbing the internal lockdep_lock prevents those recursions from turning into lockups or deadlocks.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
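For context, the scenario this guards against looks like the sketch below: while lockdep holds its internal lockdep_lock, any function it calls can be traced via mcount, and if the tracer's callback ends up back inside lockdep it would try to take lockdep_lock again and spin forever. The following is a minimal userspace model of the per-task recursion counter, not kernel code; the names tracing_hook() and lock_validator() are invented for the illustration.

/* Minimal userspace model of the per-task recursion guard (illustrative only). */
#include <stdio.h>

struct task { int lockdep_recursion; };
static struct task current_task;
#define current (&current_task)

static void lock_validator(void);

/* Stands in for an mcount/ftrace callback that itself uses the validator. */
static void tracing_hook(void)
{
	lock_validator();
}

static void lock_validator(void)
{
	/* With the guard: a re-entry on the same task backs out immediately
	 * instead of trying to take the (already held) internal lock. */
	if (current->lockdep_recursion) {
		printf("recursion detected, backing out\n");
		return;
	}

	current->lockdep_recursion++;	/* graph_lock() side of the patch */
	tracing_hook();			/* mcount fires inside the critical section */
	current->lockdep_recursion--;	/* graph_unlock() side of the patch */
}

int main(void)
{
	lock_validator();
	return 0;
}

Without the counter, the inner lock_validator() call would correspond to a second attempt to take lockdep_lock on the same CPU, i.e. a self-deadlock.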
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 90a440cbd6d..65548eff029 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -82,6 +82,8 @@ static int graph_lock(void)
 		__raw_spin_unlock(&lockdep_lock);
 		return 0;
 	}
+	/* prevent any recursions within lockdep from causing deadlocks */
+	current->lockdep_recursion++;
 	return 1;
 }
 
@@ -90,6 +92,7 @@ static inline int graph_unlock(void)
 	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
+	current->lockdep_recursion--;
 	__raw_spin_unlock(&lockdep_lock);
 	return 0;
 }
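Note that the hunks above only raise and lower the counter around the graph-lock critical section; the early bail-out that actually breaks the recursion lives in lockdep's entry points, which return immediately when current->lockdep_recursion is non-zero. The fragment below is a simplified sketch of that check, with hypothetical names; the exact form in lock_acquire() and the other entry points may differ.

/* Sketch of the entry-point check that consumes the counter (hypothetical names). */
struct task_sketch { int lockdep_recursion; };
extern struct task_sketch *current_task_sketch;

void lockdep_entry_sketch(void)
{
	/* Called from within lockdep itself (counter raised by graph_lock()):
	 * do nothing rather than try to re-take lockdep_lock. */
	if (current_task_sketch->lockdep_recursion)
		return;

	/* ... normal path: graph_lock(), graph validation, graph_unlock() ... */
}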