From 0b5e1c5255e7ee8670e077e8224e5c2281229a5b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 7 Jun 2011 11:15:33 +0200
Subject: printk: Release console_sem after logbuf_lock

Release console_sem after unlocking the logbuf_lock so that we don't
generate wakeups while holding logbuf_lock. This avoids some lock
inversion troubles once we remove the lockdep_off bits between
logbuf_lock and rq->lock (prints while holding rq->lock vs doing
wakeups while holding logbuf_lock).

There's of course still an actual deadlock where the printk()s under
rq->lock will issue a wakeup from the up() call, but lockdep won't
warn about that since semaphores are not tracked.

Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Andrew Morton
Link: http://lkml.kernel.org/n/tip-j8swthl12u73h4znbvitljzd@git.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/printk.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/kernel/printk.c b/kernel/printk.c
index 35185392173..751e7b84e9e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -782,7 +782,7 @@ static inline int can_use_console(unsigned int cpu)
 static int console_trylock_for_printk(unsigned int cpu)
         __releases(&logbuf_lock)
 {
-        int retval = 0;
+        int retval = 0, wake = 0;
 
         if (console_trylock()) {
                 retval = 1;
@@ -795,12 +795,14 @@ static int console_trylock_for_printk(unsigned int cpu)
                  */
                 if (!can_use_console(cpu)) {
                         console_locked = 0;
-                        up(&console_sem);
+                        wake = 1;
                         retval = 0;
                 }
         }
         printk_cpu = UINT_MAX;
         spin_unlock(&logbuf_lock);
+        if (wake)
+                up(&console_sem);
         return retval;
 }
 static const char recursion_bug_msg [] =
@@ -1271,8 +1273,8 @@ void console_unlock(void)
         if (unlikely(exclusive_console))
                 exclusive_console = NULL;
 
-        up(&console_sem);
         spin_unlock_irqrestore(&logbuf_lock, flags);
+        up(&console_sem);
         if (wake_klogd)
                 wake_up_klogd();
 }
--
cgit v1.2.3-70-g09d2

From 4f2a8d3cf5e0486fd547633fa86c5d130ae98cad Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 22 Jun 2011 11:20:09 +0200
Subject: printk: Fix console_sem vs logbuf_lock unlock race

Fix up the fallout from commit 0b5e1c5255 ("printk: Release
console_sem after logbuf_lock").

The reason for unlocking the console_sem under the logbuf_lock is
that a concurrent printk() might fill up the buffer but fail to
acquire the console sem, resulting in a missed write to the console
until a subsequent console_sem acquire/release cycle.

Signed-off-by: Peter Zijlstra
Cc: efault@gmx.de
Cc: Linus Torvalds
Cc: Andrew Morton
Link: http://lkml.kernel.org/r/1308734409.1022.14.camel@twins
Signed-off-by: Ingo Molnar
---
 kernel/printk.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/kernel/printk.c b/kernel/printk.c
index 751e7b84e9e..37dff3429ad 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1244,7 +1244,7 @@ void console_unlock(void)
 {
         unsigned long flags;
         unsigned _con_start, _log_end;
-        unsigned wake_klogd = 0;
+        unsigned wake_klogd = 0, retry = 0;
 
         if (console_suspended) {
                 up(&console_sem);
@@ -1253,6 +1253,7 @@ void console_unlock(void)
 
         console_may_schedule = 0;
 
+again:
         for ( ; ; ) {
                 spin_lock_irqsave(&logbuf_lock, flags);
                 wake_klogd |= log_start - log_end;
@@ -1273,8 +1274,23 @@ void console_unlock(void)
         if (unlikely(exclusive_console))
                 exclusive_console = NULL;
 
-        spin_unlock_irqrestore(&logbuf_lock, flags);
+        spin_unlock(&logbuf_lock);
+
         up(&console_sem);
+
+        /*
+         * Someone could have filled up the buffer again, so re-check if there's
+         * something to flush. In case we cannot trylock the console_sem again,
+         * there's a new owner and the console_unlock() from them will do the
+         * flush, no worries.
+         */
+        spin_lock(&logbuf_lock);
+        if (con_start != log_end)
+                retry = 1;
+        spin_unlock_irqrestore(&logbuf_lock, flags);
+
+        if (retry && console_trylock())
+                goto again;
+
         if (wake_klogd)
                 wake_up_klogd();
 }
--
cgit v1.2.3-70-g09d2

From dd4e5d3ac4a76b868daf30e35bd572def96c30ed Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 21 Jun 2011 17:17:27 +0200
Subject: lockdep: Fix trace_[soft,hard]irqs_[on,off]() recursion

Commit:

  1efc5da3cf56: [PATCH] order of lockdep off/on in vprintk() should be changed

explains the reason for having raw_local_irq_*() and lockdep_off() in
printk(). Instead of working around the broken recursion detection of
interrupt state tracking, fix it.

Signed-off-by: Peter Zijlstra
Cc: efault@gmx.de
Cc: Linus Torvalds
Cc: Andrew Morton
Link: http://lkml.kernel.org/r/20110621153806.185242734@chello.nl
Signed-off-by: Ingo Molnar
---
 kernel/lockdep.c | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac..81968a065b4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2478,15 +2478,10 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        time_hardirqs_on(CALLER_ADDR0, ip);
-
-        if (unlikely(!debug_locks || current->lockdep_recursion))
-                return;
-
         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                 return;
 
@@ -2502,8 +2497,6 @@ void trace_hardirqs_on_caller(unsigned long ip)
         /* we'll do an OFF -> ON transition: */
         curr->hardirqs_enabled = 1;
 
-        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-                return;
         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                 return;
         /*
@@ -2525,6 +2518,21 @@ void trace_hardirqs_on_caller(unsigned long ip)
         curr->hardirq_enable_event = ++curr->irq_events;
         debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+        time_hardirqs_on(CALLER_ADDR0, ip);
+
+        if (unlikely(!debug_locks || current->lockdep_recursion))
+                return;
+
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return;
+
+        current->lockdep_recursion = 1;
+        __trace_hardirqs_on_caller(ip);
+        current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
@@ -2574,7 +2582,7 @@ void trace_softirqs_on(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks))
+        if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2585,6 +2593,7 @@ void trace_softirqs_on(unsigned long ip)
                 return;
         }
 
+        current->lockdep_recursion = 1;
         /*
          * We'll do an OFF -> ON transition:
          */
@@ -2599,6 +2608,7 @@ void trace_softirqs_on(unsigned long ip)
          */
         if (curr->hardirqs_enabled)
                 mark_held_locks(curr, SOFTIRQ);
+        current->lockdep_recursion = 0;
 }
 
 /*
@@ -2608,7 +2618,7 @@ void trace_softirqs_off(unsigned long ip)
 {
         struct task_struct *curr = current;
 
-        if (unlikely(!debug_locks))
+        if (unlikely(!debug_locks || current->lockdep_recursion))
                 return;
 
         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
--
cgit v1.2.3-70-g09d2
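For reference, the ordering that the two printk patches converge on can be
illustrated with a small userspace sketch, using a pthread mutex and a POSIX
semaphore as stand-ins for logbuf_lock and console_sem. This is a hedged
analogue, not kernel code: the names buf_lock, buf_start/buf_end,
flush_buffer() and console_unlock_analogue() are made up for illustration.

/*
 * Userspace analogue of the console_unlock() ordering: release the
 * semaphore only after the buffer lock is dropped, then re-check the
 * buffer and retry if we can take ownership again.
 *
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER; /* plays logbuf_lock */
static sem_t console_sem;                                    /* plays console_sem */
static int buf_start, buf_end;                               /* plays con_start/log_end */

static void flush_buffer(void)
{
        /* stand-in for writing the pending range to the console */
        printf("flushing %d..%d\n", buf_start, buf_end);
        buf_start = buf_end;
}

static void console_unlock_analogue(void)
{
        int retry;

again:
        pthread_mutex_lock(&buf_lock);
        flush_buffer();
        pthread_mutex_unlock(&buf_lock); /* drop the buffer lock first ...        */
        sem_post(&console_sem);          /* ... only then wake the next owner, so */
                                         /* the wakeup never runs under buf_lock  */

        /*
         * A concurrent producer may have refilled the buffer between the
         * flush and the sem_post(); re-check and retry only if we can take
         * ownership back, otherwise the new owner will do the flush.
         */
        pthread_mutex_lock(&buf_lock);
        retry = (buf_start != buf_end);
        pthread_mutex_unlock(&buf_lock);

        if (retry && sem_trywait(&console_sem) == 0)
                goto again;
}

int main(void)
{
        sem_init(&console_sem, 0, 0); /* value 0: we start out owning the "console" */
        buf_end = 3;                  /* pretend three records are pending */
        console_unlock_analogue();
        return 0;
}

The final trylock/goto mirrors the race addressed by the second patch: once the
semaphore is released before the re-check, a concurrent printer could have
filled the buffer and failed its own trylock, so the releasing thread either
reclaims ownership and flushes again or leaves the work to whoever now holds
the semaphore.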