author | Ingo Molnar <mingo@kernel.org> | 2013-10-02 07:56:46 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-02 07:57:37 +0200
commit | b8d490c3de797c88cfcc3364c6e04d8900121cf9 (patch) |
tree | ac62b90323e561bca35a407211242dab8987099b /kernel |
parent | 8a60d42d26d31df8121b251c2b5c56eb24e729f1 (diff) |
parent | 62d26c8200a8382e1c67419ca3aff78d37898cc5 (diff) |
Merge branch 'irq/core-v6' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into irq/core
Pull hardirq and softirq nesting updates from Frederic Weisbecker,
which fix nesting-related stack overruns such as:
http://lkml.kernel.org/r/1378330796.4321.50.camel%40pasglop
Beyond being a fix, this series also optimizes and reorganizes arch
hardirq/softirq stack processing to be faster and more robust.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
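The core idea of the series is that softirq work should run on a small, dedicated stack instead of on top of whatever task stack happens to be current, which may already be deep. The same idea exists in user space as POSIX alternate signal stacks, so here is a minimal, self-contained analogy, not kernel code: the handler, signal choice, and stack size are illustrative only, with sigaltstack(2) + SA_ONSTACK standing in for the kernel's per-CPU irq/softirq stack.

```c
/*
 * User-space analogy of "run the handler on its own stack".
 * Illustrative only: sigaltstack(2) + SA_ONSTACK play the role the
 * per-CPU irq/softirq stack plays in the kernel.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void handler(int sig)
{
        int probe;

        (void)sig;
        /*
         * printf() is not async-signal-safe in general; acceptable here
         * because the signal is raised synchronously below.
         */
        printf("handler stack near %p\n", (void *)&probe);
}

int main(void)
{
        stack_t ss;
        struct sigaction sa;
        int probe;

        /* Give the handler its own stack. */
        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0) {
                perror("sigaltstack");
                return 1;
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sa.sa_flags = SA_ONSTACK;       /* deliver on the alternate stack */
        sigaction(SIGUSR1, &sa, NULL);

        printf("main stack near    %p\n", (void *)&probe);
        raise(SIGUSR1);                 /* handler runs on its own stack */

        free(ss.ss_sp);
        return 0;
}
```

The two printed addresses land in disjoint regions: the handler neither consumes nor depends on however deep main()'s call chain is, which is exactly the property the series gives to softirq processing.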
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/softirq.c | 40 |
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 53cc09ceb0b..dacd0ab51df 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,7 +29,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
 
-#include <asm/irq.h>
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -134,7 +133,6 @@ EXPORT_SYMBOL(local_bh_disable);
 
 static void __local_bh_enable(unsigned int cnt)
 {
-        WARN_ON_ONCE(in_irq());
         WARN_ON_ONCE(!irqs_disabled());
 
         if (softirq_count() == cnt)
@@ -149,6 +147,7 @@ static void __local_bh_enable(unsigned int cnt)
  */
 void _local_bh_enable(void)
 {
+        WARN_ON_ONCE(in_irq());
         __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
 
@@ -171,8 +170,13 @@ static inline void _local_bh_enable_ip(unsigned long ip)
          */
         sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);
 
-        if (unlikely(!in_interrupt() && local_softirq_pending()))
+        if (unlikely(!in_interrupt() && local_softirq_pending())) {
+                /*
+                 * Run softirq if any pending. And do it in its own stack
+                 * as we may be calling this deep in a task call stack already.
+                 */
                 do_softirq();
+        }
 
         dec_preempt_count();
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -280,10 +284,11 @@ restart:
 
         account_irq_exit_time(current);
         __local_bh_enable(SOFTIRQ_OFFSET);
+        WARN_ON_ONCE(in_interrupt());
         tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-#ifndef __ARCH_HAS_DO_SOFTIRQ
+
 
 asmlinkage void do_softirq(void)
 {
@@ -298,13 +303,11 @@ asmlinkage void do_softirq(void)
         pending = local_softirq_pending();
 
         if (pending)
-                __do_softirq();
+                do_softirq_own_stack();
 
         local_irq_restore(flags);
 }
 
-#endif
-
 /*
  * Enter an interrupt context.
  */
@@ -328,10 +331,25 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-        if (!force_irqthreads)
+        if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+                /*
+                 * We can safely execute softirq on the current stack if
+                 * it is the irq stack, because it should be near empty
+                 * at this stage.
+                 */
                 __do_softirq();
-        else
+#else
+                /*
+                 * Otherwise, irq_exit() is called on the task stack that can
+                 * be potentially deep already. So call softirq in its own stack
+                 * to prevent from any overrun.
+                 */
+                do_softirq_own_stack();
+#endif
+        } else {
                 wakeup_softirqd();
+        }
 }
 
 static inline void tick_irq_exit(void)
@@ -762,6 +780,10 @@ static void run_ksoftirqd(unsigned int cpu)
 {
         local_irq_disable();
         if (local_softirq_pending()) {
+                /*
+                 * We can safely run softirq on inline stack, as we are not deep
+                 * in the task stack here.
+                 */
                 __do_softirq();
                 rcu_note_context_switch(cpu);
                 local_irq_enable();
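The invoke_softirq() hunk above encodes a two-way decision: on architectures where irq_exit() already runs on the (near-empty) irq stack (CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK), __do_softirq() can run inline; everywhere else, do_softirq_own_stack() switches stacks first. Below is a minimal user-space sketch of that decision, assuming a single dedicated "work" stack and using ucontext(3) in place of an arch-specific stack switch; every name in it is illustrative, and none of it is a kernel API.

```c
/*
 * User-space sketch of the invoke_softirq() decision above.
 * Assumptions: one dedicated work stack stands in for the per-CPU irq
 * stack; swapcontext(3) stands in for the arch stack-switch primitive.
 */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

#define WORK_STACK_SZ   (64 * 1024)

static ucontext_t caller_ctx, work_ctx;
static char *work_stack;

/* Crude range check; fine in practice for a demo. */
static int on_work_stack(void)
{
        char probe;

        return &probe >= work_stack && &probe < work_stack + WORK_STACK_SZ;
}

static void do_work(void)
{
        printf("work: on dedicated stack? %s\n",
               on_work_stack() ? "yes" : "no");
}

static void invoke(void)
{
        if (on_work_stack()) {
                /*
                 * Mirrors the CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK branch:
                 * already on the dedicated, near-empty stack, run inline.
                 */
                do_work();
        } else {
                /*
                 * Mirrors do_softirq_own_stack(): hop onto the dedicated
                 * stack, run the work, then return here via uc_link.
                 */
                getcontext(&work_ctx);
                work_ctx.uc_stack.ss_sp = work_stack;
                work_ctx.uc_stack.ss_size = WORK_STACK_SZ;
                work_ctx.uc_link = &caller_ctx;
                makecontext(&work_ctx, do_work, 0);
                swapcontext(&caller_ctx, &work_ctx);
        }
}

int main(void)
{
        work_stack = malloc(WORK_STACK_SZ);
        if (!work_stack)
                return 1;
        invoke();       /* called off the task (main) stack: switches */
        free(work_stack);
        return 0;
}
```

Called from main(), invoke() takes the swapcontext() path and reports the work running on the dedicated stack; had it been entered while already on that stack, it would have called do_work() inline, just as the #ifdef branch skips a pointless stack switch when irq_exit() already runs on the irq stack.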