Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c       |   5
-rw-r--r--  kernel/lockdep.c    |  12
-rw-r--r--  kernel/sched.c      |   4
-rw-r--r--  kernel/softlockup.c | 114
-rw-r--r--  kernel/sysctl.c     |  27
5 files changed, 149 insertions, 13 deletions
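The heart of the patch is the new check_hung_task() in the kernel/softlockup.c hunk below: a task in TASK_UNINTERRUPTIBLE is reported only if its context-switch count (nvcsw + nivcsw) has not moved for more than sysctl_hung_task_timeout_secs. As a reading aid, here is a minimal userspace sketch of that decision rule; struct task_sample, check_hung() and the sample timestamps are illustrative stand-ins for the task_struct fields the patch touches, not kernel API:

/*
 * Illustrative model of the check_hung_task() rule below, not kernel
 * code: the first sample with a changed (or never-seen) switch count
 * only arms the timestamp; a report fires once the count has stayed
 * identical for longer than the timeout.
 */
#include <stdio.h>

struct task_sample {
	unsigned long switch_count;		/* nvcsw + nivcsw */
	unsigned long last_switch_count;
	unsigned long last_switch_timestamp;	/* seconds, 0 = unarmed */
};

static unsigned long hung_task_timeout_secs = 120;

/* Returns 1 if the task should be reported as hung at time 'now'. */
static int check_hung(struct task_sample *t, unsigned long now)
{
	if (t->switch_count != t->last_switch_count ||
	    !t->last_switch_timestamp) {
		/* It scheduled since the last check: restart the clock. */
		t->last_switch_count = t->switch_count;
		t->last_switch_timestamp = now;
		return 0;
	}
	return (long)(now - t->last_switch_timestamp) >=
		(long)hung_task_timeout_secs;
}

int main(void)
{
	struct task_sample t = { .switch_count = 7 };

	printf("%d\n", check_hung(&t, 100));	/* 0: first sample arms it */
	printf("%d\n", check_hung(&t, 150));	/* 0: only 50s without a switch */
	printf("%d\n", check_hung(&t, 221));	/* 1: 121s with no context switch */
	return 0;
}

The kernel version additionally skips frozen tasks (PF_FROZEN) and rate-limits the reports via sysctl_hung_task_warnings, as the diff shows.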
diff --git a/kernel/fork.c b/kernel/fork.c
index 8dd8ff28100..09c0b90a69c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1059,6 +1059,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->prev_utime = cputime_zero;
 	p->prev_stime = cputime_zero;
 
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+	p->last_switch_count = 0;
+	p->last_switch_timestamp = 0;
+#endif
+
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
 	p->wchar = 0;		/* I/O counter: bytes written */
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index e2c07ece367..3574379f4d6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3206,7 +3206,11 @@ retry:
 
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
 
-void debug_show_held_locks(struct task_struct *task)
+/*
+ * Careful: only use this function if you are sure that
+ * the task cannot run in parallel!
+ */
+void __debug_show_held_locks(struct task_struct *task)
 {
 	if (unlikely(!debug_locks)) {
 		printk("INFO: lockdep is turned off.\n");
@@ -3214,6 +3218,12 @@ void debug_show_held_locks(struct task_struct *task)
 	}
 	lockdep_print_held_locks(task);
 }
+EXPORT_SYMBOL_GPL(__debug_show_held_locks);
+
+void debug_show_held_locks(struct task_struct *task)
+{
+	__debug_show_held_locks(task);
+}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
diff --git a/kernel/sched.c b/kernel/sched.c
index c0e2db683e2..5b3d46574ee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4945,7 +4945,7 @@ out_unlock:
 
 static const char stat_nam[] = "RSDTtZX";
 
-static void show_task(struct task_struct *p)
+void sched_show_task(struct task_struct *p)
 {
 	unsigned long free = 0;
 	unsigned state;
@@ -4998,7 +4998,7 @@ void show_state_filter(unsigned long state_filter)
 		 */
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
-			show_task(p);
+			sched_show_task(p);
 	} while_each_thread(g, p);
 
 	touch_all_softlockup_watchdogs();
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 11df812263c..02f0ad53444 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -8,6 +8,7 @@
  */
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/nmi.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
@@ -24,7 +25,7 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
 static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
 
 static int did_panic;
-int softlockup_thresh = 10;
+int softlockup_thresh = 60;
 
 static int softlock_panic(struct notifier_block *this, unsigned long event,
 			  void *ptr)
@@ -45,7 +46,7 @@ static struct notifier_block panic_block = {
  */
 static unsigned long get_timestamp(int this_cpu)
 {
-	return cpu_clock(this_cpu) >> 30;	/* 2^30 ~= 10^9 */
+	return cpu_clock(this_cpu) >> 30LL;	/* 2^30 ~= 10^9 */
 }
 
 void touch_softlockup_watchdog(void)
@@ -100,11 +101,7 @@ void softlockup_tick(void)
 
 	now = get_timestamp(this_cpu);
 
-	/* Wake up the high-prio watchdog task every second: */
-	if (now > (touch_timestamp + 1))
-		wake_up_process(per_cpu(watchdog_task, this_cpu));
-
-	/* Warn about unreasonable 10+ seconds delays: */
+	/* Warn about unreasonable delays: */
 	if (now <= (touch_timestamp + softlockup_thresh))
 		return;
 
@@ -122,11 +119,93 @@
 }
 
 /*
+ * Have a reasonable limit on the number of tasks checked:
+ */
+unsigned long sysctl_hung_task_check_count = 1024;
+
+/*
+ * Zero means infinite timeout - no checking done:
+ */
+unsigned long sysctl_hung_task_timeout_secs = 120;
+
+long sysctl_hung_task_warnings = 10;
+
+/*
+ * Only do the hung-tasks check on one CPU:
+ */
+static int check_cpu __read_mostly = -1;
+
+static void check_hung_task(struct task_struct *t, unsigned long now)
+{
+	unsigned long switch_count = t->nvcsw + t->nivcsw;
+
+	if (t->flags & PF_FROZEN)
+		return;
+
+	if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
+		t->last_switch_count = switch_count;
+		t->last_switch_timestamp = now;
+		return;
+	}
+	if ((long)(now - t->last_switch_timestamp) <
+						sysctl_hung_task_timeout_secs)
+		return;
+	if (sysctl_hung_task_warnings < 0)
+		return;
+	sysctl_hung_task_warnings--;
+
+	/*
+	 * Ok, the task did not get scheduled for more than 2 minutes,
+	 * complain:
+	 */
+	printk(KERN_ERR "INFO: task %s:%d blocked for more than "
+			"%ld seconds.\n", t->comm, t->pid,
+			sysctl_hung_task_timeout_secs);
+	printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+			" disables this message.\n");
+	sched_show_task(t);
+	__debug_show_held_locks(t);
+
+	t->last_switch_timestamp = now;
+	touch_nmi_watchdog();
+}
+
+/*
+ * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
+ * a really long time (120 seconds). If that happens, print out
+ * a warning.
+ */
+static void check_hung_uninterruptible_tasks(int this_cpu)
+{
+	int max_count = sysctl_hung_task_check_count;
+	unsigned long now = get_timestamp(this_cpu);
+	struct task_struct *g, *t;
+
+	/*
+	 * If the system crashed already then all bets are off,
+	 * do not report extra hung tasks:
+	 */
+	if ((tainted & TAINT_DIE) || did_panic)
+		return;
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, t) {
+		if (!--max_count)
+			break;
+		if (t->state & TASK_UNINTERRUPTIBLE)
+			check_hung_task(t, now);
+	} while_each_thread(g, t);
+
+	read_unlock(&tasklist_lock);
+}
+
+/*
  * The watchdog thread - runs every second and touches the timestamp.
  */
 static int watchdog(void *__bind_cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	int this_cpu = (long)__bind_cpu;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
@@ -135,13 +214,18 @@ static int watchdog(void *__bind_cpu)
 
 	/*
 	 * Run briefly once per second to reset the softlockup timestamp.
-	 * If this gets delayed for more than 10 seconds then the
+	 * If this gets delayed for more than 60 seconds then the
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
 		touch_softlockup_watchdog();
-		schedule();
+		msleep_interruptible(10000);
+
+		if (this_cpu != check_cpu)
+			continue;
+
+		if (sysctl_hung_task_timeout_secs)
+			check_hung_uninterruptible_tasks(this_cpu);
 	}
 
 	return 0;
@@ -171,6 +255,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		check_cpu = any_online_cpu(cpu_online_map);
 		wake_up_process(per_cpu(watchdog_task, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
@@ -181,6 +266,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Unbind so it can run.  Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		if (hotcpu == check_cpu) {
+			cpumask_t temp_cpu_online_map = cpu_online_map;
+
+			cpu_clear(hotcpu, temp_cpu_online_map);
+			check_cpu = any_online_cpu(temp_cpu_online_map);
+		}
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		p = per_cpu(watchdog_task, hotcpu);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c95f3ed3447..96f31c1bc4f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -753,6 +753,33 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &one,
 		.extra2		= &sixty,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "hung_task_check_count",
+		.data		= &sysctl_hung_task_check_count,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "hung_task_timeout_secs",
+		.data		= &sysctl_hung_task_timeout_secs,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "hung_task_warnings",
+		.data		= &sysctl_hung_task_warnings,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+	},
 #endif
 #ifdef CONFIG_COMPAT
 	{
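The three ctl_table entries added above surface as /proc/sys/kernel/hung_task_check_count, hung_task_timeout_secs and hung_task_warnings, and the printk in check_hung_task() already documents that writing 0 to hung_task_timeout_secs turns the check off. A minimal sketch of driving that knob from userspace, assuming only the procfs path taken from the patch (plain file I/O, root required):

/* Illustrative only: disable the hung-task check at runtime by writing
 * "0" to the sysctl this patch adds; zero means no checking, per the
 * comment on sysctl_hung_task_timeout_secs. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/hung_task_timeout_secs", "w");

	if (!f) {
		perror("hung_task_timeout_secs");
		return 1;
	}
	fputs("0\n", f);
	fclose(f);
	return 0;
}

Writing a positive number of seconds through the same file re-enables the check with that timeout.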