author		Ingo Molnar <mingo@elte.hu>	2012-03-12 20:44:07 +0100
committer	Ingo Molnar <mingo@elte.hu>	2012-03-12 20:44:11 +0100
commit	35239e23c66f1614c76739b62a299c3c92d6eb68 (patch)
tree	7b1e068df888ec9a00b43c1dd7517a6490da6a94 /kernel/hung_task.c
parent	3f33ab1c0c741bfab2138c14ba1918a7905a1e8b (diff)
parent	87e24f4b67e68d9fd8df16e0bf9c66d1ad2a2533 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We are going to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/hung_task.c')
-rw-r--r--	kernel/hung_task.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2e48ec0c2e9..c21449f85a2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
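
For readability, the post-image of the first hunk is reassembled below as plain code. This is simply the helper as it reads once the merge is applied, with explanatory comments added; it is kernel-internal code, not a standalone buildable example.

static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();		/* classic RCU needs a reschedule to end the grace period */
	rcu_read_lock();
	/* Either task may have been unhashed while the RCU read lock was dropped. */
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}

The caller in check_hung_uninterruptible_tasks() (second hunk) treats a false return as the signal that t or g was unhashed during the break, and exits the task-list walk via goto unlock instead of performing the inline TASK_DEAD check that the old code carried.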