author    Thomas Gleixner <tglx@linutronix.de>  2015-01-12 10:51:13 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2015-01-12 10:51:13 +0100
commit    2f5eaf66e580f64032b365a00157b6b58c266b37 (patch)
tree      7852017c864f0eb3833782e2a017952bd8531458 /mm/oom_kill.c
parent    c291ee622165cb2c8d4e7af63fffd499354a23be (diff)
parent    91d1179212161f220938198b742c328ad38fd0a3 (diff)
Merge tag 'irqchip-urgent-3.19' of git://git.infradead.org/users/jcooper/linux into irq/urgent
irqchip urgent fixes for v3.19 from Jason Cooper:
 - mtk-sysirq: Fix error handling
 - hip04: Fix cpu map for 16bit value
 - gic-v3-its: Clear a warning regarding decimal constants
 - omap-intc: Fix legacy DMA regression
 - atmel-aic-common: Retain priority when changing type
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5340f6b9131..d503e9ce1c7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -119,7 +119,7 @@ found:
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
- const struct mem_cgroup *memcg, const nodemask_t *nodemask)
+ struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
if (is_global_init(p))
return true;
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
/* Check this allocation failure is caused by cpuset's wall function */
for_each_zone_zonelist_nodemask(zone, z, zonelist,
high_zoneidx, nodemask)
- if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
+ if (!cpuset_zone_allowed(zone, gfp_mask))
cpuset_limited = true;
if (cpuset_limited) {
@@ -281,14 +281,9 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
if (oom_task_origin(task))
return OOM_SCAN_SELECT;
- if (task->flags & PF_EXITING && !force_kill) {
- /*
- * If this task is not being ptraced on exit, then wait for it
- * to finish before killing some other task unnecessarily.
- */
- if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
- return OOM_SCAN_ABORT;
- }
+ if (task_will_free_mem(task) && !force_kill)
+ return OOM_SCAN_ABORT;
+
return OOM_SCAN_OK;
}
@@ -353,7 +348,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
* State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
* swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
+static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
@@ -443,7 +438,7 @@ void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* If the task is already exiting, don't alarm the sysadmin or kill
* its children or threads, just set TIF_MEMDIE so it can die quickly
*/
- if (p->flags & PF_EXITING) {
+ if (task_will_free_mem(p)) {
set_tsk_thread_flag(p, TIF_MEMDIE);
put_task_struct(p);
return;
@@ -649,7 +644,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
*/
- if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
+ if (fatal_signal_pending(current) || task_will_free_mem(current)) {
set_thread_flag(TIF_MEMDIE);
return;
}
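
A note on the hunks above: the patch replaces the open-coded PF_EXITING tests with task_will_free_mem(). As a minimal, non-authoritative sketch, assuming the helper only checks that the task is exiting and that its thread group is not still writing a core dump (the SIGNAL_GROUP_COREDUMP exclusion is an assumption, not shown in this diff), the new condition amounts to:

#include <linux/sched.h>

/* Sketch only: approximates what task_will_free_mem() is assumed to test
 * here; not the actual kernel helper. */
static inline bool task_will_free_mem_sketch(struct task_struct *task)
{
	/* An exiting task is expected to release its memory shortly... */
	return (task->flags & PF_EXITING) &&
		/* ...unless its thread group is still dumping core,
		 * which can take arbitrarily long. */
		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
}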