author     Andrea Arcangeli <aarcange@redhat.com>          2011-01-13 15:47:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 17:32:46 -0800
commit     878aee7d6b5504e01b9caffce080e792b6b8d090
tree       c4a01a78885c25b6b3b1e0c74af7cb83c98a07c5 /mm
parent     8ee53820edfd1f3b6554c593f337148dd3d7fc91
thp: freeze khugepaged and ksmd
It's unclear why scheduler-friendly kernel threads can't simply be kept off
the CPU by the scheduler itself during suspend. It's safer to stop them
explicitly: they can trigger memory allocation, and since kswapd also
freezes itself to avoid generating I/O, they have to freeze too.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
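
For readers unfamiliar with the kernel freezer, the patch boils down to the
standard freezable-kthread pattern: mark the thread freezable with
set_freezable(), poll freezing(current) so a long scan pass bails out promptly
once a freeze is requested, park in try_to_freeze(), and sleep with
wait_event_freezable() rather than wait_event_interruptible(). The sketch
below is illustrative only, not code from this commit; the thread function,
wait queue and work flag (example_scan_thread, example_wait, example_has_work)
are made-up names.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wait);   /* hypothetical wait queue */
static bool example_has_work;                   /* hypothetical "work pending" flag */

static int example_scan_thread(void *unused)
{
        set_freezable();        /* clear PF_NOFREEZE so the freezer may stop us */

        while (!kthread_should_stop()) {
                unsigned int npages = 100;

                /* Do a bounded chunk of work, aborting early if a freeze starts. */
                while (npages-- && likely(!freezing(current)))
                        cond_resched();         /* real scanning work goes here */

                try_to_freeze();                /* park here while the system suspends */

                if (example_has_work)
                        schedule_timeout_interruptible(msecs_to_jiffies(20));
                else
                        wait_event_freezable(example_wait,
                                             example_has_work || kthread_should_stop());
        }
        return 0;
}

With this pattern, freeze_processes() during suspend or hibernation can bring
the thread to a known quiescent point instead of racing with an in-flight scan.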
Diffstat (limited to 'mm')
-rw-r--r--   mm/huge_memory.c   14
-rw-r--r--   mm/ksm.c            8
2 files changed, 18 insertions(+), 4 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39d7df40c06..45b6d53bcfb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -15,6 +15,7 @@
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/khugepaged.h>
+#include <linux/freezer.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -2085,6 +2086,9 @@ static void khugepaged_do_scan(struct page **hpage)
 			break;
 #endif
 
+		if (unlikely(kthread_should_stop() || freezing(current)))
+			break;
+
 		spin_lock(&khugepaged_mm_lock);
 		if (!khugepaged_scan.mm_slot)
 			pass_through_head++;
@@ -2147,6 +2151,9 @@ static void khugepaged_loop(void)
 		if (hpage)
 			put_page(hpage);
 #endif
+		try_to_freeze();
+		if (unlikely(kthread_should_stop()))
+			break;
 		if (khugepaged_has_work()) {
 			DEFINE_WAIT(wait);
 			if (!khugepaged_scan_sleep_millisecs)
@@ -2157,8 +2164,8 @@ static void khugepaged_loop(void)
 				khugepaged_scan_sleep_millisecs));
 			remove_wait_queue(&khugepaged_wait, &wait);
 		} else if (khugepaged_enabled())
-			wait_event_interruptible(khugepaged_wait,
-						 khugepaged_wait_event());
+			wait_event_freezable(khugepaged_wait,
+					     khugepaged_wait_event());
 	}
 }
 
@@ -2166,6 +2173,7 @@ static int khugepaged(void *none)
 {
 	struct mm_slot *mm_slot;
 
+	set_freezable();
 	set_user_nice(current, 19);
 
 	/* serialize with start_khugepaged() */
@@ -2180,6 +2188,8 @@ static int khugepaged(void *none)
 		mutex_lock(&khugepaged_mutex);
 		if (!khugepaged_enabled())
 			break;
+		if (unlikely(kthread_should_stop()))
+			break;
 	}
 
 	spin_lock(&khugepaged_mm_lock);
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/swap.h>
 #include <linux/ksm.h>
 #include <linux/hash.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -1365,7 +1366,7 @@ static void ksm_do_scan(unsigned int scan_npages)
 	struct rmap_item *rmap_item;
 	struct page *uninitialized_var(page);
 
-	while (scan_npages--) {
+	while (scan_npages-- && likely(!freezing(current))) {
 		cond_resched();
 		rmap_item = scan_get_next_rmap_item(&page);
 		if (!rmap_item)
@@ -1383,6 +1384,7 @@ static int ksmd_should_run(void)
 
 static int ksm_scan_thread(void *nothing)
 {
+	set_freezable();
 	set_user_nice(current, 5);
 
 	while (!kthread_should_stop()) {
@@ -1391,11 +1393,13 @@ static int ksm_scan_thread(void *nothing)
 			ksm_do_scan(ksm_thread_pages_to_scan);
 		mutex_unlock(&ksm_thread_mutex);
 
+		try_to_freeze();
+
 		if (ksmd_should_run()) {
 			schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
-			wait_event_interruptible(ksm_thread_wait,
+			wait_event_freezable(ksm_thread_wait,
 				ksmd_should_run() || kthread_should_stop());
 		}
 	}
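
Usage note: the two wait_event_interruptible() -> wait_event_freezable()
conversions above matter because an interruptible sleep by itself never gives
the freezer a chance to run. As a rough, illustrative open-coding of that
behaviour (a sketch under assumptions, not the actual macro from
<linux/freezer.h>; example_wait_freezable and example_cond are hypothetical
names):

#include <linux/wait.h>
#include <linux/freezer.h>

/* Sketch only: sleep interruptibly, but also wake when a freeze is
 * requested, freeze in try_to_freeze(), then retry the wait. */
static int example_wait_freezable(wait_queue_head_t *wq, bool (*example_cond)(void))
{
        int ret;

        for (;;) {
                ret = wait_event_interruptible(*wq,
                                example_cond() || freezing(current));
                try_to_freeze();
                if (ret || example_cond())
                        break;
        }
        return ret;
}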