Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 95c08a8cc2b..a6b7d14812e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,6 +1061,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	int lumpy_reclaim = 0;
+
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 *
+	 * We use the same threshold as pageout congestion_wait below.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		lumpy_reclaim = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		lumpy_reclaim = 1;
 
 	pagevec_init(&pvec, 1);
 
@@ -1073,19 +1086,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = ISOLATE_INACTIVE;
-
-		/*
-		 * If we need a large contiguous chunk of memory, or have
-		 * trouble getting a small set of contiguous pages, we
-		 * will reclaim both active and inactive pages.
-		 *
-		 * We use the same threshold as pageout congestion_wait below.
-		 */
-		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-			mode = ISOLATE_BOTH;
-		else if (sc->order && priority < DEF_PRIORITY - 2)
-			mode = ISOLATE_BOTH;
+		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 			     &page_list, &nr_scan, sc->order, mode,
@@ -1122,7 +1123,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+		    lumpy_reclaim) {
 			congestion_wait(WRITE, HZ/10);
 
 			/*
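
---

The only behavioural change in this patch is in the last hunk: the congestion_wait() stall now also fires for low-order lumpy reclaim (0 < sc->order <= PAGE_ALLOC_COSTLY_ORDER with priority < DEF_PRIORITY - 2), not just for costly orders, making the stall condition match the isolation-mode threshold as the comment already claimed. The rest is hoisting a loop-invariant decision out of the scan loop. Below is a minimal standalone C sketch of that pattern; scan(), COSTLY_ORDER, and the printf calls are illustrative stand-ins, not kernel code, though the constant values match the kernel's PAGE_ALLOC_COSTLY_ORDER (3) and DEF_PRIORITY (12).

#include <stdio.h>

#define COSTLY_ORDER 3   /* stand-in for PAGE_ALLOC_COSTLY_ORDER */
#define DEF_PRIORITY 12  /* same value as the kernel's DEF_PRIORITY */

enum isolate_mode { ISOLATE_INACTIVE, ISOLATE_BOTH };

/* Hypothetical stand-in for shrink_inactive_list(): order and priority
 * play the roles of sc->order and the scan priority. */
static void scan(int order, int priority)
{
	/*
	 * Hoisted out of the loop, as in the patch: the decision depends
	 * only on order and priority, which are fixed for the whole call.
	 */
	int lumpy_reclaim = 0;

	if (order > COSTLY_ORDER)
		lumpy_reclaim = 1;
	else if (order && priority < DEF_PRIORITY - 2)
		lumpy_reclaim = 1;

	for (int i = 0; i < 2; i++) {
		/* One flag now drives the isolation mode... */
		enum isolate_mode mode =
			lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;

		printf("pass %d: mode=%s\n", i,
		       mode == ISOLATE_BOTH ? "ISOLATE_BOTH"
					    : "ISOLATE_INACTIVE");

		/*
		 * ...and the throttling path, which the old code keyed
		 * only on order > COSTLY_ORDER, so low-order lumpy
		 * reclaim now throttles too.
		 */
		if (lumpy_reclaim)
			printf("pass %d: would congestion_wait()\n", i);
	}
}

int main(void)
{
	scan(1, DEF_PRIORITY - 3);  /* low order, hard-pressed: lumpy */
	scan(4, DEF_PRIORITY);      /* costly order: always lumpy */
	scan(0, DEF_PRIORITY);      /* order-0: never lumpy */
	return 0;
}

Built with cc -std=c99, the three calls show the two paths agreeing in every case, which is the consistency the patch establishes between isolation mode and the stall condition.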