Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  104
1 file changed, 70 insertions(+), 34 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e0116150dc..faa0a088f9c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
struct scan_control *sc, enum lru_list lru)
{
if (!scanning_global_lru(sc))
- return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+ return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
return zone_page_state(zone, NR_LRU_BASE + lru);
}
@@ -1718,26 +1718,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
}
/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
- unsigned long *nr_saved_scan)
-{
- unsigned long nr;
-
- *nr_saved_scan += nr_to_scan;
- nr = *nr_saved_scan;
-
- if (nr >= SWAP_CLUSTER_MAX)
- *nr_saved_scan = 0;
- else
- nr = 0;
-
- return nr;
-}
-
-/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
* by looking at the fraction of the pages scanned we did rotate back
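For context on the helper removed above: nr_scan_try_batch() deferred small scan requests, accumulating them in *nr_saved_scan and only releasing a batch once at least SWAP_CLUSTER_MAX pages had piled up. The userspace model below (illustration only, assuming SWAP_CLUSTER_MAX == 32 as in the kernel headers) shows how a series of small requests produces several no-scan rounds before one batched scan, which is the behaviour that starves small zones and memcgs and that the rest of this patch replaces with force_scan.

/* Illustration only: userspace model of the removed batching helper. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                       unsigned long *nr_saved_scan)
{
        unsigned long nr;

        *nr_saved_scan += nr_to_scan;
        nr = *nr_saved_scan;

        if (nr >= SWAP_CLUSTER_MAX)
                *nr_saved_scan = 0;     /* batch reached: release it all */
        else
                nr = 0;                 /* keep saving, scan nothing yet */

        return nr;
}

int main(void)
{
        unsigned long saved = 0;

        /* Requests of 10, 10 and 15 pages print 0, 0 and 35: the first
         * two rounds scan nothing at all. */
        printf("%lu\n", nr_scan_try_batch(10, &saved));
        printf("%lu\n", nr_scan_try_batch(10, &saved));
        printf("%lu\n", nr_scan_try_batch(15, &saved));
        return 0;
}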
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
u64 fraction[2], denominator;
enum lru_list l;
int noswap = 0;
+ int force_scan = 0;
+
+
+ anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+ if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+ /* kswapd does zone balancing and needs to scan this zone */
+ if (scanning_global_lru(sc) && current_is_kswapd())
+ force_scan = 1;
+ /* memcg may have a small limit and needs to avoid priority drop */
+ if (!scanning_global_lru(sc))
+ force_scan = 1;
+ }
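A quick worked example of the trigger above (numbers are illustrative, SWAP_CLUSTER_MAX assumed to be 32): a memcg holding 4,000 LRU pages scanned at the default priority of 12 gives (4000 >> 12) = 0, which is below SWAP_CLUSTER_MAX, so force_scan is set; a 1,000,000-page zone gives (1000000 >> 12) = 244 and is left alone. Even at priority 7 the small memcg only reaches (4000 >> 7) = 31, still under the threshold.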
/* If we have no swap space, do not bother scanning anon pages. */
if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
goto out;
}
- anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
- file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
if (scanning_global_lru(sc)) {
free = zone_page_state(zone, NR_FREE_PAGES);
/* If we have very few page cache pages,
@@ -1836,8 +1827,23 @@ out:
scan >>= priority;
scan = div64_u64(scan * fraction[file], denominator);
}
- nr[l] = nr_scan_try_batch(scan,
- &reclaim_stat->nr_saved_scan[l]);
+
+ /*
+ * If the zone or the memcg is small, nr[l] can be 0.  That means
+ * nothing is scanned at this priority and the priority drops.
+ * Global direct reclaim can visit the next zone and tends not to
+ * have problems.  Global kswapd, however, does zone balancing and
+ * needs to scan a small amount, and with memcg a priority drop can
+ * cause big latency, so it is better to scan a small amount there
+ * as well.  See force_scan above.
+ */
+ if (!scan && force_scan) {
+ if (file)
+ scan = SWAP_CLUSTER_MAX;
+ else if (!noswap)
+ scan = SWAP_CLUSTER_MAX;
+ }
+ nr[l] = scan;
}
}
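A minimal sketch of the resulting per-LRU computation, illustration only: it assumes SWAP_CLUSTER_MAX is 32 and elides the noswap/priority-zero special case and the div64_u64() arithmetic of the real code; fraction[] and denominator come from the recent_scanned/recent_rotated balancing earlier in get_scan_count().

/* Illustrative only, not kernel code. */
static unsigned long scan_target(unsigned long lru_size, int priority,
                                 unsigned long frac, unsigned long denom,
                                 int force_scan)
{
        unsigned long scan = lru_size >> priority;  /* pressure scales with priority */

        scan = scan * frac / denom;                 /* anon vs. file split */

        if (!scan && force_scan)                    /* small zone or small memcg */
                scan = 32;                          /* SWAP_CLUSTER_MAX floor */

        return scan;
}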
@@ -1977,11 +1983,14 @@ restart:
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
+ unsigned long total_scanned = 0;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1998,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
continue; /* Let kswapd poll it */
}
+ nr_soft_scanned = 0;
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ sc->order, sc->gfp_mask,
+ &nr_soft_scanned);
+ sc->nr_reclaimed += nr_soft_reclaimed;
+ total_scanned += nr_soft_scanned;
+
shrink_zone(priority, zone, sc);
}
+
+ return total_scanned;
}
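The call sites added here and in balance_pgdat() further down imply that mem_cgroup_soft_limit_reclaim() now reports reclaimed pages through its return value and scanned pages through a new out-parameter; the prototype change itself lives in memcontrol.h/memcontrol.c and is not part of this file's diff, so the declaration below is only what those call sites suggest.

/* Signature as implied by the call sites in this patch; illustrative. */
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);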
static bool zone_reclaimable(struct zone *zone)
@@ -2064,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
- shrink_zones(priority, zonelist, sc);
+ total_scanned += shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -2171,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
unsigned int swappiness,
- struct zone *zone)
+ struct zone *zone,
+ unsigned long *nr_scanned)
{
struct scan_control sc = {
+ .nr_scanned = 0,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.may_writepage = !laptop_mode,
.may_unmap = 1,
@@ -2182,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
.order = 0,
.mem_cgroup = mem,
};
+
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2200,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
+ *nr_scanned = sc.nr_scanned;
return sc.nr_reclaimed;
}
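A sketch of how a caller is expected to consume the new nr_scanned out-parameter; the surrounding soft-limit loop lives in mm/memcontrol.c and is not part of this diff, and the variable names here are illustrative.

/* Illustrative caller, not taken from this patch. */
unsigned long nr_scanned = 0;
unsigned long reclaimed;

reclaimed = mem_cgroup_shrink_node_zone(victim, gfp_mask, noswap,
                                        swappiness, zone, &nr_scanned);
*total_scanned += nr_scanned;   /* propagate scan pressure to the caller */
total_reclaimed += reclaimed;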
@@ -2210,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
{
struct zonelist *zonelist;
unsigned long nr_reclaimed;
+ int nid;
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_unmap = 1,
@@ -2226,7 +2249,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.gfp_mask = sc.gfp_mask,
};
- zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+ /*
+ * Unlike direct reclaim via alloc_pages(), memcg's reclaim does not
+ * care which node the pages come from, so the node where we start the
+ * scan does not need to be the current node.
+ */
+ nid = mem_cgroup_select_victim_node(mem_cont);
+
+ zonelist = NODE_DATA(nid)->node_zonelists;
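mem_cgroup_select_victim_node() itself is implemented in mm/memcontrol.c and is not part of this diff; the sketch below only illustrates the round-robin idea, with hypothetical function and parameter names, and leaves out the handling of memcgs whose pages sit on only a few nodes.

/* Illustrative sketch of round-robin node selection, not the real helper. */
static int pick_next_victim_node(int last_node, const nodemask_t *allowed)
{
        int node = next_node(last_node, *allowed);

        if (node == MAX_NUMNODES)       /* wrapped past the last node */
                node = first_node(*allowed);
        if (node == MAX_NUMNODES)       /* mask is empty: fall back */
                node = numa_node_id();

        return node;
}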
trace_mm_vmscan_memcg_reclaim_begin(0,
sc.may_writepage,
@@ -2347,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
struct reclaim_state *reclaim_state = current->reclaim_state;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.may_unmap = 1,
@@ -2439,11 +2471,15 @@ loop_again:
sc.nr_scanned = 0;
+ nr_soft_scanned = 0;
/*
* Call soft limit reclaim before calling shrink_zone.
- * For now we ignore the return value
*/
- mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ order, sc.gfp_mask,
+ &nr_soft_scanned);
+ sc.nr_reclaimed += nr_soft_reclaimed;
+ total_scanned += nr_soft_scanned;
/*
* We put equal pressure on every zone, unless