Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	87
1 file changed, 34 insertions(+), 53 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8ed1b775bdc..53f2f82f83a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,7 @@
#include <asm/div64.h>
#include <linux/swapops.h>
+#include <linux/balloon_compaction.h>
#include "internal.h"
@@ -139,23 +140,11 @@ static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- struct mem_cgroup *root = sc->target_mem_cgroup;
- return !mem_cgroup_disabled() &&
- mem_cgroup_soft_reclaim_eligible(root, root) != SKIP_TREE;
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool mem_cgroup_should_soft_reclaim(struct scan_control *sc)
-{
- return false;
-}
#endif
unsigned long zone_reclaimable_pages(struct zone *zone)
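With the soft-reclaim predicate gone, the CONFIG_MEMCG block reduces to the stock global_reclaim() pair. The post-patch state of this block, pieced together from the hunk above (a sketch for orientation, not part of the patch):

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	/* global reclaim targets no particular memcg */
	return !sc->target_mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	/* without memcg support, all reclaim is global */
	return true;
}
#endif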
@@ -1125,7 +1114,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
LIST_HEAD(clean_pages);
list_for_each_entry_safe(page, next, page_list, lru) {
- if (page_is_file_cache(page) && !PageDirty(page)) {
+ if (page_is_file_cache(page) && !PageDirty(page) &&
+ !isolated_balloon_page(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
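The new isolated_balloon_page() test (its declaration comes with the balloon_compaction.h include added above) keeps balloon pages that compaction has isolated from masquerading as clean, reclaimable file cache. The loop as it reads after the patch, reassembled from the hunk (surrounding declarations omitted):

list_for_each_entry_safe(page, next, page_list, lru) {
	/*
	 * Only clean file-backed pages that are not isolated
	 * balloon pages may be moved to the clean list.
	 */
	if (page_is_file_cache(page) && !PageDirty(page) &&
	    !isolated_balloon_page(page)) {
		ClearPageActive(page);
		list_move(&page->lru, &clean_pages);
	}
}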
@@ -2176,11 +2166,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
}
}
-static int
-__shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
unsigned long nr_reclaimed, nr_scanned;
- int groups_scanned = 0;
do {
struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2188,17 +2176,15 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
.zone = zone,
.priority = sc->priority,
};
- struct mem_cgroup *memcg = NULL;
- mem_cgroup_iter_filter filter = (soft_reclaim) ?
- mem_cgroup_soft_reclaim_eligible : NULL;
+ struct mem_cgroup *memcg;
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
- while ((memcg = mem_cgroup_iter_cond(root, memcg, &reclaim, filter))) {
+ memcg = mem_cgroup_iter(root, NULL, &reclaim);
+ do {
struct lruvec *lruvec;
- groups_scanned++;
lruvec = mem_cgroup_zone_lruvec(zone, memcg);
shrink_lruvec(lruvec, sc);
@@ -2218,7 +2204,8 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
mem_cgroup_iter_break(root, memcg);
break;
}
- }
+ memcg = mem_cgroup_iter(root, memcg, &reclaim);
+ } while (memcg);
vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
sc->nr_scanned - nr_scanned,
@@ -2226,37 +2213,6 @@ __shrink_zone(struct zone *zone, struct scan_control *sc, bool soft_reclaim)
} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
sc->nr_scanned - nr_scanned, sc));
-
- return groups_scanned;
-}
-
-
-static void shrink_zone(struct zone *zone, struct scan_control *sc)
-{
- bool do_soft_reclaim = mem_cgroup_should_soft_reclaim(sc);
- unsigned long nr_scanned = sc->nr_scanned;
- int scanned_groups;
-
- scanned_groups = __shrink_zone(zone, sc, do_soft_reclaim);
- /*
- * memcg iterator might race with other reclaimer or start from
- * an incomplete tree walk so the tree walk in __shrink_zone
- * might have missed groups that are above the soft limit. Try
- * another loop to catch up with others. Do it just once to
- * prevent reclaim latencies when other reclaimers always
- * preempt this one.
- */
- if (do_soft_reclaim && !scanned_groups)
- __shrink_zone(zone, sc, do_soft_reclaim);
-
- /*
- * No group is over the soft limit or those that are do not have
- * pages in the zone we are reclaiming so we have to reclaim everybody
- */
- if (do_soft_reclaim && (sc->nr_scanned == nr_scanned)) {
- __shrink_zone(zone, sc, false);
- return;
- }
}
/* Returns true if compaction should go ahead for a high-order request */
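The restored shrink_zone() walks the memcg hierarchy with the stock iterator instead of the filtered mem_cgroup_iter_cond() variant. A minimal sketch of that canonical pattern as it appears in the hunk (visit() and done are placeholders standing in for the lruvec shrinking and the nr_to_reclaim check, not kernel symbols):

memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
	visit(memcg);	/* shrink this group's lruvec for the zone */
	if (done) {
		/*
		 * mem_cgroup_iter() holds a reference on the current
		 * position; drop it before leaving the walk early.
		 */
		mem_cgroup_iter_break(root, memcg);
		break;
	}
	memcg = mem_cgroup_iter(root, memcg, &reclaim);
} while (memcg);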
@@ -2320,6 +2276,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
bool aborted_reclaim = false;
/*
@@ -2359,6 +2317,18 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
continue;
}
}
+ /*
+ * This steals pages from memory cgroups over softlimit
+ * and returns the number of reclaimed pages and
+ * scanned pages. This works for global memory pressure
+ * and balancing, not for a memcg's limit.
+ */
+ nr_soft_scanned = 0;
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ sc->order, sc->gfp_mask,
+ &nr_soft_scanned);
+ sc->nr_reclaimed += nr_soft_reclaimed;
+ sc->nr_scanned += nr_soft_scanned;
/* need some check to avoid more shrink_zone() */
}
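Both call sites in this patch use the same prototype; for reference, a hedged sketch of the helper's signature as implied by the hunks (the actual declaration lives in include/linux/memcontrol.h):

/*
 * Reclaim pages from memcgs exceeding their soft limit within @zone.
 * Returns the number of pages reclaimed; pages scanned are reported
 * back through @total_scanned.
 */
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);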
@@ -2952,6 +2922,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
{
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
+ unsigned long nr_soft_reclaimed;
+ unsigned long nr_soft_scanned;
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
.priority = DEF_PRIORITY,
@@ -3066,6 +3038,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
sc.nr_scanned = 0;
+ nr_soft_scanned = 0;
+ /*
+ * Call soft limit reclaim before calling shrink_zone.
+ */
+ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+ order, sc.gfp_mask,
+ &nr_soft_scanned);
+ sc.nr_reclaimed += nr_soft_reclaimed;
+
/*
* There should be no need to raise the scanning
* priority if enough pages are already being scanned
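Note the asymmetry with the shrink_zones() hunk above: kswapd folds only nr_soft_reclaimed back into sc.nr_reclaimed, while nr_soft_scanned stays local. A sketch of the ordering this hunk restores (the per-zone shrink that follows the trailing comment lies outside the hunk):

sc.nr_scanned = 0;
nr_soft_scanned = 0;

/*
 * Trim groups over their soft limit before the regular per-zone
 * scan; scanned pages are not added to sc.nr_scanned here.
 */
nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order,
						  sc.gfp_mask,
						  &nr_soft_scanned);
sc.nr_reclaimed += nr_soft_reclaimed;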