Diffstat (limited to 'mm')
 mm/memcontrol.c | 15
 mm/vmscan.c     | 23
 2 files changed, 17 insertions(+), 21 deletions(-)
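Note that with the diffstat limited to 'mm', the companion change that makes the renamed helper visible to mm/vmscan.c is not shown. Presumably a shared header (include/linux/swap.h in this era) gains a declaration along these lines; this is an assumption about the surrounding series, not part of the diff below:

	/* assumed header declaration, not shown in this mm/-only diff */
	extern int mem_cgroup_swappiness(struct mem_cgroup *mem);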
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e013b8e57d2..506d116a7d3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -249,7 +249,7 @@ struct mem_cgroup {
atomic_t oom_lock;
atomic_t refcnt;
- unsigned int swappiness;
+ int swappiness;
/* OOM-Killer disable */
int oom_kill_disable;
@@ -1329,7 +1329,7 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
return margin >> PAGE_SHIFT;
}
-static unsigned int get_swappiness(struct mem_cgroup *memcg)
+int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
struct cgroup *cgrp = memcg->css.cgroup;
@@ -1776,12 +1776,11 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
/* we use swappiness of local cgroup */
if (check_soft) {
ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
- noswap, get_swappiness(victim), zone,
- &nr_scanned);
+ noswap, zone, &nr_scanned);
*total_scanned += nr_scanned;
} else
ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
- noswap, get_swappiness(victim));
+ noswap);
css_put(&victim->css);
/*
* At shrinking usage, we can't check we should stop here or
@@ -3826,7 +3825,7 @@ try_to_free:
goto out;
}
progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
- false, get_swappiness(mem));
+ false);
if (!progress) {
nr_retries--;
/* maybe some writeback is necessary */
@@ -4288,7 +4287,7 @@ static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- return get_swappiness(memcg);
+ return mem_cgroup_swappiness(memcg);
}
static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
@@ -4997,7 +4996,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
INIT_LIST_HEAD(&mem->oom_notify);
if (parent)
- mem->swappiness = get_swappiness(parent);
+ mem->swappiness = mem_cgroup_swappiness(parent);
atomic_set(&mem->refcnt, 1);
mem->move_charge_at_immigrate = 0;
mutex_init(&mem->thresholds_lock);
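For reference, only the signature of the renamed helper appears in the hunk above; a plausible reading of the full function after this patch, reconstructed from the visible context (the cgrp local) and the usual root-cgroup fallback to vm_swappiness, is sketched below and should not be taken as the literal post-patch body:

	int mem_cgroup_swappiness(struct mem_cgroup *memcg)
	{
		struct cgroup *cgrp = memcg->css.cgroup;

		/* the root cgroup has no per-cgroup knob; fall back to the global value */
		if (cgrp->parent == NULL)
			return vm_swappiness;

		return memcg->swappiness;
	}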
diff --git a/mm/vmscan.c b/mm/vmscan.c
index febbc044e79..05637491f24 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,8 +95,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
int may_swap;
- int swappiness;
-
int order;
/*
@@ -1770,6 +1768,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
}
+static int vmscan_swappiness(struct scan_control *sc)
+{
+ if (scanning_global_lru(sc))
+ return vm_swappiness;
+ return mem_cgroup_swappiness(sc->mem_cgroup);
+}
+
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned. The relative value of each set of LRU lists is determined
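The new vmscan_swappiness() helper leans on scanning_global_lru(); in kernels of this vintage that predicate is, to the best of my recollection, just a test for the absence of a target memcg, roughly:

	/* assumed definition, not part of this patch */
	#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)

so global reclaim keeps using the system-wide vm_swappiness while memcg reclaim now queries the cgroup directly instead of carrying the value around in scan_control.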
@@ -1830,8 +1835,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
* With swappiness at 100, anonymous and file have the same priority.
* This scanning priority is essentially the inverse of IO cost.
*/
- anon_prio = sc->swappiness;
- file_prio = 200 - sc->swappiness;
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = 200 - vmscan_swappiness(sc);
/*
* OK, so we have swap space and a fair amount of page cache
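As a worked example: with the default vm_swappiness of 60, this gives anon_prio = 60 and file_prio = 200 - 60 = 140, so page cache is scanned a little more than twice as aggressively as anonymous memory; at swappiness 100 both come out to 100, matching the comment above.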
@@ -2220,7 +2225,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.may_unmap = 1,
.may_swap = 1,
- .swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
.nodemask = nodemask,
@@ -2244,7 +2248,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
- unsigned int swappiness,
struct zone *zone,
unsigned long *nr_scanned)
{
@@ -2254,7 +2257,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = !noswap,
- .swappiness = swappiness,
.order = 0,
.mem_cgroup = mem,
};
@@ -2283,8 +2285,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
gfp_t gfp_mask,
- bool noswap,
- unsigned int swappiness)
+ bool noswap)
{
struct zonelist *zonelist;
unsigned long nr_reclaimed;
@@ -2294,7 +2295,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
.may_unmap = 1,
.may_swap = !noswap,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
- .swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.nodemask = NULL, /* we don't care the placement */
@@ -2445,7 +2445,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* we want to put equal scanning pressure on each zone.
*/
.nr_to_reclaim = ULONG_MAX,
- .swappiness = vm_swappiness,
.order = order,
.mem_cgroup = NULL,
};
@@ -2915,7 +2914,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.may_writepage = 1,
.nr_to_reclaim = nr_to_reclaim,
.hibernation_mode = 1,
- .swappiness = vm_swappiness,
.order = 0,
};
struct shrink_control shrink = {
@@ -3102,7 +3100,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
.nr_to_reclaim = max_t(unsigned long, nr_pages,
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
- .swappiness = vm_swappiness,
.order = order,
};
struct shrink_control shrink = {
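Taken together, the memcg reclaim entry points end up with the following signatures after this patch (reconstructed from the hunks above; the matching extern declarations live in a header outside this mm/-limited diffstat):

	unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
						   gfp_t gfp_mask, bool noswap);
	unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
						  gfp_t gfp_mask, bool noswap,
						  struct zone *zone,
						  unsigned long *nr_scanned);
	int mem_cgroup_swappiness(struct mem_cgroup *memcg);

Callers such as mem_cgroup_hierarchical_reclaim() no longer pass a swappiness value at all; reclaim now looks it up itself through vmscan_swappiness().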