Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  661
1 file changed, 385 insertions(+), 276 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index da53a252b25..1f0b460fe58 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -73,15 +73,6 @@ static int really_do_swap_account __initdata = 0;
#define do_swap_account (0)
#endif
-/*
- * Per memcg event counter is incremented at every pagein/pageout. This counter
- * is used for trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- *
- * These values will be used as !((event) & ((1 <<(thresh)) - 1))
- */
-#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
-#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
/*
* Statistics for memory cgroup.
@@ -93,19 +84,36 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */
- MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
- /* incremented at every pagein/pageout */
- MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */
-
MEM_CGROUP_STAT_NSTATS,
};
+enum mem_cgroup_events_index {
+ MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
+ MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
+ MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
+ MEM_CGROUP_EVENTS_NSTATS,
+};
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used to
+ * trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg event.
+ */
+enum mem_cgroup_events_target {
+ MEM_CGROUP_TARGET_THRESH,
+ MEM_CGROUP_TARGET_SOFTLIMIT,
+ MEM_CGROUP_NTARGETS,
+};
+#define THRESHOLDS_EVENTS_TARGET (128)
+#define SOFTLIMIT_EVENTS_TARGET (1024)
+
struct mem_cgroup_stat_cpu {
- s64 count[MEM_CGROUP_STAT_NSTATS];
+ long count[MEM_CGROUP_STAT_NSTATS];
+ unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+ unsigned long targets[MEM_CGROUP_NTARGETS];
};
/*
@@ -218,12 +226,6 @@ struct mem_cgroup {
* per zone LRU lists.
*/
struct mem_cgroup_lru_info info;
-
- /*
- protect against reclaim related member.
- */
- spinlock_t reclaim_param_lock;
-
/*
* While reclaiming in a hierarchy, we cache the last child we
* reclaimed from.
@@ -327,13 +329,6 @@ enum charge_type {
NR_CHARGE_TYPE,
};
-/* only for here (for easy reading.) */
-#define PCGF_CACHE (1UL << PCG_CACHE)
-#define PCGF_USED (1UL << PCG_USED)
-#define PCGF_LOCK (1UL << PCG_LOCK)
-/* Not used, but added here for completeness */
-#define PCGF_ACCT (1UL << PCG_ACCT)
-
/* for encoding cft->private value on file */
#define _MEM (0)
#define _MEMSWAP (1)
@@ -371,14 +366,10 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
}
static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
+page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
- struct mem_cgroup *mem = pc->mem_cgroup;
- int nid = page_cgroup_nid(pc);
- int zid = page_cgroup_zid(pc);
-
- if (!mem)
- return NULL;
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
return mem_cgroup_zoneinfo(mem, nid, zid);
}
@@ -504,11 +495,6 @@ static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
}
}
-static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
-{
- return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
-}
-
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
@@ -565,11 +551,11 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
* common workload, threashold and synchonization as vmstat[] should be
* implemented.
*/
-static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
- enum mem_cgroup_stat_index idx)
+static long mem_cgroup_read_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx)
{
+ long val = 0;
int cpu;
- s64 val = 0;
get_online_cpus();
for_each_online_cpu(cpu)
@@ -583,9 +569,9 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
return val;
}
-static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
+static long mem_cgroup_local_usage(struct mem_cgroup *mem)
{
- s64 ret;
+ long ret;
ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
@@ -599,6 +585,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}
+static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
+ enum mem_cgroup_events_index idx)
+{
+ unsigned long val = 0;
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ val += per_cpu(mem->stat->events[idx], cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ spin_lock(&mem->pcp_counter_lock);
+ val += mem->nocpu_base.events[idx];
+ spin_unlock(&mem->pcp_counter_lock);
+#endif
+ return val;
+}
+
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
bool file, int nr_pages)
{
@@ -611,13 +613,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
/* pagein of a big page is an event. So, ignore page size */
if (nr_pages > 0)
- __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
+ __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
else {
- __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
+ __this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
+ __this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
preempt_enable();
}
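
The mem_cgroup_read_events() helper added above sums the per-cpu event counters and, when CPU hotplug is configured, adds the nocpu_base totals that were folded in when CPUs went offline. Below is a minimal userspace sketch of that accumulation pattern; the flat arrays and names are illustrative stand-ins for the kernel's per-cpu machinery, not its API.

/* Userspace model only: sum per-CPU counters plus the hotplug fold-in. */
#include <stdio.h>

#define NR_CPUS 4

static unsigned long percpu_events[NR_CPUS];    /* one counter per online CPU */
static unsigned long nocpu_base_events;         /* folded in when a CPU goes offline */

static unsigned long read_events(void)
{
        unsigned long val = nocpu_base_events;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                val += percpu_events[cpu];
        return val;
}

int main(void)
{
        percpu_events[0] = 10;
        percpu_events[3] = 5;
        nocpu_base_events = 7;
        printf("PGPGIN total: %lu\n", read_events());
        return 0;
}
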
@@ -637,13 +639,34 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
return total;
}
-static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
+static bool __memcg_event_check(struct mem_cgroup *mem, int target)
{
- s64 val;
+ unsigned long val, next;
+
+ val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+ next = this_cpu_read(mem->stat->targets[target]);
+ /* from time_after() in jiffies.h */
+ return ((long)next - (long)val < 0);
+}
+
+static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
+{
+ unsigned long val, next;
- val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
+ val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
- return !(val & ((1 << event_mask_shift) - 1));
+ switch (target) {
+ case MEM_CGROUP_TARGET_THRESH:
+ next = val + THRESHOLDS_EVENTS_TARGET;
+ break;
+ case MEM_CGROUP_TARGET_SOFTLIMIT:
+ next = val + SOFTLIMIT_EVENTS_TARGET;
+ break;
+ default:
+ return;
+ }
+
+ this_cpu_write(mem->stat->targets[target], next);
}
/*
@@ -653,10 +676,15 @@ static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
/* threshold event is triggered in finer grain than soft limit */
- if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
+ if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
mem_cgroup_threshold(mem);
- if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
+ __mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
+ if (unlikely(__memcg_event_check(mem,
+ MEM_CGROUP_TARGET_SOFTLIMIT))) {
mem_cgroup_update_tree(mem, page);
+ __mem_cgroup_target_update(mem,
+ MEM_CGROUP_TARGET_SOFTLIMIT);
+ }
}
}
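
The two hunks above replace the old power-of-two mask test with per-cpu "next target" values: the event counter only grows, and the signed subtraction keeps the comparison correct across unsigned wraparound, the same trick as time_after() in jiffies.h. Here is a small, self-contained model of that scheme; plain globals stand in for the per-cpu state, so treat it as a sketch of the idea rather than the kernel code.

#include <stdio.h>

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET  1024

static unsigned long events, thresh_next, softlimit_next;

/* wraparound-safe "counter passed target" test, as in the patch */
static int event_check(unsigned long next)
{
        return (long)next - (long)events < 0;
}

int main(void)
{
        for (int i = 0; i < 5000; i++) {
                events++;                       /* one pagein/pageout */
                if (event_check(thresh_next)) {
                        printf("threshold event at %lu\n", events);
                        thresh_next = events + THRESHOLDS_EVENTS_TARGET;
                        if (event_check(softlimit_next)) {
                                printf("softlimit event at %lu\n", events);
                                softlimit_next = events + SOFTLIMIT_EVENTS_TARGET;
                        }
                }
        }
        return 0;
}
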
@@ -815,7 +843,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
* We don't check PCG_USED bit. It's cleared when the "page" is finally
* removed from global LRU.
*/
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
/* huge page split is done under lru_lock. so, we have no races. */
MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
if (mem_cgroup_is_root(pc->mem_cgroup))
@@ -829,6 +857,32 @@ void mem_cgroup_del_lru(struct page *page)
mem_cgroup_del_lru_list(page, page_lru(page));
}
+/*
+ * Writeback is about to end against a page which has been marked for immediate
+ * reclaim. If it still appears to be reclaimable, move it to the tail of the
+ * inactive list.
+ */
+void mem_cgroup_rotate_reclaimable_page(struct page *page)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct page_cgroup *pc;
+ enum lru_list lru = page_lru(page);
+
+ if (mem_cgroup_disabled())
+ return;
+
+ pc = lookup_page_cgroup(page);
+ /* unused or root page is not rotated. */
+ if (!PageCgroupUsed(pc))
+ return;
+ /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+ smp_rmb();
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
+ list_move_tail(&pc->lru, &mz->lists[lru]);
+}
+
void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
struct mem_cgroup_per_zone *mz;
@@ -845,7 +899,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
smp_rmb();
if (mem_cgroup_is_root(pc->mem_cgroup))
return;
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
list_move(&pc->lru, &mz->lists[lru]);
}
@@ -862,7 +916,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
return;
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
smp_rmb();
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
/* huge page split is done under lru_lock. so, we have no races. */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
SetPageCgroupAcctLRU(pc);
@@ -872,18 +926,28 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
}
/*
- * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
- * lru because the page may.be reused after it's fully uncharged (because of
- * SwapCache behavior).To handle that, unlink page_cgroup from LRU when charge
- * it again. This function is only used to charge SwapCache. It's done under
- * lock_page and expected that zone->lru_lock is never held.
+ * When handling SwapCache and FUSE pages, pc->mem_cgroup may be changed
+ * while it's linked to the LRU because the page may be reused after it's fully
+ * uncharged. To handle that, unlink page_cgroup from LRU when charging it again.
+ * It's done under lock_page and it is expected that zone->lru_lock is never held.
*/
-static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+static void mem_cgroup_lru_del_before_commit(struct page *page)
{
unsigned long flags;
struct zone *zone = page_zone(page);
struct page_cgroup *pc = lookup_page_cgroup(page);
+ /*
+ * Doing this check without taking ->lru_lock seems wrong but this
+ * is safe. Because if page_cgroup's USED bit is unset, the page
+ * will not be added to any memcg's LRU. If page_cgroup's USED bit is
+ * set, the commit after this will fail, anyway.
+ * All of this charge/uncharge is done under some mutual exclusion,
+ * so we don't need to take care of changes in the USED bit.
+ */
+ if (likely(!PageLRU(page)))
+ return;
+
spin_lock_irqsave(&zone->lru_lock, flags);
/*
* Forget old LRU when this page_cgroup is *not* used. This Used bit
@@ -894,12 +958,15 @@ static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
-static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+static void mem_cgroup_lru_add_after_commit(struct page *page)
{
unsigned long flags;
struct zone *zone = page_zone(page);
struct page_cgroup *pc = lookup_page_cgroup(page);
+ /* take care of the case where the page is added to LRU while we commit it */
+ if (likely(!PageLRU(page)))
+ return;
spin_lock_irqsave(&zone->lru_lock, flags);
/* link when the page is linked to LRU but page_cgroup isn't */
if (PageLRU(page) && !PageCgroupAcctLRU(pc))
@@ -1032,10 +1099,7 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
return NULL;
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
smp_rmb();
- mz = page_cgroup_zoneinfo(pc);
- if (!mz)
- return NULL;
-
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
return &mz->reclaim_stat;
}
@@ -1067,9 +1131,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
if (scan >= nr_to_scan)
break;
- page = pc->page;
if (unlikely(!PageCgroupUsed(pc)))
continue;
+
+ page = lookup_cgroup_page(pc);
+
if (unlikely(!PageLRU(page)))
continue;
@@ -1101,49 +1167,32 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
-{
- if (do_swap_account) {
- if (res_counter_check_under_limit(&mem->res) &&
- res_counter_check_under_limit(&mem->memsw))
- return true;
- } else
- if (res_counter_check_under_limit(&mem->res))
- return true;
- return false;
-}
-
/**
- * mem_cgroup_check_margin - check if the memory cgroup allows charging
- * @mem: memory cgroup to check
- * @bytes: the number of bytes the caller intends to charge
+ * mem_cgroup_margin - calculate chargeable space of a memory cgroup
+ * @mem: the memory cgroup
*
- * Returns a boolean value on whether @mem can be charged @bytes or
- * whether this would exceed the limit.
+ * Returns the maximum amount of memory @mem can be charged with, in
+ * pages.
*/
-static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
{
- if (!res_counter_check_margin(&mem->res, bytes))
- return false;
- if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
- return false;
- return true;
+ unsigned long long margin;
+
+ margin = res_counter_margin(&mem->res);
+ if (do_swap_account)
+ margin = min(margin, res_counter_margin(&mem->memsw));
+ return margin >> PAGE_SHIFT;
}
static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
struct cgroup *cgrp = memcg->css.cgroup;
- unsigned int swappiness;
/* root ? */
if (cgrp->parent == NULL)
return vm_swappiness;
- spin_lock(&memcg->reclaim_param_lock);
- swappiness = memcg->swappiness;
- spin_unlock(&memcg->reclaim_param_lock);
-
- return swappiness;
+ return memcg->swappiness;
}
static void mem_cgroup_start_move(struct mem_cgroup *mem)
@@ -1359,13 +1408,11 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
rcu_read_unlock();
/* Updates scanning parameter */
- spin_lock(&root_mem->reclaim_param_lock);
if (!css) {
/* this means start scan from ID:1 */
root_mem->last_scanned_child = 0;
} else
root_mem->last_scanned_child = found;
- spin_unlock(&root_mem->reclaim_param_lock);
}
return ret;
@@ -1394,7 +1441,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
- unsigned long excess = mem_cgroup_get_excess(root_mem);
+ unsigned long excess;
+
+ excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
/* If memsw_is_minimum==1, swap-out is of-no-use. */
if (root_mem->memsw_is_minimum)
@@ -1451,9 +1500,9 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
return ret;
total += ret;
if (check_soft) {
- if (res_counter_check_under_soft_limit(&root_mem->res))
+ if (!res_counter_soft_limit_excess(&root_mem->res))
return total;
- } else if (mem_cgroup_check_under_limit(root_mem))
+ } else if (mem_cgroup_margin(root_mem))
return 1 + total;
}
return total;
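
Both exit conditions of this reclaim loop are now page-based: the soft-limit case checks res_counter_soft_limit_excess() directly, and the hard-limit case asks the new mem_cgroup_margin() for headroom, which is the smaller of the res and memsw margins shifted down to pages. A hypothetical userspace model of that margin calculation follows; the res_counter stand-in and the numbers are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12

struct res_counter_model {
        unsigned long long usage;
        unsigned long long limit;
};

static unsigned long long counter_margin(struct res_counter_model *c)
{
        return c->limit - c->usage;     /* assumes usage <= limit */
}

static unsigned long margin_pages(struct res_counter_model *res,
                                  struct res_counter_model *memsw,
                                  int do_swap_account)
{
        unsigned long long margin = counter_margin(res);

        if (do_swap_account && counter_margin(memsw) < margin)
                margin = counter_margin(memsw);
        return margin >> PAGE_SHIFT;
}

int main(void)
{
        struct res_counter_model res   = { .usage = 100ULL << 20, .limit = 128ULL << 20 };
        struct res_counter_model memsw = { .usage = 120ULL << 20, .limit = 130ULL << 20 };

        /* memsw is the tighter limit here: 10 MB of headroom, 2560 pages */
        printf("margin: %lu pages\n", margin_pages(&res, &memsw, 1));
        return 0;
}
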
@@ -1661,17 +1710,17 @@ EXPORT_SYMBOL(mem_cgroup_update_page_stat);
* size of first charge trial. "32" comes from vmscan.c's magic value.
* TODO: maybe necessary to use big numbers in big irons.
*/
-#define CHARGE_SIZE (32 * PAGE_SIZE)
+#define CHARGE_BATCH 32U
struct memcg_stock_pcp {
struct mem_cgroup *cached; /* this never be root cgroup */
- int charge;
+ unsigned int nr_pages;
struct work_struct work;
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static atomic_t memcg_drain_count;
/*
- * Try to consume stocked charge on this cpu. If success, PAGE_SIZE is consumed
+ * Try to consume stocked charge on this cpu. On success, one page is consumed
* from local stock and true is returned. If the stock is 0 or charges from a
* cgroup which is not current target, returns false. This stock will be
* refilled.
@@ -1682,8 +1731,8 @@ static bool consume_stock(struct mem_cgroup *mem)
bool ret = true;
stock = &get_cpu_var(memcg_stock);
- if (mem == stock->cached && stock->charge)
- stock->charge -= PAGE_SIZE;
+ if (mem == stock->cached && stock->nr_pages)
+ stock->nr_pages--;
else /* need to call res_counter_charge */
ret = false;
put_cpu_var(memcg_stock);
@@ -1697,13 +1746,15 @@ static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
- if (stock->charge) {
- res_counter_uncharge(&old->res, stock->charge);
+ if (stock->nr_pages) {
+ unsigned long bytes = stock->nr_pages * PAGE_SIZE;
+
+ res_counter_uncharge(&old->res, bytes);
if (do_swap_account)
- res_counter_uncharge(&old->memsw, stock->charge);
+ res_counter_uncharge(&old->memsw, bytes);
+ stock->nr_pages = 0;
}
stock->cached = NULL;
- stock->charge = 0;
}
/*
@@ -1720,7 +1771,7 @@ static void drain_local_stock(struct work_struct *dummy)
* Cache charges(val) which is from res_counter, to local per_cpu area.
* This will be consumed by consume_stock() function, later.
*/
-static void refill_stock(struct mem_cgroup *mem, int val)
+static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
{
struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
@@ -1728,7 +1779,7 @@ static void refill_stock(struct mem_cgroup *mem, int val)
drain_stock(stock);
stock->cached = mem;
}
- stock->charge += val;
+ stock->nr_pages += nr_pages;
put_cpu_var(memcg_stock);
}
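
The per-cpu stock is now kept in pages rather than bytes: consume_stock() hands out a single page, drain_stock() converts the remainder back to bytes only when returning it to the res_counter, and refill_stock() stores whatever surplus a batched charge left behind. A minimal single-CPU model of that flow, with an integer standing in for the memcg pointer and names chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 32U

struct stock_model {
        int cached_memcg;               /* stand-in for struct mem_cgroup * */
        unsigned int nr_pages;
};

static struct stock_model stock;

static bool consume_stock(int memcg)
{
        if (memcg == stock.cached_memcg && stock.nr_pages) {
                stock.nr_pages--;       /* one page served from the cache */
                return true;
        }
        return false;                   /* caller must charge the res_counter */
}

static void refill_stock(int memcg, unsigned int nr_pages)
{
        if (stock.cached_memcg != memcg) {
                stock.nr_pages = 0;     /* drain_stock() in the kernel */
                stock.cached_memcg = memcg;
        }
        stock.nr_pages += nr_pages;
}

int main(void)
{
        refill_stock(1, CHARGE_BATCH - 1);      /* surplus of a batched charge */
        while (consume_stock(1))
                ;
        printf("stock drained, nr_pages=%u\n", stock.nr_pages);
        return 0;
}
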
@@ -1780,11 +1831,17 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
spin_lock(&mem->pcp_counter_lock);
for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
- s64 x = per_cpu(mem->stat->count[i], cpu);
+ long x = per_cpu(mem->stat->count[i], cpu);
per_cpu(mem->stat->count[i], cpu) = 0;
mem->nocpu_base.count[i] += x;
}
+ for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+ unsigned long x = per_cpu(mem->stat->events[i], cpu);
+
+ per_cpu(mem->stat->events[i], cpu) = 0;
+ mem->nocpu_base.events[i] += x;
+ }
/* need to clear ON_MOVE value, works as a kind of lock. */
per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
spin_unlock(&mem->pcp_counter_lock);
@@ -1834,9 +1891,10 @@ enum {
CHARGE_OOM_DIE, /* the current is killed because of OOM */
};
-static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
- int csize, bool oom_check)
+static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
+ unsigned int nr_pages, bool oom_check)
{
+ unsigned long csize = nr_pages * PAGE_SIZE;
struct mem_cgroup *mem_over_limit;
struct res_counter *fail_res;
unsigned long flags = 0;
@@ -1857,14 +1915,13 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
} else
mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
/*
- * csize can be either a huge page (HPAGE_SIZE), a batch of
- * regular pages (CHARGE_SIZE), or a single regular page
- * (PAGE_SIZE).
+ * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
+ * of regular pages (CHARGE_BATCH), or a single regular page (1).
*
* Never reclaim on behalf of optional batching, retry with a
* single page instead.
*/
- if (csize == CHARGE_SIZE)
+ if (nr_pages == CHARGE_BATCH)
return CHARGE_RETRY;
if (!(gfp_mask & __GFP_WAIT))
@@ -1872,7 +1929,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
gfp_mask, flags);
- if (mem_cgroup_check_margin(mem_over_limit, csize))
+ if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
return CHARGE_RETRY;
/*
* Even though the limit is exceeded at this point, reclaim
@@ -1883,7 +1940,7 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
* unlikely to succeed so close to the limit, and we fall back
* to regular pages anyway in case of failure.
*/
- if (csize == PAGE_SIZE && ret)
+ if (nr_pages == 1 && ret)
return CHARGE_RETRY;
/*
@@ -1909,13 +1966,14 @@ static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
*/
static int __mem_cgroup_try_charge(struct mm_struct *mm,
gfp_t gfp_mask,
- struct mem_cgroup **memcg, bool oom,
- int page_size)
+ unsigned int nr_pages,
+ struct mem_cgroup **memcg,
+ bool oom)
{
+ unsigned int batch = max(CHARGE_BATCH, nr_pages);
int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
struct mem_cgroup *mem = NULL;
int ret;
- int csize = max(CHARGE_SIZE, (unsigned long) page_size);
/*
* Unlike gloval-vm's OOM-kill, we're not in memory shortage
@@ -1940,7 +1998,7 @@ again:
VM_BUG_ON(css_is_removed(&mem->css));
if (mem_cgroup_is_root(mem))
goto done;
- if (page_size == PAGE_SIZE && consume_stock(mem))
+ if (nr_pages == 1 && consume_stock(mem))
goto done;
css_get(&mem->css);
} else {
@@ -1963,7 +2021,7 @@ again:
rcu_read_unlock();
goto done;
}
- if (page_size == PAGE_SIZE && consume_stock(mem)) {
+ if (nr_pages == 1 && consume_stock(mem)) {
/*
* It seems dagerous to access memcg without css_get().
* But considering how consume_stok works, it's not
@@ -1998,13 +2056,12 @@ again:
nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
}
- ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
-
+ ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
switch (ret) {
case CHARGE_OK:
break;
case CHARGE_RETRY: /* not in OOM situation but retry */
- csize = page_size;
+ batch = nr_pages;
css_put(&mem->css);
mem = NULL;
goto again;
@@ -2025,8 +2082,8 @@ again:
}
} while (ret != CHARGE_OK);
- if (csize > page_size)
- refill_stock(mem, csize - page_size);
+ if (batch > nr_pages)
+ refill_stock(mem, batch - nr_pages);
css_put(&mem->css);
done:
*memcg = mem;
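
With this hunk the charge path is page-based end to end: the local batch starts as max(CHARGE_BATCH, nr_pages), drops back to exactly nr_pages when the charge has to be retried, and any surplus is parked in the per-cpu stock via refill_stock(mem, batch - nr_pages). The toy program below only illustrates those quantities for a regular page and a THP; HPAGE_PMD_NR = 512 is an assumption for 2 MB huge pages on 4 KB base pages.

#include <stdio.h>

#define CHARGE_BATCH 32U
#define HPAGE_PMD_NR 512U       /* assumed: 2 MB THP, 4 KB base pages */

static unsigned int max_u(unsigned int a, unsigned int b)
{
        return a > b ? a : b;
}

int main(void)
{
        unsigned int cases[] = { 1, HPAGE_PMD_NR };

        for (int i = 0; i < 2; i++) {
                unsigned int nr_pages = cases[i];
                unsigned int batch = max_u(CHARGE_BATCH, nr_pages);

                printf("nr_pages=%u charged=%u stock refill=%u\n",
                       nr_pages, batch, batch - nr_pages);
        }
        return 0;
}
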
@@ -2045,21 +2102,17 @@ bypass:
* gotten by try_charge().
*/
static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
- unsigned long count)
+ unsigned int nr_pages)
{
if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE * count);
+ unsigned long bytes = nr_pages * PAGE_SIZE;
+
+ res_counter_uncharge(&mem->res, bytes);
if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
+ res_counter_uncharge(&mem->memsw, bytes);
}
}
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
- int page_size)
-{
- __mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
-}
-
/*
* A helper function to get mem_cgroup from ID. must be called under
* rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -2108,20 +2161,15 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
}
static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page *page,
+ unsigned int nr_pages,
struct page_cgroup *pc,
- enum charge_type ctype,
- int page_size)
+ enum charge_type ctype)
{
- int nr_pages = page_size >> PAGE_SHIFT;
-
- /* try_charge() can return NULL to *memcg, taking care of it. */
- if (!mem)
- return;
-
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
- mem_cgroup_cancel_charge(mem, page_size);
+ __mem_cgroup_cancel_charge(mem, nr_pages);
return;
}
/*
@@ -2158,7 +2206,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
* if they exceeds softlimit.
*/
- memcg_check_events(mem, pc->page);
+ memcg_check_events(mem, page);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -2195,7 +2243,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
* We hold lru_lock, then, reduce counter directly.
*/
lru = page_lru(head);
- mz = page_cgroup_zoneinfo(head_pc);
+ mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
MEM_CGROUP_ZSTAT(mz, lru) -= 1;
}
tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
@@ -2204,7 +2252,9 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
#endif
/**
- * __mem_cgroup_move_account - move account of the page
+ * mem_cgroup_move_account - move account of the page
+ * @page: the page
+ * @nr_pages: number of regular pages (>1 for huge pages)
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
@@ -2212,25 +2262,42 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
- * - the pc is locked, used, and ->mem_cgroup points to @from.
+ * - compound_lock is held when nr_pages > 1
*
* This function doesn't do "charge" nor css_get to new cgroup. It should be
* done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is
* true, this function does "uncharge" from old cgroup, but it doesn't if
* @uncharge is false, so a caller should do "uncharge".
*/
-
-static void __mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
- int charge_size)
+static int mem_cgroup_move_account(struct page *page,
+ unsigned int nr_pages,
+ struct page_cgroup *pc,
+ struct mem_cgroup *from,
+ struct mem_cgroup *to,
+ bool uncharge)
{
- int nr_pages = charge_size >> PAGE_SHIFT;
+ unsigned long flags;
+ int ret;
VM_BUG_ON(from == to);
- VM_BUG_ON(PageLRU(pc->page));
- VM_BUG_ON(!page_is_cgroup_locked(pc));
- VM_BUG_ON(!PageCgroupUsed(pc));
- VM_BUG_ON(pc->mem_cgroup != from);
+ VM_BUG_ON(PageLRU(page));
+ /*
+ * The page is isolated from LRU. So, collapse function
+ * will not handle this page. But page splitting can happen.
+ * Do this check under compound_page_lock(). The caller should
+ * hold it.
+ */
+ ret = -EBUSY;
+ if (nr_pages > 1 && !PageTransHuge(page))
+ goto out;
+
+ lock_page_cgroup(pc);
+
+ ret = -EINVAL;
+ if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
+ goto unlock;
+
+ move_lock_page_cgroup(pc, &flags);
if (PageCgroupFileMapped(pc)) {
/* Update mapped_file data for mem_cgroup */
@@ -2242,7 +2309,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
if (uncharge)
/* This is not "cancel", but cancel_charge does all we need. */
- mem_cgroup_cancel_charge(from, charge_size);
+ __mem_cgroup_cancel_charge(from, nr_pages);
/* caller should have done css_get */
pc->mem_cgroup = to;
@@ -2254,40 +2321,16 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
* garanteed that "to" is never removed. So, we don't check rmdir
* status here.
*/
-}
-
-/*
- * check whether the @pc is valid for moving account and call
- * __mem_cgroup_move_account()
- */
-static int mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to,
- bool uncharge, int charge_size)
-{
- int ret = -EINVAL;
- unsigned long flags;
- /*
- * The page is isolated from LRU. So, collapse function
- * will not handle this page. But page splitting can happen.
- * Do this check under compound_page_lock(). The caller should
- * hold it.
- */
- if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
- return -EBUSY;
-
- lock_page_cgroup(pc);
- if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
- move_lock_page_cgroup(pc, &flags);
- __mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
- move_unlock_page_cgroup(pc, &flags);
- ret = 0;
- }
+ move_unlock_page_cgroup(pc, &flags);
+ ret = 0;
+unlock:
unlock_page_cgroup(pc);
/*
* check events
*/
- memcg_check_events(to, pc->page);
- memcg_check_events(from, pc->page);
+ memcg_check_events(to, page);
+ memcg_check_events(from, page);
+out:
return ret;
}
@@ -2295,16 +2338,16 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
* move charges to its parent.
*/
-static int mem_cgroup_move_parent(struct page_cgroup *pc,
+static int mem_cgroup_move_parent(struct page *page,
+ struct page_cgroup *pc,
struct mem_cgroup *child,
gfp_t gfp_mask)
{
- struct page *page = pc->page;
struct cgroup *cg = child->css.cgroup;
struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
- int page_size = PAGE_SIZE;
- unsigned long flags;
+ unsigned int nr_pages;
+ unsigned long uninitialized_var(flags);
int ret;
/* Is ROOT ? */
@@ -2317,23 +2360,21 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
if (isolate_lru_page(page))
goto put;
- if (PageTransHuge(page))
- page_size = HPAGE_SIZE;
+ nr_pages = hpage_nr_pages(page);
parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask,
- &parent, false, page_size);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
if (ret || !parent)
goto put_back;
- if (page_size > PAGE_SIZE)
+ if (nr_pages > 1)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
+ ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
if (ret)
- mem_cgroup_cancel_charge(parent, page_size);
+ __mem_cgroup_cancel_charge(parent, nr_pages);
- if (page_size > PAGE_SIZE)
+ if (nr_pages > 1)
compound_unlock_irqrestore(page, flags);
put_back:
putback_lru_page(page);
@@ -2353,13 +2394,13 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, enum charge_type ctype)
{
struct mem_cgroup *mem = NULL;
- int page_size = PAGE_SIZE;
+ unsigned int nr_pages = 1;
struct page_cgroup *pc;
bool oom = true;
int ret;
if (PageTransHuge(page)) {
- page_size <<= compound_order(page);
+ nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
/*
* Never OOM-kill a process for a huge page. The
@@ -2369,16 +2410,13 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
}
pc = lookup_page_cgroup(page);
- /* can happen at boot */
- if (unlikely(!pc))
- return 0;
- prefetchw(pc);
+ BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
- ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
if (ret || !mem)
return ret;
- __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
+ __mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
return 0;
}
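
For a transparent huge page the charge size now comes straight from the compound order, so a 2 MB THP is charged as 512 base pages in one shot (and, as the comment in the hunk notes, never triggers the OOM killer on its behalf). The snippet below is only a worked example of that shift, assuming 4 KB base pages and order 9; it is not the kernel helper.

#include <stdio.h>

int main(void)
{
        unsigned int nr_pages = 1;
        unsigned int order = 9;         /* assumed HPAGE_PMD_ORDER on x86-64 */

        nr_pages <<= order;             /* mirrors nr_pages <<= compound_order(page) */
        printf("charging %u pages (%u KB)\n", nr_pages, nr_pages * 4);
        return 0;
}
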
@@ -2406,9 +2444,26 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype);
+static void
+__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
+ enum charge_type ctype)
+{
+ struct page_cgroup *pc = lookup_page_cgroup(page);
+ /*
+ * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
+ * is already on the LRU. It means the page may be on some other page_cgroup's
+ * LRU. Take care of it.
+ */
+ mem_cgroup_lru_del_before_commit(page);
+ __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
+ mem_cgroup_lru_add_after_commit(page);
+ return;
+}
+
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
+ struct mem_cgroup *mem = NULL;
int ret;
if (mem_cgroup_disabled())
@@ -2443,14 +2498,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
if (unlikely(!mm))
mm = &init_mm;
- if (page_is_file_cache(page))
- return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_CACHE);
+ if (page_is_file_cache(page)) {
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
+ if (ret || !mem)
+ return ret;
+ /*
+ * FUSE reuses pages without going through the final
+ * put that would remove them from the LRU list, make
+ * sure that they get relinked properly.
+ */
+ __mem_cgroup_commit_charge_lrucare(page, mem,
+ MEM_CGROUP_CHARGE_TYPE_CACHE);
+ return ret;
+ }
/* shmem */
if (PageSwapCache(page)) {
- struct mem_cgroup *mem = NULL;
-
ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
if (!ret)
__mem_cgroup_commit_charge_swapin(page, mem,
@@ -2475,6 +2538,8 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
struct mem_cgroup *mem;
int ret;
+ *ptr = NULL;
+
if (mem_cgroup_disabled())
return 0;
@@ -2492,30 +2557,26 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
if (!mem)
goto charge_cur_mm;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
+ ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
css_put(&mem->css);
return ret;
charge_cur_mm:
if (unlikely(!mm))
mm = &init_mm;
- return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
+ return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
}
static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype)
{
- struct page_cgroup *pc;
-
if (mem_cgroup_disabled())
return;
if (!ptr)
return;
cgroup_exclude_rmdir(&ptr->css);
- pc = lookup_page_cgroup(page);
- mem_cgroup_lru_del_before_commit_swapcache(page);
- __mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
- mem_cgroup_lru_add_after_commit_swapcache(page);
+
+ __mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
/*
* Now swap is on-memory. This means this page may be
* counted both as mem and swap....double count.
@@ -2563,15 +2624,16 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
return;
if (!mem)
return;
- mem_cgroup_cancel_charge(mem, PAGE_SIZE);
+ __mem_cgroup_cancel_charge(mem, 1);
}
-static void
-__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
- int page_size)
+static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
+ unsigned int nr_pages,
+ const enum charge_type ctype)
{
struct memcg_batch_info *batch = NULL;
bool uncharge_memsw = true;
+
/* If swapout, usage of swap doesn't decrease */
if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
uncharge_memsw = false;
@@ -2595,7 +2657,7 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
goto direct_uncharge;
- if (page_size != PAGE_SIZE)
+ if (nr_pages > 1)
goto direct_uncharge;
/*
@@ -2606,14 +2668,14 @@ __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
if (batch->memcg != mem)
goto direct_uncharge;
/* remember freed charge and uncharge it later */
- batch->bytes += PAGE_SIZE;
+ batch->nr_pages++;
if (uncharge_memsw)
- batch->memsw_bytes += PAGE_SIZE;
+ batch->memsw_nr_pages++;
return;
direct_uncharge:
- res_counter_uncharge(&mem->res, page_size);
+ res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
if (uncharge_memsw)
- res_counter_uncharge(&mem->memsw, page_size);
+ res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
if (unlikely(batch->memcg != mem))
memcg_oom_recover(mem);
return;
@@ -2625,10 +2687,9 @@ direct_uncharge:
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
- int count;
- struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
- int page_size = PAGE_SIZE;
+ unsigned int nr_pages = 1;
+ struct page_cgroup *pc;
if (mem_cgroup_disabled())
return NULL;
@@ -2637,11 +2698,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
return NULL;
if (PageTransHuge(page)) {
- page_size <<= compound_order(page);
+ nr_pages <<= compound_order(page);
VM_BUG_ON(!PageTransHuge(page));
}
-
- count = page_size >> PAGE_SHIFT;
/*
* Check if our page_cgroup is valid
*/
@@ -2674,7 +2733,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
break;
}
- mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
+ mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);
ClearPageCgroupUsed(pc);
/*
@@ -2695,7 +2754,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mem_cgroup_get(mem);
}
if (!mem_cgroup_is_root(mem))
- __do_uncharge(mem, ctype, page_size);
+ mem_cgroup_do_uncharge(mem, nr_pages, ctype);
return mem;
@@ -2735,8 +2794,8 @@ void mem_cgroup_uncharge_start(void)
/* We can do nest. */
if (current->memcg_batch.do_batch == 1) {
current->memcg_batch.memcg = NULL;
- current->memcg_batch.bytes = 0;
- current->memcg_batch.memsw_bytes = 0;
+ current->memcg_batch.nr_pages = 0;
+ current->memcg_batch.memsw_nr_pages = 0;
}
}
@@ -2757,10 +2816,12 @@ void mem_cgroup_uncharge_end(void)
* This "batch->memcg" is valid without any css_get/put etc...
* bacause we hide charges behind us.
*/
- if (batch->bytes)
- res_counter_uncharge(&batch->memcg->res, batch->bytes);
- if (batch->memsw_bytes)
- res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+ if (batch->nr_pages)
+ res_counter_uncharge(&batch->memcg->res,
+ batch->nr_pages * PAGE_SIZE);
+ if (batch->memsw_nr_pages)
+ res_counter_uncharge(&batch->memcg->memsw,
+ batch->memsw_nr_pages * PAGE_SIZE);
memcg_oom_recover(batch->memcg);
/* forget this pointer (for sanity check) */
batch->memcg = NULL;
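
Uncharge batching now mirrors the charge side: struct memcg_batch_info accumulates nr_pages and memsw_nr_pages, and only the final res_counter_uncharge() calls at the end of the batch convert back to bytes. A simplified model of that accumulation; the two field names follow the patch, everything else is illustrative.

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct batch_model {
        unsigned long nr_pages;
        unsigned long memsw_nr_pages;
};

static void uncharge_one(struct batch_model *batch, int uncharge_memsw)
{
        batch->nr_pages++;
        if (uncharge_memsw)
                batch->memsw_nr_pages++;
}

int main(void)
{
        struct batch_model batch = { 0, 0 };

        for (int i = 0; i < 1000; i++)
                uncharge_one(&batch, 1);

        /* the single pair of res_counter_uncharge() calls at batch end */
        printf("res: -%lu bytes, memsw: -%lu bytes\n",
               batch.nr_pages * PAGE_SIZE,
               batch.memsw_nr_pages * PAGE_SIZE);
        return 0;
}
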
@@ -2883,13 +2944,15 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
* page belongs to.
*/
int mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **ptr)
+ struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
{
- struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
+ struct page_cgroup *pc;
enum charge_type ctype;
int ret = 0;
+ *ptr = NULL;
+
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return 0;
@@ -2940,7 +3003,7 @@ int mem_cgroup_prepare_migration(struct page *page,
return 0;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
css_put(&mem->css);/* drop extra refcnt */
if (ret || *ptr == NULL) {
if (PageAnon(page)) {
@@ -2967,7 +3030,7 @@ int mem_cgroup_prepare_migration(struct page *page,
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- __mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
+ __mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
return ret;
}
@@ -3032,7 +3095,7 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
struct mm_struct *mm,
gfp_t gfp_mask)
{
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
int ret;
if (mem_cgroup_disabled())
@@ -3045,6 +3108,52 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
return ret;
}
+#ifdef CONFIG_DEBUG_VM
+static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+{
+ struct page_cgroup *pc;
+
+ pc = lookup_page_cgroup(page);
+ if (likely(pc) && PageCgroupUsed(pc))
+ return pc;
+ return NULL;
+}
+
+bool mem_cgroup_bad_page_check(struct page *page)
+{
+ if (mem_cgroup_disabled())
+ return false;
+
+ return lookup_page_cgroup_used(page) != NULL;
+}
+
+void mem_cgroup_print_bad_page(struct page *page)
+{
+ struct page_cgroup *pc;
+
+ pc = lookup_page_cgroup_used(page);
+ if (pc) {
+ int ret = -1;
+ char *path;
+
+ printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
+ pc, pc->flags, pc->mem_cgroup);
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (path) {
+ rcu_read_lock();
+ ret = cgroup_path(pc->mem_cgroup->css.cgroup,
+ path, PATH_MAX);
+ rcu_read_unlock();
+ }
+
+ printk(KERN_CONT "(%s)\n",
+ (ret < 0) ? "cannot get the path" : path);
+ kfree(path);
+ }
+}
+#endif
+
static DEFINE_MUTEX(set_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
@@ -3288,6 +3397,8 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
loop += 256;
busy = NULL;
while (loop--) {
+ struct page *page;
+
ret = 0;
spin_lock_irqsave(&zone->lru_lock, flags);
if (list_empty(list)) {
@@ -3303,7 +3414,9 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
}
spin_unlock_irqrestore(&zone->lru_lock, flags);
- ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
+ page = lookup_cgroup_page(pc);
+
+ ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
if (ret == -ENOMEM)
break;
@@ -3451,13 +3564,13 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
}
-static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
- enum mem_cgroup_stat_index idx)
+static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
+ enum mem_cgroup_stat_index idx)
{
struct mem_cgroup *iter;
- s64 val = 0;
+ long val = 0;
- /* each per cpu's value can be minus.Then, use s64 */
+ /* Per-cpu values can be negative, use a signed accumulator */
for_each_mem_cgroup_tree(iter, mem)
val += mem_cgroup_read_stat(iter, idx);
@@ -3477,12 +3590,11 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
return res_counter_read_u64(&mem->memsw, RES_USAGE);
}
- val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
- val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
+ val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
+ val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);
if (swap)
- val += mem_cgroup_get_recursive_idx_stat(mem,
- MEM_CGROUP_STAT_SWAPOUT);
+ val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
return val << PAGE_SHIFT;
}
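
mem_cgroup_usage() composes its result from the recursive CACHE and RSS counts, adds SWAPOUT for the memsw file, and shifts the page total into bytes only at the end. The numbers below are purely hypothetical and just show that composition.

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        long cache = 2048, rss = 1024, swap = 256;      /* page counts, made up */
        unsigned long long mem   = (unsigned long long)(cache + rss) << PAGE_SHIFT;
        unsigned long long memsw = (unsigned long long)(cache + rss + swap) << PAGE_SHIFT;

        printf("usage:        %llu bytes\n", mem);
        printf("memsw usage:  %llu bytes\n", memsw);
        return 0;
}
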
@@ -3702,9 +3814,9 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
s->stat[MCS_RSS] += val * PAGE_SIZE;
val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
+ val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
s->stat[MCS_PGPGIN] += val;
- val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
+ val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
s->stat[MCS_PGPGOUT] += val;
if (do_swap_account) {
val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
@@ -3828,9 +3940,7 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
return -EINVAL;
}
- spin_lock(&memcg->reclaim_param_lock);
memcg->swappiness = val;
- spin_unlock(&memcg->reclaim_param_lock);
cgroup_unlock();
@@ -4486,7 +4596,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
res_counter_init(&mem->memsw, NULL);
}
mem->last_scanned_child = 0;
- spin_lock_init(&mem->reclaim_param_lock);
INIT_LIST_HEAD(&mem->oom_notify);
if (parent)
@@ -4574,8 +4683,7 @@ one_by_one:
batch_count = PRECHARGE_COUNT_AT_ONCE;
cond_resched();
}
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
- PAGE_SIZE);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
if (ret || !mem)
/* mem_cgroup_clear_mc() will do uncharge later */
return -ENOMEM;
@@ -4737,7 +4845,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
pte_t *pte;
spinlock_t *ptl;
- VM_BUG_ON(pmd_trans_huge(*pmd));
+ split_huge_page_pmd(walk->mm, pmd);
+
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4899,8 +5008,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
pte_t *pte;
spinlock_t *ptl;
+ split_huge_page_pmd(walk->mm, pmd);
retry:
- VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) {
pte_t ptent = *(pte++);
@@ -4920,8 +5029,8 @@ retry:
if (isolate_lru_page(page))
goto put;
pc = lookup_page_cgroup(page);
- if (!mem_cgroup_move_account(pc,
- mc.from, mc.to, false, PAGE_SIZE)) {
+ if (!mem_cgroup_move_account(page, 1, pc,
+ mc.from, mc.to, false)) {
mc.precharge--;
/* we uncharge from mc.from later. */
mc.moved_charge++;