From 8c7c6e34a1256a5082d38c8e9bd1474476912715 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 7 Jan 2009 18:08:00 -0800
Subject: memcg: mem+swap controller core

This patch implements a per-cgroup limit on the usage of memory+swap.
Although SwapCache exists, double counting of swap-cache and swap-entry
is avoided.

The mem+swap controller works as follows.
- memory usage is limited by memory.limit_in_bytes.
- memory + swap usage is limited by memory.memsw_limit_in_bytes.

This has the following benefits.
- A user can limit the total resource usage of mem+swap.

  Without this, because the memory resource controller doesn't take care
  of swap usage, a process can exhaust all of swap (via a memory leak);
  this feature lets us avoid that case.

  Also, swap is a shared resource, but it cannot be reclaimed (it does
  not go back to memory) until it is used. This characteristic can be
  troublesome when memory is divided into parts by cpuset or memcg.
  Assume groups A and B. After some applications have run, the system
  can end up as:

    Group A -- very large free memory space, but occupying 99% of swap.
    Group B -- under memory shortage, but cannot use swap... it's nearly
               full.

  The ability to set an appropriate swap limit for each group is
  required.

Some may wonder, "why mem+swap rather than swap alone?"
- The global LRU (kswapd) can swap out arbitrary pages. Swap-out means
  moving an account from memory to swap... there is no change in the
  usage of mem+swap. In other words, when we want to limit the usage of
  swap without affecting the global LRU, a mem+swap limit is better than
  just a swap limit.

Accounting target information is stored in swap_cgroup, a per-swap-entry
record. Charging is done as follows.
  map
    - charge page and memsw.
  unmap
    - uncharge page/memsw if not SwapCache.
  swap-out (__delete_from_swap_cache)
    - uncharge page.
    - record mem_cgroup information in swap_cgroup.
  swap-in (do_swap_page)
    - charge as page and memsw. The record in swap_cgroup is cleared and
      memsw accounting is decremented.
  swap-free (swap_free())
    - if the swap entry is freed, memsw is uncharged by PAGE_SIZE.

Some people work under never-swap environments and consider swap to be
something bad. For them, this mem+swap controller extension is just
overhead, so the overhead can be avoided by a config or boot option.
(See Kconfig; the details are not in this patch.)

TODO:
- maybe more optimization can be done in the swap-in path (but it is not
  very safe). For now, we just do simple accounting at this stage.
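To make the charge/uncharge rules above concrete, here is a minimal userspace C model of the two counters. This is illustrative only: "struct counter", the limits, and the helper names are hypothetical stand-ins for the kernel's res_counter and the memcg knobs, not code from this patch.

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SZ 4096UL

	struct counter { unsigned long usage, limit; };

	/* charge sz bytes; fails when the limit would be exceeded */
	static int counter_charge(struct counter *c, unsigned long sz)
	{
		if (c->usage + sz > c->limit)
			return -1;	/* the kernel would reclaim or OOM here */
		c->usage += sz;
		return 0;
	}

	static void counter_uncharge(struct counter *c, unsigned long sz)
	{
		assert(c->usage >= sz);
		c->usage -= sz;
	}

	int main(void)
	{
		struct counter res   = { 0,  8 * PAGE_SZ };	/* like limit_in_bytes */
		struct counter memsw = { 0, 16 * PAGE_SZ };	/* like memsw_limit_in_bytes */

		/* map: charge both the page counter and memsw */
		if (counter_charge(&res, PAGE_SZ) || counter_charge(&memsw, PAGE_SZ))
			return 1;

		/* swap-out: the page leaves memory but stays charged to mem+swap */
		counter_uncharge(&res, PAGE_SZ);
		printf("after swap-out:  mem=%lu memsw=%lu\n", res.usage, memsw.usage);

		/* swap-free: the swap entry is released, so memsw finally drops */
		counter_uncharge(&memsw, PAGE_SZ);
		printf("after swap-free: mem=%lu memsw=%lu\n", res.usage, memsw.usage);
		return 0;
	}

Note the invariant the model preserves: memsw.usage >= res.usage at every step, which is also why a mem+swap limit below the memory limit would make no sense.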
[nishimura@mxp.nes.nec.co.jp: make resize limit hold mutex]
[hugh@veritas.com: memswap controller core swapcache fixes]
Signed-off-by: KAMEZAWA Hiroyuki
Cc: Li Zefan
Cc: Balbir Singh
Cc: Pavel Emelyanov
Signed-off-by: Daisuke Nishimura
Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'mm/vmscan.c')

diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b09a9..f63b20dd771 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1661,7 +1661,8 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
-						gfp_t gfp_mask)
+						gfp_t gfp_mask,
+						bool noswap)
 {
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -1674,6 +1675,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	};
 	struct zonelist *zonelist;
 
+	if (noswap)
+		sc.may_swap = 0;
+
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
--
cgit v1.2.3-70-g09d2


From 08e552c69c6930d64722de3ec18c51844d06ee28 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 7 Jan 2009 18:08:01 -0800
Subject: memcg: synchronized LRU

A big patch for changing memcg's LRU semantics.

Now,
 - page_cgroup is linked to mem_cgroup's own LRU (per zone).
 - the LRU of page_cgroup is not synchronous with the global LRU.
 - page and page_cgroup are one-to-one and statically allocated.
 - to find which LRU a page_cgroup is on, you have to check
   pc->mem_cgroup, as in
     lru = page_cgroup_zoneinfo(pc, nid_of_pc, zid_of_pc);
 - SwapCache is handled.

And, when we handle the LRU list of page_cgroup, we do the following:

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc); .....................(1)
	mz = page_cgroup_zoneinfo(pc);
	spin_lock(&mz->lru_lock);
	.....add to LRU
	spin_unlock(&mz->lru_lock);
	unlock_page_cgroup(pc);

But (1) is a spin_lock and we have to be wary of deadlock with
zone->lru_lock. So, trylock() is used at (1) now. Without (1), we can't
trust that "mz" is correct.

This is an attempt to remove this dirty nesting of locks. This patch
changes mz->lru_lock to be zone->lru_lock. Then, the above sequence can
be written as:

	spin_lock(&zone->lru_lock);	# in vmscan.c or swap.c via global LRU
	mem_cgroup_add/remove/etc_lru() {
		pc = lookup_page_cgroup(page);
		mz = page_cgroup_zoneinfo(pc);
		if (PageCgroupUsed(pc)) {
			....add to LRU
		}
	spin_unlock(&zone->lru_lock);	# in vmscan.c or swap.c via global LRU

This is much simpler.

(*) We're safe even if we don't take lock_page_cgroup(pc), because:
 1. pc->mem_cgroup can be modified only
    - at charge.
    - at account_move().
 2. at charge, the PCG_USED bit is not set before pc->mem_cgroup is
    fixed.
 3. at account_move(), the page is isolated and not on the LRU.

Pros.
 - easy to maintain.
 - memcg can make use of the laziness of pagevec.
 - we don't have to duplicate the LRU/Active/Unevictable bits in
   page_cgroup.
 - the LRU status of memcg will be synchronized with the global LRU's.
 - the number of locks is reduced.
 - account_move() is simplified very much.

Cons.
 - may increase the cost of LRU rotation.
   (no impact if memcg is not configured.)
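The heart of the new scheme is that the memcg hooks run under the caller's zone->lru_lock and trust the PCG_USED bit instead of taking lock_page_cgroup(). A compilable userspace sketch of that rule follows; the list helpers and names are simplified stand-ins, not the kernel's implementation.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct list { struct list *prev, *next; };

	static void list_init(struct list *l) { l->prev = l->next = l; }
	static bool list_empty(struct list *l) { return l->next == l; }
	static void list_add(struct list *n, struct list *h)
	{
		n->next = h->next; n->prev = h;
		h->next->prev = n; h->next = n;
	}
	static void list_del_init(struct list *n)
	{
		n->prev->next = n->next; n->next->prev = n->prev;
		list_init(n);
	}

	struct page_cgroup { struct list lru; bool used; };	/* used == PCG_USED */

	static pthread_mutex_t zone_lru_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct list memcg_lru;	/* stands in for mz->lists[lru] */

	/* called with zone_lru_lock held, like mem_cgroup_add_lru_list() */
	static void memcg_add_lru(struct page_cgroup *pc)
	{
		if (!pc->used)		/* not charged yet: skip, fixed up later */
			return;
		list_add(&pc->lru, &memcg_lru);
	}

	/* called with zone_lru_lock held, like mem_cgroup_del_lru_list() */
	static void memcg_del_lru(struct page_cgroup *pc)
	{
		if (list_empty(&pc->lru))	/* e.g. swapcache not charged yet */
			return;
		list_del_init(&pc->lru);
	}

	int main(void)
	{
		struct page_cgroup pc = { .used = false };

		list_init(&memcg_lru);
		list_init(&pc.lru);

		pthread_mutex_lock(&zone_lru_lock);
		memcg_add_lru(&pc);	/* ignored: PCG_USED not set */
		pc.used = true;		/* charge happened */
		memcg_add_lru(&pc);	/* now linked */
		memcg_del_lru(&pc);
		pthread_mutex_unlock(&zone_lru_lock);

		printf("memcg LRU empty again: %d\n", list_empty(&memcg_lru));
		return 0;
	}

The "skip when unused, fix up later" branch is what the patch's mem_cgroup_lru_fixup() exists for: a SwapCache page can reach the LRU before it is charged, and is re-isolated and put back once PCG_USED is set.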
Signed-off-by: KAMEZAWA Hiroyuki Cc: Li Zefan Cc: Balbir Singh Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/splice.c | 1 + include/linux/memcontrol.h | 29 +++- include/linux/mm_inline.h | 3 + include/linux/page_cgroup.h | 17 --- mm/memcontrol.c | 323 +++++++++++++++++++------------------------- mm/page_cgroup.c | 1 + mm/swap.c | 1 - mm/vmscan.c | 9 +- 8 files changed, 178 insertions(+), 206 deletions(-) (limited to 'mm/vmscan.c') diff --git a/fs/splice.c b/fs/splice.c index 1abab5cee4b..a54b3e3f10a 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ca51ac72d6c..32c07b1852d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -40,7 +40,12 @@ extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr); extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); -extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru); +extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); +extern void mem_cgroup_del_lru(struct page *page); +extern void mem_cgroup_move_lists(struct page *page, + enum lru_list from, enum lru_list to); extern void mem_cgroup_uncharge_page(struct page *page); extern void mem_cgroup_uncharge_cache_page(struct page *page); extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); @@ -131,7 +136,27 @@ static inline int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } -static inline void mem_cgroup_move_lists(struct page *page, bool active) +static inline void mem_cgroup_add_lru_list(struct page *page, int lru) +{ +} + +static inline void mem_cgroup_del_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_rotate_lru_list(struct page *page, int lru) +{ + return ; +} + +static inline void mem_cgroup_del_lru(struct page *page) +{ + return ; +} + +static inline void +mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) { } diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index c948350c378..37ef13d0f01 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -28,6 +28,7 @@ add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_add(&page->lru, &zone->lru[l].list); __inc_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_add_lru_list(page, l); } static inline void @@ -35,6 +36,7 @@ del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l) { list_del(&page->lru); __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } static inline void @@ -54,6 +56,7 @@ del_page_from_lru(struct zone *zone, struct page *page) l += page_is_file_cache(page); } __dec_zone_state(zone, NR_LRU_BASE + l); + mem_cgroup_del_lru_list(page, l); } /** diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index d754b2dfbf2..602cc1fdee9 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -26,10 +26,6 @@ enum { PCG_LOCK, /* page cgroup is locked */ PCG_CACHE, /* charged as cache */ PCG_USED, /* this object is in use. 
*/ - /* flags for LRU placement */ - PCG_ACTIVE, /* page is active in this cgroup */ - PCG_FILE, /* page is file system backed */ - PCG_UNEVICTABLE, /* page is unevictableable */ }; #define TESTPCGFLAG(uname, lname) \ @@ -50,19 +46,6 @@ TESTPCGFLAG(Cache, CACHE) TESTPCGFLAG(Used, USED) CLEARPCGFLAG(Used, USED) -/* LRU management flags (from global-lru definition) */ -TESTPCGFLAG(File, FILE) -SETPCGFLAG(File, FILE) -CLEARPCGFLAG(File, FILE) - -TESTPCGFLAG(Active, ACTIVE) -SETPCGFLAG(Active, ACTIVE) -CLEARPCGFLAG(Active, ACTIVE) - -TESTPCGFLAG(Unevictable, UNEVICTABLE) -SETPCGFLAG(Unevictable, UNEVICTABLE) -CLEARPCGFLAG(Unevictable, UNEVICTABLE) - static inline int page_cgroup_nid(struct page_cgroup *pc) { return page_to_nid(pc->page); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2efcf38f3b7..8ce4e9e4795 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -36,6 +36,7 @@ #include #include #include +#include "internal.h" #include @@ -100,7 +101,6 @@ struct mem_cgroup_per_zone { /* * spin_lock to protect the per cgroup LRU */ - spinlock_t lru_lock; struct list_head lists[NR_LRU_LISTS]; unsigned long count[NR_LRU_LISTS]; }; @@ -163,14 +163,12 @@ enum charge_type { /* only for here (for easy reading.) */ #define PCGF_CACHE (1UL << PCG_CACHE) #define PCGF_USED (1UL << PCG_USED) -#define PCGF_ACTIVE (1UL << PCG_ACTIVE) #define PCGF_LOCK (1UL << PCG_LOCK) -#define PCGF_FILE (1UL << PCG_FILE) static const unsigned long pcg_default_flags[NR_CHARGE_TYPE] = { - PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */ - PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */ - PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */ + PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */ + PCGF_USED | PCGF_LOCK, /* Anon */ + PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */ 0, /* FORCE */ }; @@ -185,9 +183,6 @@ pcg_default_flags[NR_CHARGE_TYPE] = { static void mem_cgroup_get(struct mem_cgroup *mem); static void mem_cgroup_put(struct mem_cgroup *mem); -/* - * Always modified under lru lock. Then, not necessary to preempt_disable() - */ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, struct page_cgroup *pc, bool charge) @@ -195,10 +190,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int val = (charge)? 1 : -1; struct mem_cgroup_stat *stat = &mem->stat; struct mem_cgroup_stat_cpu *cpustat; + int cpu = get_cpu(); - VM_BUG_ON(!irqs_disabled()); - - cpustat = &stat->cpustat[smp_processor_id()]; + cpustat = &stat->cpustat[cpu]; if (PageCgroupCache(pc)) __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); else @@ -210,6 +204,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, else __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); + put_cpu(); } static struct mem_cgroup_per_zone * @@ -264,80 +259,95 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) struct mem_cgroup, css); } -static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, - struct page_cgroup *pc) -{ - int lru = LRU_BASE; +/* + * Following LRU functions are allowed to be used without PCG_LOCK. + * Operations are called by routine of global LRU independently from memcg. + * What we have to take care of here is validness of pc->mem_cgroup. + * + * Changes to pc->mem_cgroup happens when + * 1. charge + * 2. moving account + * In typical case, "charge" is done before add-to-lru. Exception is SwapCache. + * It is added to LRU before charge. + * If PCG_USED bit is not set, page_cgroup is not added to this private LRU. 
+ * When moving account, the page is not on LRU. It's isolated. + */ - if (PageCgroupUnevictable(pc)) - lru = LRU_UNEVICTABLE; - else { - if (PageCgroupActive(pc)) - lru += LRU_ACTIVE; - if (PageCgroupFile(pc)) - lru += LRU_FILE; - } +void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru) +{ + struct page_cgroup *pc; + struct mem_cgroup *mem; + struct mem_cgroup_per_zone *mz; + if (mem_cgroup_subsys.disabled) + return; + pc = lookup_page_cgroup(page); + /* can happen while we handle swapcache. */ + if (list_empty(&pc->lru)) + return; + mz = page_cgroup_zoneinfo(pc); + mem = pc->mem_cgroup; MEM_CGROUP_ZSTAT(mz, lru) -= 1; - - mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false); - list_del(&pc->lru); + list_del_init(&pc->lru); + return; } -static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, - struct page_cgroup *pc, bool hot) +void mem_cgroup_del_lru(struct page *page) { - int lru = LRU_BASE; + mem_cgroup_del_lru_list(page, page_lru(page)); +} - if (PageCgroupUnevictable(pc)) - lru = LRU_UNEVICTABLE; - else { - if (PageCgroupActive(pc)) - lru += LRU_ACTIVE; - if (PageCgroupFile(pc)) - lru += LRU_FILE; - } +void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru) +{ + struct mem_cgroup_per_zone *mz; + struct page_cgroup *pc; - MEM_CGROUP_ZSTAT(mz, lru) += 1; - if (hot) - list_add(&pc->lru, &mz->lists[lru]); - else - list_add_tail(&pc->lru, &mz->lists[lru]); + if (mem_cgroup_subsys.disabled) + return; - mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true); + pc = lookup_page_cgroup(page); + smp_rmb(); + /* unused page is not rotated. */ + if (!PageCgroupUsed(pc)) + return; + mz = page_cgroup_zoneinfo(pc); + list_move(&pc->lru, &mz->lists[lru]); } -static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru) +void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru) { - struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); - int active = PageCgroupActive(pc); - int file = PageCgroupFile(pc); - int unevictable = PageCgroupUnevictable(pc); - enum lru_list from = unevictable ? LRU_UNEVICTABLE : - (LRU_FILE * !!file + !!active); + struct page_cgroup *pc; + struct mem_cgroup_per_zone *mz; - if (lru == from) + if (mem_cgroup_subsys.disabled) + return; + pc = lookup_page_cgroup(page); + /* barrier to sync with "charge" */ + smp_rmb(); + if (!PageCgroupUsed(pc)) return; - MEM_CGROUP_ZSTAT(mz, from) -= 1; - /* - * However this is done under mz->lru_lock, another flags, which - * are not related to LRU, will be modified from out-of-lock. - * We have to use atomic set/clear flags. - */ - if (is_unevictable_lru(lru)) { - ClearPageCgroupActive(pc); - SetPageCgroupUnevictable(pc); - } else { - if (is_active_lru(lru)) - SetPageCgroupActive(pc); - else - ClearPageCgroupActive(pc); - ClearPageCgroupUnevictable(pc); - } - + mz = page_cgroup_zoneinfo(pc); MEM_CGROUP_ZSTAT(mz, lru) += 1; - list_move(&pc->lru, &mz->lists[lru]); + list_add(&pc->lru, &mz->lists[lru]); +} +/* + * To add swapcache into LRU. Be careful to all this function. + * zone->lru_lock shouldn't be held and irq must not be disabled. 
+ */ +static void mem_cgroup_lru_fixup(struct page *page) +{ + if (!isolate_lru_page(page)) + putback_lru_page(page); +} + +void mem_cgroup_move_lists(struct page *page, + enum lru_list from, enum lru_list to) +{ + if (mem_cgroup_subsys.disabled) + return; + mem_cgroup_del_lru_list(page, from); + mem_cgroup_add_lru_list(page, to); } int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) @@ -350,37 +360,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem) return ret; } -/* - * This routine assumes that the appropriate zone's lru lock is already held - */ -void mem_cgroup_move_lists(struct page *page, enum lru_list lru) -{ - struct page_cgroup *pc; - struct mem_cgroup_per_zone *mz; - unsigned long flags; - - if (mem_cgroup_subsys.disabled) - return; - - /* - * We cannot lock_page_cgroup while holding zone's lru_lock, - * because other holders of lock_page_cgroup can be interrupted - * with an attempt to rotate_reclaimable_page. But we cannot - * safely get to page_cgroup without it, so just try_lock it: - * mem_cgroup_isolate_pages allows for page left on wrong list. - */ - pc = lookup_page_cgroup(page); - if (!trylock_page_cgroup(pc)) - return; - if (pc && PageCgroupUsed(pc)) { - mz = page_cgroup_zoneinfo(pc); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_move_lists(pc, lru); - spin_unlock_irqrestore(&mz->lru_lock, flags); - } - unlock_page_cgroup(pc); -} - /* * Calculate mapped_ratio under memory controller. This will be used in * vmscan.c for deteremining we have to reclaim mapped pages. @@ -460,40 +439,24 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); src = &mz->lists[lru]; - spin_lock(&mz->lru_lock); scan = 0; list_for_each_entry_safe_reverse(pc, tmp, src, lru) { if (scan >= nr_to_scan) break; + + page = pc->page; if (unlikely(!PageCgroupUsed(pc))) continue; - page = pc->page; - if (unlikely(!PageLRU(page))) continue; - /* - * TODO: play better with lumpy reclaim, grabbing anything. - */ - if (PageUnevictable(page) || - (PageActive(page) && !active) || - (!PageActive(page) && active)) { - __mem_cgroup_move_lists(pc, page_lru(page)); - continue; - } - scan++; - list_move(&pc->lru, &pc_list); - if (__isolate_lru_page(page, mode, file) == 0) { list_move(&page->lru, dst); nr_taken++; } } - list_splice(&pc_list, src); - spin_unlock(&mz->lru_lock); - *scanned = scan; return nr_taken; } @@ -608,9 +571,6 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, struct page_cgroup *pc, enum charge_type ctype) { - struct mem_cgroup_per_zone *mz; - unsigned long flags; - /* try_charge() can return NULL to *memcg, taking care of it. */ if (!mem) return; @@ -625,17 +585,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, return; } pc->mem_cgroup = mem; - /* - * If a page is accounted as a page cache, insert to inactive list. - * If anon, insert to active list. - */ + smp_wmb(); pc->flags = pcg_default_flags[ctype]; - mz = page_cgroup_zoneinfo(pc); + mem_cgroup_charge_statistics(mem, pc, true); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_add_list(mz, pc, true); - spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); } @@ -646,8 +600,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, * @to: mem_cgroup which the page is moved to. @from != @to. * * The caller must confirm following. - * 1. disable irq. - * 2. lru_lock of old mem_cgroup(@from) should be held. 
+ * - page is not on LRU (isolate_page() is useful.) * * returns 0 at success, * returns -EBUSY when lock is busy or "pc" is unstable. @@ -663,15 +616,14 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, int nid, zid; int ret = -EBUSY; - VM_BUG_ON(!irqs_disabled()); VM_BUG_ON(from == to); + VM_BUG_ON(PageLRU(pc->page)); nid = page_cgroup_nid(pc); zid = page_cgroup_zid(pc); from_mz = mem_cgroup_zoneinfo(from, nid, zid); to_mz = mem_cgroup_zoneinfo(to, nid, zid); - if (!trylock_page_cgroup(pc)) return ret; @@ -681,18 +633,15 @@ static int mem_cgroup_move_account(struct page_cgroup *pc, if (pc->mem_cgroup != from) goto out; - if (spin_trylock(&to_mz->lru_lock)) { - __mem_cgroup_remove_list(from_mz, pc); - css_put(&from->css); - res_counter_uncharge(&from->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&from->memsw, PAGE_SIZE); - pc->mem_cgroup = to; - css_get(&to->css); - __mem_cgroup_add_list(to_mz, pc, false); - ret = 0; - spin_unlock(&to_mz->lru_lock); - } + css_put(&from->css); + res_counter_uncharge(&from->res, PAGE_SIZE); + mem_cgroup_charge_statistics(from, pc, false); + if (do_swap_account) + res_counter_uncharge(&from->memsw, PAGE_SIZE); + pc->mem_cgroup = to; + mem_cgroup_charge_statistics(to, pc, true); + css_get(&to->css); + ret = 0; out: unlock_page_cgroup(pc); return ret; @@ -706,39 +655,47 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, struct mem_cgroup *child, gfp_t gfp_mask) { + struct page *page = pc->page; struct cgroup *cg = child->css.cgroup; struct cgroup *pcg = cg->parent; struct mem_cgroup *parent; - struct mem_cgroup_per_zone *mz; - unsigned long flags; int ret; /* Is ROOT ? */ if (!pcg) return -EINVAL; + parent = mem_cgroup_from_cont(pcg); + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); if (ret) return ret; - mz = mem_cgroup_zoneinfo(child, - page_cgroup_nid(pc), page_cgroup_zid(pc)); + if (!get_page_unless_zero(page)) + return -EBUSY; + + ret = isolate_lru_page(page); + + if (ret) + goto cancel; - spin_lock_irqsave(&mz->lru_lock, flags); ret = mem_cgroup_move_account(pc, child, parent); - spin_unlock_irqrestore(&mz->lru_lock, flags); - /* drop extra refcnt */ + /* drop extra refcnt by try_charge() (move_account increment one) */ css_put(&parent->css); - /* uncharge if move fails */ - if (ret) { - res_counter_uncharge(&parent->res, PAGE_SIZE); - if (do_swap_account) - res_counter_uncharge(&parent->memsw, PAGE_SIZE); + putback_lru_page(page); + if (!ret) { + put_page(page); + return 0; } - + /* uncharge if move fails */ +cancel: + res_counter_uncharge(&parent->res, PAGE_SIZE); + if (do_swap_account) + res_counter_uncharge(&parent->memsw, PAGE_SIZE); + put_page(page); return ret; } @@ -912,6 +869,8 @@ int mem_cgroup_cache_charge_swapin(struct page *page, } if (!locked) unlock_page(page); + /* add this page(page_cgroup) to the LRU we want. */ + mem_cgroup_lru_fixup(page); return ret; } @@ -944,6 +903,8 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr) } } + /* add this page(page_cgroup) to the LRU we want. 
*/ + mem_cgroup_lru_fixup(page); } void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem) @@ -968,7 +929,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) struct page_cgroup *pc; struct mem_cgroup *mem = NULL; struct mem_cgroup_per_zone *mz; - unsigned long flags; if (mem_cgroup_subsys.disabled) return NULL; @@ -1010,12 +970,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)) res_counter_uncharge(&mem->memsw, PAGE_SIZE); + mem_cgroup_charge_statistics(mem, pc, false); ClearPageCgroupUsed(pc); mz = page_cgroup_zoneinfo(pc); - spin_lock_irqsave(&mz->lru_lock, flags); - __mem_cgroup_remove_list(mz, pc); - spin_unlock_irqrestore(&mz->lru_lock, flags); unlock_page_cgroup(pc); css_put(&mem->css); @@ -1281,21 +1239,22 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, return ret; } - /* * This routine traverse page_cgroup in given list and drop them all. * *And* this routine doesn't reclaim page itself, just removes page_cgroup. */ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, - struct mem_cgroup_per_zone *mz, - enum lru_list lru) + int node, int zid, enum lru_list lru) { + struct zone *zone; + struct mem_cgroup_per_zone *mz; struct page_cgroup *pc, *busy; - unsigned long flags; - unsigned long loop; + unsigned long flags, loop; struct list_head *list; int ret = 0; + zone = &NODE_DATA(node)->node_zones[zid]; + mz = mem_cgroup_zoneinfo(mem, node, zid); list = &mz->lists[lru]; loop = MEM_CGROUP_ZSTAT(mz, lru); @@ -1304,19 +1263,19 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, busy = NULL; while (loop--) { ret = 0; - spin_lock_irqsave(&mz->lru_lock, flags); + spin_lock_irqsave(&zone->lru_lock, flags); if (list_empty(list)) { - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); break; } pc = list_entry(list->prev, struct page_cgroup, lru); if (busy == pc) { list_move(&pc->lru, list); busy = 0; - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); continue; } - spin_unlock_irqrestore(&mz->lru_lock, flags); + spin_unlock_irqrestore(&zone->lru_lock, flags); ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE); if (ret == -ENOMEM) @@ -1329,6 +1288,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem, } else busy = NULL; } + if (!ret && !list_empty(list)) return -EBUSY; return ret; @@ -1364,12 +1324,10 @@ move_account: ret = 0; for_each_node_state(node, N_POSSIBLE) { for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { - struct mem_cgroup_per_zone *mz; enum lru_list l; - mz = mem_cgroup_zoneinfo(mem, node, zid); for_each_lru(l) { ret = mem_cgroup_force_empty_list(mem, - mz, l); + node, zid, l); if (ret) break; } @@ -1413,6 +1371,7 @@ try_to_free: } } + lru_add_drain(); /* try move_account...there may be some *locked* pages. 
*/ if (mem->res.usage) goto move_account; @@ -1657,7 +1616,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) for (zone = 0; zone < MAX_NR_ZONES; zone++) { mz = &pn->zoneinfo[zone]; - spin_lock_init(&mz->lru_lock); for_each_lru(l) INIT_LIST_HEAD(&mz->lists[l]); } @@ -1706,8 +1664,15 @@ static struct mem_cgroup *mem_cgroup_alloc(void) static void mem_cgroup_free(struct mem_cgroup *mem) { + int node; + if (atomic_read(&mem->refcnt) > 0) return; + + + for_each_node_state(node, N_POSSIBLE) + free_mem_cgroup_per_zone_info(mem, node); + if (mem_cgroup_size() < PAGE_SIZE) kfree(mem); else @@ -1780,12 +1745,6 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - int node; - struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - - for_each_node_state(node, N_POSSIBLE) - free_mem_cgroup_per_zone_info(mem, node); - mem_cgroup_free(mem_cgroup_from_cont(cont)); } diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 685e7c8e1fd..74ae8e01d07 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -16,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn) pc->flags = 0; pc->mem_cgroup = NULL; pc->page = pfn_to_page(pfn); + INIT_LIST_HEAD(&pc->lru); } static unsigned long total_usage; diff --git a/mm/swap.c b/mm/swap.c index ba2c0e8b8b5..8a98a9c9070 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -168,7 +168,6 @@ void activate_page(struct page *page) lru += LRU_ACTIVE; add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - mem_cgroup_move_lists(page, lru); zone->recent_rotated[!!file]++; zone->recent_scanned[!!file]++; diff --git a/mm/vmscan.c b/mm/vmscan.c index f63b20dd771..45983af1de3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -512,7 +512,6 @@ redo: lru = LRU_UNEVICTABLE; add_page_to_unevictable_list(page); } - mem_cgroup_move_lists(page, lru); /* * page's status can change while we move it among lru. 
If an evictable @@ -547,7 +546,6 @@ void putback_lru_page(struct page *page) lru = !!TestClearPageActive(page) + page_is_file_cache(page); lru_cache_add_lru(page, lru); - mem_cgroup_move_lists(page, lru); put_page(page); } #endif /* CONFIG_UNEVICTABLE_LRU */ @@ -813,6 +811,7 @@ int __isolate_lru_page(struct page *page, int mode, int file) return ret; ret = -EBUSY; + if (likely(get_page_unless_zero(page))) { /* * Be careful not to clear PageLRU until after we're */ ClearPageLRU(page); ret = 0; + mem_cgroup_del_lru(page); } return ret; @@ -1134,7 +1134,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(zone, page, lru); - mem_cgroup_move_lists(page, lru); if (PageActive(page) && scan_global_lru(sc)) { int file = !!page_is_file_cache(page); zone->recent_rotated[file]++; @@ -1263,7 +1262,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, ClearPageActive(page); list_move(&page->lru, &zone->lru[lru].list); - mem_cgroup_move_lists(page, lru); + mem_cgroup_add_lru_list(page, lru); pgmoved++; if (!pagevec_add(&pvec, page)) { __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved); @@ -2408,6 +2407,7 @@ retry: __dec_zone_state(zone, NR_UNEVICTABLE); list_move(&page->lru, &zone->lru[l].list); + mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l); __inc_zone_state(zone, NR_INACTIVE_ANON + l); __count_vm_event(UNEVICTABLE_PGRESCUED); } else { @@ -2416,6 +2416,7 @@ retry: */ SetPageUnevictable(page); list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list); + mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE); if (page_evictable(page, NULL)) goto retry; } -- cgit v1.2.3-70-g09d2 From f89eb90e33fd4e4e0cc1a6d20afd63c5a561885a Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:14 -0800 Subject: inactive_anon_is_low: move to vmscan inactive_anon_is_low() is called only from vmscan, so it can be moved to vmscan.c. This patch doesn't have any functional change. Reviewed-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_inline.h | 19 ------------------- mm/vmscan.c | 20 ++++++++++++++++++++ 2 files changed, 20 insertions(+), 19 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 37ef13d0f01..7fbb9726755 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -81,23 +81,4 @@ static inline enum lru_list page_lru(struct page *page) return lru; } -/** - * inactive_anon_is_low - check if anonymous pages need to be deactivated - * @zone: zone to check - * - * Returns true if the zone does not have enough inactive anon pages, - * meaning some active anon pages need to be deactivated.
- */ -static inline int inactive_anon_is_low(struct zone *zone) -{ - unsigned long active, inactive; - - active = zone_page_state(zone, NR_ACTIVE_ANON); - inactive = zone_page_state(zone, NR_INACTIVE_ANON); - - if (inactive * zone->inactive_ratio < active) - return 1; - - return 0; -} - #endif diff --git a/mm/vmscan.c b/mm/vmscan.c index 45983af1de3..f75d924cb4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1291,6 +1291,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, pagevec_release(&pvec); } +/** + * inactive_anon_is_low - check if anonymous pages need to be deactivated + * @zone: zone to check + * + * Returns true if the zone does not have enough inactive anon pages, + * meaning some active anon pages need to be deactivated. + */ +static int inactive_anon_is_low(struct zone *zone) +{ + unsigned long active, inactive; + + active = zone_page_state(zone, NR_ACTIVE_ANON); + inactive = zone_page_state(zone, NR_INACTIVE_ANON); + + if (inactive * zone->inactive_ratio < active) + return 1; + + return 0; +} + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) { -- cgit v1.2.3-70-g09d2 From 6e9015716ae9b59e9635d692fddfcfb9582c146c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:15 -0800 Subject: mm: introduce zone_reclaim struct Add a zone_reclaim_stat struct for later enhancement. A later patch uses this. This patch doesn't have any behavior change (yet). Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 24 ++++++++++++++---------- mm/page_alloc.c | 8 ++++---- mm/swap.c | 12 ++++++++---- mm/vmscan.c | 47 ++++++++++++++++++++++++++++++----------------- 4 files changed, 56 insertions(+), 35 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 35a7b5e1946..09c14e213b6 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -263,6 +263,19 @@ enum zone_type { #error ZONES_SHIFT -- too many zones configured adjust calculation #endif +struct zone_reclaim_stat { + /* + * The pageout code in vmscan.c keeps track of how many of the + * mem/swap backed and file backed pages are refeferenced. + * The higher the rotated/scanned ratio, the more valuable + * that cache is. + * + * The anon LRU stats live in [0], file LRU stats in [1] + */ + unsigned long recent_rotated[2]; + unsigned long recent_scanned[2]; +}; + struct zone { /* Fields commonly accessed by the page allocator */ unsigned long pages_min, pages_low, pages_high; @@ -315,16 +328,7 @@ struct zone { unsigned long nr_scan; } lru[NR_LRU_LISTS];
- * - * The anon LRU stats live in [0], file LRU stats in [1] - */ - unsigned long recent_rotated[2]; - unsigned long recent_scanned[2]; + struct zone_reclaim_stat reclaim_stat; unsigned long pages_scanned; /* since last reclaim */ unsigned long flags; /* zone flags, see below */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7bf22e04531..5675b307385 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, INIT_LIST_HEAD(&zone->lru[l].list); zone->lru[l].nr_scan = 0; } - zone->recent_rotated[0] = 0; - zone->recent_rotated[1] = 0; - zone->recent_scanned[0] = 0; - zone->recent_scanned[1] = 0; + zone->reclaim_stat.recent_rotated[0] = 0; + zone->reclaim_stat.recent_rotated[1] = 0; + zone->reclaim_stat.recent_scanned[0] = 0; + zone->reclaim_stat.recent_scanned[1] = 0; zap_zone_vm_stats(zone); zone->flags = 0; if (!size) diff --git a/mm/swap.c b/mm/swap.c index 8a98a9c9070..26b07e7bc3d 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page) void activate_page(struct page *page) { struct zone *zone = page_zone(page); + struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { @@ -169,8 +170,8 @@ void activate_page(struct page *page) add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - zone->recent_rotated[!!file]++; - zone->recent_scanned[!!file]++; + reclaim_stat->recent_rotated[!!file]++; + reclaim_stat->recent_scanned[!!file]++; } spin_unlock_irq(&zone->lru_lock); } @@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { int i; struct zone *zone = NULL; + struct zone_reclaim_stat *reclaim_stat = NULL; + VM_BUG_ON(is_unevictable_lru(lru)); for (i = 0; i < pagevec_count(pvec); i++) { @@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; + reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); } VM_BUG_ON(PageActive(page)); @@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) VM_BUG_ON(PageLRU(page)); SetPageLRU(page); file = is_file_lru(lru); - zone->recent_scanned[file]++; + reclaim_stat->recent_scanned[file]++; if (is_active_lru(lru)) { SetPageActive(page); - zone->recent_rotated[file]++; + reclaim_stat->recent_rotated[file]++; } add_page_to_lru_list(zone, page, lru); } diff --git a/mm/vmscan.c b/mm/vmscan.c index f75d924cb4f..03ca923c665 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -130,6 +130,12 @@ static DECLARE_RWSEM(shrinker_rwsem); #define scan_global_lru(sc) (1) #endif +static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, + struct scan_control *sc) +{ + return &zone->reclaim_stat; +} + /* * Add a shrinker callback to be called from the vm */ @@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, struct pagevec pvec; unsigned long nr_scanned = 0; unsigned long nr_reclaimed = 0; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); pagevec_init(&pvec, 1); @@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, if (scan_global_lru(sc)) { zone->pages_scanned += nr_scan; - zone->recent_scanned[0] += count[LRU_INACTIVE_ANON]; - zone->recent_scanned[0] += count[LRU_ACTIVE_ANON]; - zone->recent_scanned[1] += count[LRU_INACTIVE_FILE]; - zone->recent_scanned[1] += 
count[LRU_ACTIVE_FILE]; + reclaim_stat->recent_scanned[0] += + count[LRU_INACTIVE_ANON]; + reclaim_stat->recent_scanned[0] += + count[LRU_ACTIVE_ANON]; + reclaim_stat->recent_scanned[1] += + count[LRU_INACTIVE_FILE]; + reclaim_stat->recent_scanned[1] += + count[LRU_ACTIVE_FILE]; } spin_unlock_irq(&zone->lru_lock); @@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, add_page_to_lru_list(zone, page, lru); if (PageActive(page) && scan_global_lru(sc)) { int file = !!page_is_file_cache(page); - zone->recent_rotated[file]++; + reclaim_stat->recent_rotated[file]++; } if (!pagevec_add(&pvec, page)) { spin_unlock_irq(&zone->lru_lock); @@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, struct page *page; struct pagevec pvec; enum lru_list lru; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); lru_add_drain(); spin_lock_irq(&zone->lru_lock); @@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, */ if (scan_global_lru(sc)) { zone->pages_scanned += pgscanned; - zone->recent_scanned[!!file] += pgmoved; + reclaim_stat->recent_scanned[!!file] += pgmoved; } if (file) @@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * pages in get_scan_ratio. */ if (scan_global_lru(sc)) - zone->recent_rotated[!!file] += pgmoved; + reclaim_stat->recent_rotated[!!file] += pgmoved; while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); @@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, unsigned long anon, file, free; unsigned long anon_prio, file_prio; unsigned long ap, fp; + struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); /* If we have no swap space, do not bother scanning anon pages. */ if (nr_swap_pages <= 0) { @@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, * * anon in [0], file in [1] */ - if (unlikely(zone->recent_scanned[0] > anon / 4)) { + if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { spin_lock_irq(&zone->lru_lock); - zone->recent_scanned[0] /= 2; - zone->recent_rotated[0] /= 2; + reclaim_stat->recent_scanned[0] /= 2; + reclaim_stat->recent_rotated[0] /= 2; spin_unlock_irq(&zone->lru_lock); } - if (unlikely(zone->recent_scanned[1] > file / 4)) { + if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) { spin_lock_irq(&zone->lru_lock); - zone->recent_scanned[1] /= 2; - zone->recent_rotated[1] /= 2; + reclaim_stat->recent_scanned[1] /= 2; + reclaim_stat->recent_rotated[1] /= 2; spin_unlock_irq(&zone->lru_lock); } @@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, * proportional to the fraction of recently scanned pages on * each list that were recently referenced and in active use. 
*/ - ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1); - ap /= zone->recent_rotated[0] + 1; + ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); + ap /= reclaim_stat->recent_rotated[0] + 1; - fp = (file_prio + 1) * (zone->recent_scanned[1] + 1); - fp /= zone->recent_rotated[1] + 1; + fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); + fp /= reclaim_stat->recent_rotated[1] + 1; /* Normalize to percentages */ percent[0] = 100 * ap / (ap + fp + 1); -- cgit v1.2.3-70-g09d2 From c9f299d9862deadf9fbee3ca28d915fdb006975a Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:16 -0800 Subject: mm: add zone nr_pages helper function Add a zone_nr_pages() helper function. It is used by a later patch. This patch doesn't have any functional change. Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Acked-by: Rik van Riel Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 03ca923c665..6827d35954f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -136,6 +136,13 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, return &zone->reclaim_stat; } +static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc, + enum lru_list lru) +{ + return zone_page_state(zone, NR_LRU_BASE + lru); +} + + /* * Add a shrinker callback to be called from the vm */ @@ -1365,10 +1372,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, return; } - anon = zone_page_state(zone, NR_ACTIVE_ANON) + - zone_page_state(zone, NR_INACTIVE_ANON); - file = zone_page_state(zone, NR_ACTIVE_FILE) + - zone_page_state(zone, NR_INACTIVE_FILE); + anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) + + zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); + file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, force-scan anon pages. */ -- cgit v1.2.3-70-g09d2 From eeee9a8cd1e93c8b94e7788790fa9e2f8910c779 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:17 -0800 Subject: mm: make get_scan_ratio() safe for memcg Currently, get_scan_ratio() always calculates the balancing value for global reclaim, and memcg reclaim doesn't use it. Therefore it doesn't have a scan_global_lru() condition. However, we plan to expand get_scan_ratio() to be usable for memcg too, later. So, this patch moves the global-reclaim-only code in get_scan_ratio() into an explicit scan_global_lru() condition. This patch doesn't have any functional change.
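A compact sketch of the guard this patch adds, as a compilable userspace model (names and numbers are illustrative, not the kernel's): the "force-scan anon" shortcut reads global zone state, so it must fire only for global reclaim.

	#include <stddef.h>
	#include <stdio.h>

	struct mem_cgroup;	/* opaque here */
	struct scan_control { struct mem_cgroup *mem_cgroup; };

	#define scan_global_lru(sc) ((sc)->mem_cgroup == NULL)

	static void get_scan_percent(struct scan_control *sc, unsigned long file,
				     unsigned long free, unsigned long pages_high,
				     unsigned long percent[2])
	{
		/* global-only shortcut: almost no page cache left */
		if (scan_global_lru(sc) && file + free <= pages_high) {
			percent[0] = 100;	/* scan only anon */
			percent[1] = 0;
			return;
		}
		/* stand-in for the real recent_scanned/recent_rotated math */
		percent[0] = percent[1] = 50;
	}

	int main(void)
	{
		unsigned long pct[2];
		struct scan_control global = { NULL };
		struct scan_control memcg = { (struct mem_cgroup *)&pct };

		get_scan_percent(&global, 10, 5, 100, pct);	/* shortcut fires */
		printf("global: anon=%lu%% file=%lu%%\n", pct[0], pct[1]);

		get_scan_percent(&memcg, 10, 5, 100, pct);	/* shortcut skipped */
		printf("memcg:  anon=%lu%% file=%lu%%\n", pct[0], pct[1]);
		return 0;
	}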
Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 6827d35954f..e2b31a522a6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1376,13 +1376,16 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, zone_nr_pages(zone, sc, LRU_INACTIVE_ANON); file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); - free = zone_page_state(zone, NR_FREE_PAGES); - /* If we have very few page cache pages, force-scan anon pages. */ - if (unlikely(file + free <= zone->pages_high)) { - percent[0] = 100; - percent[1] = 0; - return; + if (scan_global_lru(sc)) { + free = zone_page_state(zone, NR_FREE_PAGES); + /* If we have very few page cache pages, + force-scan anon pages. */ + if (unlikely(file + free <= zone->pages_high)) { + percent[0] = 100; + percent[1] = 0; + return; + } } /* -- cgit v1.2.3-70-g09d2 From 14797e2363c2b2f1ce139fd1c5a215e4e05aa1d9 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:18 -0800 Subject: memcg: add inactive_anon_is_low() inactive_anon_is_low() is a key component of active/inactive anon balancing on reclaim. However, the current inactive_anon_is_low() only considers global reclaim. Therefore, we need the following ugly scan_global_lru() condition: if (lru == LRU_ACTIVE_ANON && (!scan_global_lru(sc) || inactive_anon_is_low(zone))) { shrink_active_list(nr_to_scan, zone, sc, priority, file); return 0; It causes memcg reclaim to always deactivate pages when shrink_list() is called. This patch adds mem_cgroup_inactive_anon_is_low() to improve the active/inactive anon balancing of memcg. Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Cyrill Gorcunov Cc: "Pekka Enberg" Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 9 +++++++++ mm/memcontrol.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- mm/vmscan.c | 37 +++++++++++++++++++++++-------------- 3 files changed, 77 insertions(+), 15 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 59ac95a6450..aad9377c982 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -100,6 +100,8 @@ extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, int priority, enum lru_list lru); +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, + struct zone *zone); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; @@ -251,6 +253,13 @@ static inline bool mem_cgroup_oom_called(struct task_struct *task) { return false; } + +static inline int +mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +{ + return 1; +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 457d671029b..6611328460e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -156,6 +156,9 @@ struct mem_cgroup { unsigned long last_oom_jiffies; int obsolete; atomic_t refcnt; + + unsigned int inactive_ratio; + /* * statistics. This must be placed at the end of memcg.
*/ @@ -431,6 +434,20 @@ long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, return (nr_pages >> priority); } +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +{ + unsigned long active; + unsigned long inactive; + + inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON); + active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON); + + if (inactive * memcg->inactive_ratio < active) + return 1; + + return 0; +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, @@ -1360,6 +1377,29 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } +/* + * The inactive anon list should be small enough that the VM never has to + * do too much work, but large enough that each inactive page has a chance + * to be referenced again before it is swapped out. + * + * this calculation is straightforward porting from + * page_alloc.c::setup_per_zone_inactive_ratio(). + * it describe more detail. + */ +static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg) +{ + unsigned int gb, ratio; + + gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30; + if (gb) + ratio = int_sqrt(10 * gb); + else + ratio = 1; + + memcg->inactive_ratio = ratio; + +} + static DEFINE_MUTEX(set_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, @@ -1398,6 +1438,10 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, GFP_KERNEL, false); if (!progress) retry_count--; } + + if (!ret) + mem_cgroup_set_inactive_ratio(memcg); + return ret; } @@ -1982,7 +2026,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); } - + mem_cgroup_set_inactive_ratio(mem); mem->last_scanned_child = NULL; return &mem->css; diff --git a/mm/vmscan.c b/mm/vmscan.c index e2b31a522a6..b2bc06bffcf 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1310,14 +1310,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, pagevec_release(&pvec); } -/** - * inactive_anon_is_low - check if anonymous pages need to be deactivated - * @zone: zone to check - * - * Returns true if the zone does not have enough inactive anon pages, - * meaning some active anon pages need to be deactivated. - */ -static int inactive_anon_is_low(struct zone *zone) +static int inactive_anon_is_low_global(struct zone *zone) { unsigned long active, inactive; @@ -1330,6 +1323,25 @@ static int inactive_anon_is_low(struct zone *zone) return 0; } +/** + * inactive_anon_is_low - check if anonymous pages need to be deactivated + * @zone: zone to check + * @sc: scan control of this context + * + * Returns true if the zone does not have enough inactive anon pages, + * meaning some active anon pages need to be deactivated. 
+ */ +static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) +{ + int low; + + if (scan_global_lru(sc)) + low = inactive_anon_is_low_global(zone); + else + low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); + return low; +} + static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct zone *zone, struct scan_control *sc, int priority) { @@ -1340,8 +1352,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, return 0; } - if (lru == LRU_ACTIVE_ANON && - (!scan_global_lru(sc) || inactive_anon_is_low(zone))) { + if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) { shrink_active_list(nr_to_scan, zone, sc, priority, file); return 0; } @@ -1509,9 +1520,7 @@ static void shrink_zone(int priority, struct zone *zone, * Even if we did not try to evict anon pages at all, we want to * rebalance the anon lru active/inactive ratio. */ - if (!scan_global_lru(sc) || inactive_anon_is_low(zone)) - shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); - else if (!scan_global_lru(sc)) + if (inactive_anon_is_low(zone, sc)) shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); throttle_vm_writeout(sc->gfp_mask); @@ -1807,7 +1816,7 @@ loop_again: * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. */ - if (inactive_anon_is_low(zone)) + if (inactive_anon_is_low(zone, &sc)) shrink_active_list(SWAP_CLUSTER_MAX, zone, &sc, priority, 0); -- cgit v1.2.3-70-g09d2 From a3d8e0549d913e30968fa02e505dfe02c0a23e0d Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:19 -0800 Subject: memcg: add mem_cgroup_zone_nr_pages() Introduce mem_cgroup_zone_nr_pages(). It is called by the zone_nr_pages() helper function. This patch doesn't have any behavior change.
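The dispatch pattern this helper enables can be shown with a small compilable model, with the structures reduced to bare arrays and all names hypothetical: one accessor serves global reclaim from the zone's counters and memcg reclaim from the per-(memcg, zone) counters.

	#include <stddef.h>
	#include <stdio.h>

	enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
			LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, NR_LRU_LISTS };

	struct zone_counts  { unsigned long nr[NR_LRU_LISTS]; };
	struct memcg_counts { unsigned long nr[NR_LRU_LISTS]; };  /* per (memcg, zone) */
	struct scan_control { struct memcg_counts *mem_cgroup; }; /* NULL == global */

	static unsigned long zone_nr_pages(struct zone_counts *zone,
					   struct scan_control *sc,
					   enum lru_list lru)
	{
		if (sc->mem_cgroup)			/* memcg reclaim */
			return sc->mem_cgroup->nr[lru];
		return zone->nr[lru];			/* global reclaim */
	}

	int main(void)
	{
		struct zone_counts zone   = { { 100, 200, 300, 400 } };
		struct memcg_counts memcg = { {  10,  20,  30,  40 } };
		struct scan_control g = { NULL }, m = { &memcg };

		printf("global active file: %lu\n",
		       zone_nr_pages(&zone, &g, LRU_ACTIVE_FILE));
		printf("memcg  active file: %lu\n",
		       zone_nr_pages(&zone, &m, LRU_ACTIVE_FILE));
		return 0;
	}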
Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Acked-by: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 11 +++++++++++ mm/memcontrol.c | 12 +++++++++++- mm/vmscan.c | 3 +++ 3 files changed, 25 insertions(+), 1 deletion(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index aad9377c982..b1defd6a278 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -102,6 +102,9 @@ extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, int priority, enum lru_list lru); int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone); +unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, + struct zone *zone, + enum lru_list lru); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; @@ -260,6 +263,14 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) return 1; } +static inline unsigned long +mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, + enum lru_list lru) +{ + return 0; +} + + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6611328460e..313247e6c50 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -186,7 +186,6 @@ pcg_default_flags[NR_CHARGE_TYPE] = { 0, /* FORCE */ }; - /* for encoding cft->private value on file */ #define _MEM (0) #define _MEMSWAP (1) @@ -448,6 +447,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) return 0; } +unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, + struct zone *zone, + enum lru_list lru) +{ + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); + + return MEM_CGROUP_ZSTAT(mz, lru); +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, diff --git a/mm/vmscan.c b/mm/vmscan.c index b2bc06bffcf..d958d624d3a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -139,6 +139,9 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, struct scan_control *sc) { + if (!scan_global_lru(sc)) + return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); + return zone_page_state(zone, NR_LRU_BASE + lru); } -- cgit v1.2.3-70-g09d2 From 3e2f41f1f64744f7942980d93cc93dd3e5924560 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:20 -0800 Subject: memcg: add zone_reclaim_stat Introduce the mem_cgroup_per_zone::reclaim_stat member and its statistics collecting function. Now, get_scan_ratio() can calculate the correct value for memcg reclaim.
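The dual bookkeeping this patch sets up can be sketched in a few lines of userspace C (all names here are hypothetical; the real update_page_reclaim_stat() appears in the mm/swap.c hunk below): every scanned/rotated event is recorded in the zone's reclaim_stat and, when the page is charged to a memcg, in that memcg's per-zone reclaim_stat too.

	#include <stddef.h>
	#include <stdio.h>

	struct zone_reclaim_stat {
		unsigned long recent_rotated[2];	/* [0] anon, [1] file */
		unsigned long recent_scanned[2];
	};

	static struct zone_reclaim_stat zone_stat;
	static struct zone_reclaim_stat memcg_stat;

	/* NULL when the page is not charged or memcg is disabled */
	static struct zone_reclaim_stat *page_memcg_stat(int charged)
	{
		return charged ? &memcg_stat : NULL;
	}

	static void update_reclaim_stat(int charged, int file, int rotated)
	{
		struct zone_reclaim_stat *mstat = page_memcg_stat(charged);

		zone_stat.recent_scanned[file]++;
		if (rotated)
			zone_stat.recent_rotated[file]++;

		if (!mstat)
			return;
		mstat->recent_scanned[file]++;
		if (rotated)
			mstat->recent_rotated[file]++;
	}

	int main(void)
	{
		update_reclaim_stat(1, 1, 1);	/* charged file page, rotated */
		update_reclaim_stat(0, 0, 0);	/* uncharged anon page */
		printf("zone  scanned: anon=%lu file=%lu\n",
		       zone_stat.recent_scanned[0], zone_stat.recent_scanned[1]);
		printf("memcg scanned: anon=%lu file=%lu\n",
		       memcg_stat.recent_scanned[0], memcg_stat.recent_scanned[1]);
		return 0;
	}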
[hugh@veritas.com: avoid reclaim_stat oops when disabled] Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 16 ++++++++++++++++ mm/memcontrol.c | 29 +++++++++++++++++++++++++++++ mm/swap.c | 34 +++++++++++++++++++++++++--------- mm/vmscan.c | 27 +++++++++++++-------------- 4 files changed, 83 insertions(+), 23 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b1defd6a278..36b8ebb39b8 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -105,6 +105,10 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, enum lru_list lru); +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone); +struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; @@ -271,6 +275,18 @@ mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, } +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) +{ + return NULL; +} + +static inline struct zone_reclaim_stat* +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + return NULL; +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 313247e6c50..7b7f4dc0503 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -103,6 +103,8 @@ struct mem_cgroup_per_zone { */ struct list_head lists[NR_LRU_LISTS]; unsigned long count[NR_LRU_LISTS]; + + struct zone_reclaim_stat reclaim_stat; }; /* Macro for accessing counter */ #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)]) @@ -458,6 +460,33 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, return MEM_CGROUP_ZSTAT(mz, lru); } +struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, + struct zone *zone) +{ + int nid = zone->zone_pgdat->node_id; + int zid = zone_idx(zone); + struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid); + + return &mz->reclaim_stat; +} + +struct zone_reclaim_stat * +mem_cgroup_get_reclaim_stat_from_page(struct page *page) +{ + struct page_cgroup *pc; + struct mem_cgroup_per_zone *mz; + + if (mem_cgroup_disabled()) + return NULL; + + pc = lookup_page_cgroup(page); + mz = page_cgroup_zoneinfo(pc); + if (!mz) + return NULL; + + return &mz->reclaim_stat; +} + unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, struct list_head *dst, unsigned long *scanned, int order, diff --git a/mm/swap.c b/mm/swap.c index 26b07e7bc3d..8adb9feb61e 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -151,13 +151,32 @@ void rotate_reclaimable_page(struct page *page) } } +static void update_page_reclaim_stat(struct zone *zone, struct page *page, + int file, int rotated) +{ + struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; + struct zone_reclaim_stat *memcg_reclaim_stat; + + memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page); + + reclaim_stat->recent_scanned[file]++; + if (rotated) + reclaim_stat->recent_rotated[file]++; + + if (!memcg_reclaim_stat) + return; + + memcg_reclaim_stat->recent_scanned[file]++; + if (rotated) + 
memcg_reclaim_stat->recent_rotated[file]++; +} + /* * FIXME: speed this up? */ void activate_page(struct page *page) { struct zone *zone = page_zone(page); - struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { @@ -170,8 +189,7 @@ void activate_page(struct page *page) add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); - reclaim_stat->recent_rotated[!!file]++; - reclaim_stat->recent_scanned[!!file]++; + update_page_reclaim_stat(zone, page, !!file, 1); } spin_unlock_irq(&zone->lru_lock); } @@ -386,7 +404,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { int i; struct zone *zone = NULL; - struct zone_reclaim_stat *reclaim_stat = NULL; VM_BUG_ON(is_unevictable_lru(lru)); @@ -394,24 +411,23 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); int file; + int active; if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; - reclaim_stat = &zone->reclaim_stat; spin_lock_irq(&zone->lru_lock); } VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); + active = is_active_lru(lru); file = is_file_lru(lru); - reclaim_stat->recent_scanned[file]++; - if (is_active_lru(lru)) { + if (active) SetPageActive(page); - reclaim_stat->recent_rotated[file]++; - } + update_page_reclaim_stat(zone, page, file, active); add_page_to_lru_list(zone, page, lru); } if (zone) diff --git a/mm/vmscan.c b/mm/vmscan.c index d958d624d3a..56fc7abe4d2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -133,6 +133,9 @@ static DECLARE_RWSEM(shrinker_rwsem); static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, struct scan_control *sc) { + if (!scan_global_lru(sc)) + return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); + return &zone->reclaim_stat; } @@ -1087,17 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_INACTIVE_ANON, -count[LRU_INACTIVE_ANON]); - if (scan_global_lru(sc)) { + if (scan_global_lru(sc)) zone->pages_scanned += nr_scan; - reclaim_stat->recent_scanned[0] += - count[LRU_INACTIVE_ANON]; - reclaim_stat->recent_scanned[0] += - count[LRU_ACTIVE_ANON]; - reclaim_stat->recent_scanned[1] += - count[LRU_INACTIVE_FILE]; - reclaim_stat->recent_scanned[1] += - count[LRU_ACTIVE_FILE]; - } + + reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; + reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON]; + reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE]; + reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE]; + spin_unlock_irq(&zone->lru_lock); nr_scanned += nr_scan; @@ -1155,7 +1155,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, SetPageLRU(page); lru = page_lru(page); add_page_to_lru_list(zone, page, lru); - if (PageActive(page) && scan_global_lru(sc)) { + if (PageActive(page)) { int file = !!page_is_file_cache(page); reclaim_stat->recent_rotated[file]++; } @@ -1230,8 +1230,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, */ if (scan_global_lru(sc)) { zone->pages_scanned += pgscanned; - reclaim_stat->recent_scanned[!!file] += pgmoved; } + reclaim_stat->recent_scanned[!!file] += pgmoved; if (file) __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved); @@ -1272,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * This helps 
balance scan pressure between file and anonymous * pages in get_scan_ratio. */ - if (scan_global_lru(sc)) - reclaim_stat->recent_rotated[!!file] += pgmoved; + reclaim_stat->recent_rotated[!!file] += pgmoved; while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); -- cgit v1.2.3-70-g09d2 From 9439c1c95b5c25b8031b2a7eb7e1590eb84be7f5 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:21 -0800 Subject: memcg: remove mem_cgroup_calc_reclaim() Now that get_scan_ratio() returns the correct value during memcg reclaim, mem_cgroup_calc_reclaim() can be removed. With this, memcg reclaim gains the same anon/file reclaim-balancing capability as global reclaim. Acked-by: KAMEZAWA Hiroyuki Acked-by: Rik van Riel Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 10 ---------- mm/memcontrol.c | 21 --------------------- mm/vmscan.c | 27 ++++++++++----------------- 3 files changed, 10 insertions(+), 48 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 36b8ebb39b8..8752052da8d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -97,9 +97,6 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority); extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority); - -extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, - int priority, enum lru_list lru); int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone); unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, @@ -244,13 +241,6 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, { } -static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, - struct zone *zone, int priority, - enum lru_list lru) -{ - return 0; -} - static inline bool mem_cgroup_disabled(void) { return true; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7b7f4dc0503..b8c1e5acc25 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) mem->prev_priority = priority; } -/* - * Calculate # of pages to be scanned in this priority/zone. - * See also vmscan.c - * - * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h) - */ - -long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone, - int priority, enum lru_list lru) -{ - long nr_pages; - int nid = zone->zone_pgdat->node_id; - int zid = zone_idx(zone); - struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid); - - nr_pages = MEM_CGROUP_ZSTAT(mz, lru); - - return (nr_pages >> priority); -} - int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) { unsigned long active; diff --git a/mm/vmscan.c b/mm/vmscan.c index 56fc7abe4d2..66bb6ef44b5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone, get_scan_ratio(zone, sc, percent); for_each_evictable_lru(l) { - if (scan_global_lru(sc)) { - int file = is_file_lru(l); - int scan; + int file = is_file_lru(l); + int scan; - scan = zone_page_state(zone, NR_LRU_BASE + l); - if (priority) { - scan >>= priority; - scan = (scan * percent[file]) / 100; - } + scan = zone_page_state(zone, NR_LRU_BASE + l); + if (priority) { + scan >>= priority; + scan = (scan * percent[file]) / 100; + } + if (scan_global_lru(sc)) { zone->lru[l].nr_scan += scan; nr[l] = zone->lru[l].nr_scan; if (nr[l] >= swap_cluster_max) zone->lru[l].nr_scan = 0; else nr[l] = 0; - } else { - /* - * This reclaim occurs not because zone memory shortage - * but because memory controller hits its limit. - * Don't modify zone reclaim related data. - */ - nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone, - priority, l); - } + } else + nr[l] = scan; } while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || -- cgit v1.2.3-70-g09d2 From e72e2bd6747c7a5c432197b6614cf3a387e61a0e Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Wed, 7 Jan 2009 18:08:23 -0800 Subject: memcg: rename scan global lru Rename scan_global_lru() to scanning_global_lru(). 
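The renamed predicate only encodes the convention that a NULL sc->mem_cgroup means global reclaim; the new name makes clear it asks a question about the current scan rather than performing an action. A tiny self-contained sketch of that convention follows, with a simplified scan_control and a function standing in for the macro shown in the diff below.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int id; };		/* stand-in; opaque in the kernel */

/* Simplified scan_control: only the field the predicate inspects. */
struct scan_control {
	struct mem_cgroup *mem_cgroup;	/* NULL means global reclaim */
};

/* Same test the scanning_global_lru() macro performs. */
static bool scanning_global_lru(const struct scan_control *sc)
{
	return sc->mem_cgroup == NULL;
}

int main(void)
{
	struct mem_cgroup memcg = { .id = 1 };
	struct scan_control global_sc = { .mem_cgroup = NULL };
	struct scan_control memcg_sc = { .mem_cgroup = &memcg };

	printf("global scan: %d, memcg scan: %d\n",
	       scanning_global_lru(&global_sc),
	       scanning_global_lru(&memcg_sc));
	return 0;
}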
Signed-off-by: KAMEZAWA Hiroyuki Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 66bb6ef44b5..f03c239440a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -125,15 +125,15 @@ static LIST_HEAD(shrinker_list); static DECLARE_RWSEM(shrinker_rwsem); #ifdef CONFIG_CGROUP_MEM_RES_CTLR -#define scan_global_lru(sc) (!(sc)->mem_cgroup) +#define scanning_global_lru(sc) (!(sc)->mem_cgroup) #else -#define scan_global_lru(sc) (1) +#define scanning_global_lru(sc) (1) #endif static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, struct scan_control *sc) { - if (!scan_global_lru(sc)) + if (!scanning_global_lru(sc)) return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone); return &zone->reclaim_stat; @@ -142,7 +142,7 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone, static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc, enum lru_list lru) { - if (!scan_global_lru(sc)) + if (!scanning_global_lru(sc)) return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); return zone_page_state(zone, NR_LRU_BASE + lru); @@ -1090,7 +1090,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_zone_page_state(zone, NR_INACTIVE_ANON, -count[LRU_INACTIVE_ANON]); - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) zone->pages_scanned += nr_scan; reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON]; @@ -1129,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, if (current_is_kswapd()) { __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan); __count_vm_events(KSWAPD_STEAL, nr_freed); - } else if (scan_global_lru(sc)) + } else if (scanning_global_lru(sc)) __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan); __count_zone_vm_events(PGSTEAL, zone, nr_freed); @@ -1228,7 +1228,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, * zone->pages_scanned is used for detect zone's oom * mem_cgroup remembers nr_scan by itself. */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { zone->pages_scanned += pgscanned; } reclaim_stat->recent_scanned[!!file] += pgmoved; @@ -1337,7 +1337,7 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) { int low; - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) low = inactive_anon_is_low_global(zone); else low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); @@ -1390,7 +1390,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) + zone_nr_pages(zone, sc, LRU_INACTIVE_FILE); - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { free = zone_page_state(zone, NR_FREE_PAGES); /* If we have very few page cache pages, force-scan anon pages. */ @@ -1474,7 +1474,7 @@ static void shrink_zone(int priority, struct zone *zone, scan >>= priority; scan = (scan * percent[file]) / 100; } - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { zone->lru[l].nr_scan += scan; nr[l] = zone->lru[l].nr_scan; if (nr[l] >= swap_cluster_max) @@ -1550,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist, * Take care memory controller reclaiming has small influence * to global LRU. 
*/ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) continue; note_zone_scanning_priority(zone, priority); @@ -1603,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, delayacct_freepages_start(); - if (scan_global_lru(sc)) + if (scanning_global_lru(sc)) count_vm_event(ALLOCSTALL); /* * mem_cgroup will not do shrink_slab. */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) @@ -1627,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, * Don't shrink slabs when reclaiming memory from * over limit cgroups */ - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages); if (reclaim_state) { sc->nr_reclaimed += reclaim_state->reclaimed_slab; @@ -1658,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, congestion_wait(WRITE, HZ/10); } /* top priority shrink_zones still had more to do? don't OOM, then */ - if (!sc->all_unreclaimable && scan_global_lru(sc)) + if (!sc->all_unreclaimable && scanning_global_lru(sc)) ret = sc->nr_reclaimed; out: /* @@ -1671,7 +1671,7 @@ out: if (priority < 0) priority = 0; - if (scan_global_lru(sc)) { + if (scanning_global_lru(sc)) { for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) -- cgit v1.2.3-70-g09d2 From a7885eb8ad465ec9db99ac5b5e6680f0ca8e11c8 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:24 -0800 Subject: memcg: swappiness Currently, /proc/sys/vm/swappiness can change the swappiness ratio for global reclaim. However, memcg reclaim has no tuning parameter of its own. In general, the optimal swappiness depends on the workload (e.g. an HPC workload needs lower swappiness than others). A per-cgroup swappiness setting therefore improves administrator tunability. (A sketch of how this value feeds the reclaim scan balance follows the documentation hunk below.) Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Cc: Daisuke Nishimura Cc: Hugh Dickins Cc: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/controllers/memory.txt | 9 +++++ include/linux/swap.h | 3 +- mm/memcontrol.c | 78 ++++++++++++++++++++++++++++++++---- mm/vmscan.c | 7 ++-- 4 files changed, 86 insertions(+), 11 deletions(-) (limited to 'mm/vmscan.c') diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt index d71745cc2f0..e1501964df1 100644 --- a/Documentation/controllers/memory.txt +++ b/Documentation/controllers/memory.txt @@ -314,6 +314,15 @@ will be charged as a new owner of it. showing for better debug please see the code for meanings. +5.3 swappiness + Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. + + The following cgroups' swappiness can't be changed. + - root cgroup (uses /proc/sys/vm/swappiness). + - a cgroup which uses hierarchy and has child cgroups. + - a cgroup which uses hierarchy and is not the root of the hierarchy. + + 6. Hierarchy support The memory controller supports a deep hierarchy and hierarchical accounting.
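As promised above, here is a rough userspace sketch of where the per-group value ends up: sc->swappiness biases get_scan_ratio()'s anon/file split. The scan_weights() helper below is hypothetical and keeps only the static anon_prio/file_prio part of the calculation; treat the exact formula as an approximation, since the kernel additionally scales by the recent_rotated/recent_scanned history collected earlier in this series.

#include <stdio.h>

/*
 * Roughly how vmscan weights anon vs. file scanning from swappiness
 * (0..100): anon pressure rises with swappiness, file pressure with
 * its complement.
 */
static void scan_weights(unsigned int swappiness,
			 unsigned int *anon_pct, unsigned int *file_pct)
{
	unsigned int anon_prio = swappiness;
	unsigned int file_prio = 200 - swappiness;

	*anon_pct = 100 * anon_prio / (anon_prio + file_prio);
	*file_pct = 100 - *anon_pct;
}

int main(void)
{
	unsigned int samples[] = { 0, 60, 100 };
	unsigned int anon, file;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		scan_weights(samples[i], &anon, &file);
		printf("swappiness %3u -> anon %u%%, file %u%%\n",
		       samples[i], anon, file);
	}
	return 0;
}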
diff --git a/include/linux/swap.h b/include/linux/swap.h index be938ce4895..4ccca25d0f0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -214,7 +214,8 @@ static inline void lru_cache_add_active_file(struct page *page) extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); + gfp_t gfp_mask, bool noswap, + unsigned int swappiness); extern int __isolate_lru_page(struct page *page, int mode, int file); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 027c0dd7a83..ab2ecbb95b8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -164,6 +164,9 @@ struct mem_cgroup { int obsolete; atomic_t refcnt; + unsigned int swappiness; + + unsigned int inactive_ratio; /* @@ -636,6 +639,22 @@ static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem) return false; } +static unsigned int get_swappiness(struct mem_cgroup *memcg) +{ + struct cgroup *cgrp = memcg->css.cgroup; + unsigned int swappiness; + + /* root ? */ + if (cgrp->parent == NULL) + return vm_swappiness; + + spin_lock(&memcg->reclaim_param_lock); + swappiness = memcg->swappiness; + spin_unlock(&memcg->reclaim_param_lock); + + return swappiness; +} + /* * Dance down the hierarchy if needed to reclaim memory. We remember the * last child we reclaimed from, so that we don't end up penalizing @@ -656,7 +675,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, * but there might be left over accounting, even after children * have left. */ - ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap); + ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap, + get_swappiness(root_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; if (!root_mem->use_hierarchy) @@ -672,7 +692,8 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, cgroup_unlock(); continue; } - ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap); + ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap, + get_swappiness(next_mem)); if (mem_cgroup_check_under_limit(root_mem)) return 0; cgroup_lock(); @@ -1400,7 +1421,8 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) rcu_read_unlock(); do { - progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true); + progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true, + get_swappiness(mem)); progress += mem_cgroup_check_under_limit(mem); } while (!progress && --retry); @@ -1468,7 +1490,9 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, break; progress = try_to_free_mem_cgroup_pages(memcg, - GFP_KERNEL, false); + GFP_KERNEL, + false, + get_swappiness(memcg)); if (!progress) retry_count--; } @@ -1512,7 +1536,8 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, break; oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true); + try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true, + get_swappiness(memcg)); curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); if (curusage >= oldusage) retry_count--; @@ -1643,8 +1668,8 @@ try_to_free: ret = -EINTR; goto out; } - progress = try_to_free_mem_cgroup_pages(mem, - GFP_KERNEL, false); + progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL, + false, get_swappiness(mem)); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ @@ -1864,6 
+1889,37 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, return 0; } +static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + + return get_swappiness(memcg); +} + +static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, + u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup *parent; + if (val > 100) + return -EINVAL; + + if (cgrp->parent == NULL) + return -EINVAL; + + parent = mem_cgroup_from_cont(cgrp->parent); + /* If under hierarchy, only empty-root can set this value */ + if ((parent->use_hierarchy) || + (memcg->use_hierarchy && !list_empty(&cgrp->children))) + return -EINVAL; + + spin_lock(&memcg->reclaim_param_lock); + memcg->swappiness = val; + spin_unlock(&memcg->reclaim_param_lock); + + return 0; +} + static struct cftype mem_cgroup_files[] = { { @@ -1902,6 +1958,11 @@ static struct cftype mem_cgroup_files[] = { .write_u64 = mem_cgroup_hierarchy_write, .read_u64 = mem_cgroup_hierarchy_read, }, + { + .name = "swappiness", + .read_u64 = mem_cgroup_swappiness_read, + .write_u64 = mem_cgroup_swappiness_write, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -2093,6 +2154,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) mem->last_scanned_child = NULL; spin_lock_init(&mem->reclaim_param_lock); + if (parent) + mem->swappiness = get_swappiness(parent); + return &mem->css; free_out: for_each_node_state(node, N_POSSIBLE) diff --git a/mm/vmscan.c b/mm/vmscan.c index f03c239440a..ece2f405187 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1707,14 +1707,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #ifdef CONFIG_CGROUP_MEM_RES_CTLR unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, - gfp_t gfp_mask, - bool noswap) + gfp_t gfp_mask, + bool noswap, + unsigned int swappiness) { struct scan_control sc = { .may_writepage = !laptop_mode, .may_swap = 1, .swap_cluster_max = SWAP_CLUSTER_MAX, - .swappiness = vm_swappiness, + .swappiness = swappiness, .order = 0, .mem_cgroup = mem_cont, .isolate_pages = mem_cgroup_isolate_pages, -- cgit v1.2.3-70-g09d2 From c772be939e078afd2505ede7d596a30f8f61de95 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 7 Jan 2009 18:08:25 -0800 Subject: memcg: fix calculation of active_ratio Currently, a memcg's inactive_ratio is calculated when the limit is set, because page_alloc.c does so and the current implementation is a straightforward port. However, memcg recently gained the hierarchy feature. Under hierarchical restriction, the memory limit is decided not only by the current cgroup's memory.limit_in_bytes but also by parent limits and sibling memory usage, so the optimal inactive_ratio changes frequently. Recalculating it on every use is therefore better.
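To see what on-demand calculation yields, here is a small self-contained sketch of the int_sqrt(10 * gb) rule used by calc_inactive_ratio() in the diff below. The isqrt() helper stands in for the kernel's int_sqrt(), and the sketch takes the size in GB directly rather than deriving it from page counts; the anon list is considered balanced while active <= inactive * ratio.

#include <stdio.h>

/* Integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

/* inactive_ratio as calc_inactive_ratio() derives it. */
static unsigned long inactive_ratio(unsigned long total_gb)
{
	unsigned long ratio = isqrt(10 * total_gb);

	return ratio ? ratio : 1;	/* below 1GB the ratio floors at 1 */
}

int main(void)
{
	unsigned long sizes[] = { 0, 1, 4, 16, 64 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%3lu GB -> inactive_ratio %lu\n",
		       sizes[i], inactive_ratio(sizes[i]));
	return 0;	/* e.g. 1GB -> 3, 4GB -> 6, 16GB -> 12, 64GB -> 25 */
}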
Tested-by: KAMEZAWA Hiroyuki Acked-by: KAMEZAWA Hiroyuki Signed-off-by: KOSAKI Motohiro Cc: Balbir Singh Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 ++-- mm/memcontrol.c | 64 ++++++++++++++++++++++------------------------ mm/vmscan.c | 2 +- 3 files changed, 34 insertions(+), 37 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 8752052da8d..056cf82c0e8 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -97,8 +97,7 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority); extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority); -int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, - struct zone *zone); +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, enum lru_list lru); @@ -252,7 +251,7 @@ static inline bool mem_cgroup_oom_called(struct task_struct *task) } static inline int -mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) { return 1; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ab2ecbb95b8..c7d78eca836 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -166,9 +166,6 @@ struct mem_cgroup { unsigned int swappiness; - - unsigned int inactive_ratio; - /* * statistics. This must be placed at the end of memcg. */ @@ -432,15 +429,43 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority) spin_unlock(&mem->reclaim_param_lock); } -int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) +static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages) { unsigned long active; unsigned long inactive; + unsigned long gb; + unsigned long inactive_ratio; inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON); active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON); - if (inactive * memcg->inactive_ratio < active) + gb = (inactive + active) >> (30 - PAGE_SHIFT); + if (gb) + inactive_ratio = int_sqrt(10 * gb); + else + inactive_ratio = 1; + + if (present_pages) { + present_pages[0] = inactive; + present_pages[1] = active; + } + + return inactive_ratio; +} + +int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) +{ + unsigned long active; + unsigned long inactive; + unsigned long present_pages[2]; + unsigned long inactive_ratio; + + inactive_ratio = calc_inactive_ratio(memcg, present_pages); + + inactive = present_pages[0]; + active = present_pages[1]; + + if (inactive * inactive_ratio < active) return 1; return 0; @@ -1432,29 +1457,6 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) return 0; } -/* - * The inactive anon list should be small enough that the VM never has to - * do too much work, but large enough that each inactive page has a chance - * to be referenced again before it is swapped out. - * - * this calculation is straightforward porting from - * page_alloc.c::setup_per_zone_inactive_ratio(). - * it describe more detail. 
- */ -static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg) -{ - unsigned int gb, ratio; - - gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30; - if (gb) - ratio = int_sqrt(10 * gb); - else - ratio = 1; - - memcg->inactive_ratio = ratio; - -} - static DEFINE_MUTEX(set_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, @@ -1496,9 +1498,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!progress) retry_count--; } - if (!ret) - mem_cgroup_set_inactive_ratio(memcg); - return ret; } @@ -1858,7 +1857,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, } #ifdef CONFIG_DEBUG_VM - cb->fill(cb, "inactive_ratio", mem_cont->inactive_ratio); + cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL)); { int nid, zid; @@ -2150,7 +2149,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) res_counter_init(&mem->res, NULL); res_counter_init(&mem->memsw, NULL); } - mem_cgroup_set_inactive_ratio(mem); mem->last_scanned_child = NULL; spin_lock_init(&mem->reclaim_param_lock); diff --git a/mm/vmscan.c b/mm/vmscan.c index ece2f405187..9a27c44aa32 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1340,7 +1340,7 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) if (scanning_global_lru(sc)) low = inactive_anon_is_low_global(zone); else - low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup, zone); + low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); return low; } -- cgit v1.2.3-70-g09d2