From 7a81b88cb53e335ff7d019e6398c95792c817d93 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 7 Jan 2009 18:07:48 -0800
Subject: memcg: introduce charge-commit-cancel style of functions

There is a small race in do_swap_page(). When the page being swapped in
is charged, its mapcount can be greater than 0. But, at the same time,
some process sharing it can call unmap and bring the mapcount from 1 to
0, at which point the page is uncharged.

      CPUA                   CPUB
                             mapcount == 1.
   (1) charge if mapcount==0
                             zap_pte_range()
                             (2) mapcount 1 => 0.
                             (3) uncharge(). (success)
   (4) set page's rmap()
       mapcount 0 => 1

Then, this swap page's account is leaked.

To fix this, I added a new interface:

  - charge
      account to res_counter by PAGE_SIZE and try to free pages if
      necessary.
  - commit
      register the page_cgroup and add it to the LRU if necessary.
  - cancel
      uncharge PAGE_SIZE because of do_swap_page() failure.

     CPUA
  (1) charge (always)
  (2) set page's rmap (mapcount > 0)
  (3) commit the charge; whether it was necessary is known only after
      set_pte().

This protocol uses the PCG_USED bit on page_cgroup to avoid
over-accounting. The usual mem_cgroup_charge_common() does
charge -> commit in one step.

This patch also adds the following functions to clarify all charges:

  - mem_cgroup_newpage_charge()
      Replacement for mem_cgroup_charge(), called against newly
      allocated anon pages.
  - mem_cgroup_charge_migrate_fixup()
      Called only from remove_migration_ptes(). We'll have to rewrite
      this later (this patch just keeps the old behavior); the function
      will be removed by a follow-up patch that makes migration clearer.

Good for clarifying "what we do".

Then, we have the following 4 charge points:

  - newpage
  - swap-in
  - add-to-cache
  - migration

[akpm@linux-foundation.org: add missing inline directives to stubs]
Signed-off-by: KAMEZAWA Hiroyuki
Reviewed-by: Daisuke Nishimura
Cc: Balbir Singh
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 55373983c9c..246dcb973ae 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -133,7 +133,7 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	 * be reliable, and this charge can actually fail: oh well, we don't
 	 * make the situation any worse by proceeding as if it had succeeded.
 	 */
-	mem_cgroup_charge(new, mm, GFP_ATOMIC);
+	mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC);
 
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
--
cgit v1.2.3-70-g09d2


From 01b1ae63c2270cbacfd43fea94578c17950eb548 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Wed, 7 Jan 2009 18:07:50 -0800
Subject: memcg: simple migration handling

Currently, management of "charge" under page migration is done in the
following manner (assume we migrate page contents from oldpage to
newpage):

 before
  - "newpage" is charged before migration.
 at success
  - "oldpage" is uncharged somewhere (unmap, radix-tree-replace).
 at failure
  - "newpage" is uncharged.
  - "oldpage" is charged if necessary (*1).

But (*1) is not reliable, because it has to use GFP_ATOMIC.

This patch changes the behavior as follows, using the
charge/commit/cancel ops:

 before
  - charge PAGE_SIZE (no target page).
 success
  - commit the charge against "newpage".
 failure
  - commit the charge against "oldpage".
    (The PCG_USED bit works effectively to avoid double-counting.)
  - if "oldpage" is obsolete, cancel the charge of PAGE_SIZE.
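
The ops themselves come from the previous patch. As an illustrative
sketch only (not part of this diff), a swap-in style caller combines
them as below. mem_cgroup_try_charge() and
mem_cgroup_commit_charge_swapin() are taken from this series' hunks;
mem_cgroup_cancel_charge_swapin() and map_anon_page() are assumed names
standing in for the cancel step and the set_pte()/rmap step:

	static int swapin_charge_sketch(struct mm_struct *mm, struct page *page)
	{
		struct mem_cgroup *ptr = NULL;

		/* (1) charge PAGE_SIZE up front; may reclaim, may fail */
		if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr))
			return -ENOMEM;

		/* (2) set the pte and rmap; mapcount goes 0 -> 1 */
		if (map_anon_page(mm, page)) {
			/* cancel: the fault failed, give PAGE_SIZE back */
			mem_cgroup_cancel_charge_swapin(ptr);
			return -EFAULT;
		}

		/*
		 * (3) commit: mark the page_cgroup PCG_USED and put it on
		 * the LRU. A racing commit of the same page finds PCG_USED
		 * already set and does not account it a second time.
		 */
		mem_cgroup_commit_charge_swapin(page, ptr);
		return 0;
	}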
Signed-off-by: KAMEZAWA Hiroyuki
Reviewed-by: Daisuke Nishimura
Cc: Balbir Singh
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/memcontrol.h |  19 +++----
 mm/memcontrol.c            | 108 ++++++++++++++++++++++-----------------------
 mm/migrate.c               |  42 ++++++------------
 3 files changed, 73 insertions(+), 96 deletions(-)

(limited to 'mm/migrate.c')

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c592f315cd0..b095f5f6ecf 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -29,8 +29,6 @@ struct mm_struct;
 
 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask);
-extern int mem_cgroup_charge_migrate_fixup(struct page *page,
-				struct mm_struct *mm, gfp_t gfp_mask);
 /* for swap handling */
 extern int mem_cgroup_try_charge(struct mm_struct *mm,
 		gfp_t gfp_mask, struct mem_cgroup **ptr);
@@ -60,8 +58,9 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 	((cgroup) == mem_cgroup_from_task((mm)->owner))
 
 extern int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage);
-extern void mem_cgroup_end_migration(struct page *page);
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
+extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
+	struct page *oldpage, struct page *newpage);
 
 /*
  * For memory reclaim.
@@ -94,12 +93,6 @@ static inline int mem_cgroup_cache_charge(struct page *page,
 	return 0;
 }
 
-static inline int mem_cgroup_charge_migrate_fixup(struct page *page,
-					struct mm_struct *mm, gfp_t gfp_mask)
-{
-	return 0;
-}
-
 static inline int mem_cgroup_try_charge(struct mm_struct *mm,
 			gfp_t gfp_mask, struct mem_cgroup **ptr)
 {
@@ -144,12 +137,14 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 }
 
 static inline int
-mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	return 0;
 }
 
-static inline void mem_cgroup_end_migration(struct page *page)
+static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
+					struct page *oldpage,
+					struct page *newpage)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c34eb52bdc3..b71195e8198 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -627,34 +627,6 @@ int mem_cgroup_newpage_charge(struct page *page,
 				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
 }
 
-/*
- * same as mem_cgroup_newpage_charge(), now.
- * But what we assume is different from newpage, and this is special case.
- * treat this in special function. easy for maintenance.
- */
-
-int mem_cgroup_charge_migrate_fixup(struct page *page,
-				struct mm_struct *mm, gfp_t gfp_mask)
-{
-	if (mem_cgroup_subsys.disabled)
-		return 0;
-
-	if (PageCompound(page))
-		return 0;
-
-	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
-		return 0;
-
-	if (unlikely(!mm))
-		mm = &init_mm;
-
-	return mem_cgroup_charge_common(page, mm, gfp_mask,
-				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
-}
-
-
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
@@ -697,7 +669,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
 }
 
-
 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
 	struct page_cgroup *pc;
@@ -782,13 +753,13 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
 }
 
 /*
- * Before starting migration, account against new page.
+ * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
+ * old page belongs to.
  */
-int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
-	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	int ret = 0;
 
 	if (mem_cgroup_subsys.disabled)
@@ -799,42 +770,67 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
 	if (PageCgroupUsed(pc)) {
 		mem = pc->mem_cgroup;
 		css_get(&mem->css);
-		if (PageCgroupCache(pc)) {
-			if (page_is_file_cache(page))
-				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
-			else
-				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-		}
 	}
 	unlock_page_cgroup(pc);
+
 	if (mem) {
-		ret = mem_cgroup_charge_common(newpage, NULL,
-					GFP_HIGHUSER_MOVABLE,
-					ctype, mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
 		css_put(&mem->css);
 	}
+	*ptr = mem;
 	return ret;
 }
 
 /* remove redundant charge if migration failed */
-void mem_cgroup_end_migration(struct page *newpage)
+void mem_cgroup_end_migration(struct mem_cgroup *mem,
+		struct page *oldpage, struct page *newpage)
 {
+	struct page *target, *unused;
+	struct page_cgroup *pc;
+	enum charge_type ctype;
+
+	if (!mem)
+		return;
+
+	/* at migration success, oldpage->mapping is NULL. */
+	if (oldpage->mapping) {
+		target = oldpage;
+		unused = NULL;
+	} else {
+		target = newpage;
+		unused = oldpage;
+	}
+
+	if (PageAnon(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+	else if (page_is_file_cache(target))
+		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	else
+		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	/* unused page is not on radix-tree now. */
+	if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED)
+		__mem_cgroup_uncharge_common(unused, ctype);
+
+	pc = lookup_page_cgroup(target);
 	/*
-	 * At success, page->mapping is not NULL.
-	 * special rollback care is necessary when
-	 * 1. at migration failure. (newpage->mapping is cleared in this case)
-	 * 2. the newpage was moved but not remapped again because the task
-	 *    exits and the newpage is obsolete. In this case, the new page
-	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
-	 *    always for avoiding mess. The page_cgroup will be removed if
-	 *    unnecessary. File cache pages is still on radix-tree. Don't
-	 *    care it.
+	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
+	 * page_cgroup, so double-counting is effectively avoided.
+	 */
+	__mem_cgroup_commit_charge(mem, pc, ctype);
+
+	/*
+	 * Both oldpage and newpage are still under lock_page(), so we
+	 * don't have to care about races in the radix-tree. But we must
+	 * be careful about whether this page is mapped or not.
+	 *
+	 * There is a case for !page_mapped(): at the start of migration,
+	 * oldpage was mapped, but now it's zapped. We know the *target*
+	 * page is not freed/reused under us, and
+	 * mem_cgroup_uncharge_page() does all the necessary checks.
 	 */
-	if (!newpage->mapping)
-		__mem_cgroup_uncharge_common(newpage,
-				MEM_CGROUP_CHARGE_TYPE_FORCE);
-	else if (PageAnon(newpage))
-		mem_cgroup_uncharge_page(newpage);
 }
 
 /*
diff --git a/mm/migrate.c b/mm/migrate.c
index 246dcb973ae..a30ea5fcf9f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
 		goto out;
 
-	/*
-	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
-	 * Failure is not an option here: we're now expected to remove every
-	 * migration pte, and will cause crashes otherwise. Normally this
-	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
-	 * page_cgroup count for safety, that's now attached to the new page,
-	 * so this charge should just be another incrementation of the count,
-	 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
-	 * there's been a force_empty, those reference counts may no longer
-	 * be reliable, and this charge can actually fail: oh well, we don't
-	 * make the situation any worse by proceeding as if it had succeeded.
-	 */
-	mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC);
-
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 	anon = PageAnon(page);
 	page->mapping = NULL;
 
-	if (!anon) /* This page was removed from radix-tree. */
-		mem_cgroup_uncharge_cache_page(page);
-
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
@@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
 	int charge = 0;
+	struct mem_cgroup *mem;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		goto move_newpage;
 	}
 
-	charge = mem_cgroup_prepare_migration(page, newpage);
-	if (charge == -ENOMEM) {
-		rc = -ENOMEM;
-		goto move_newpage;
-	}
-	/* prepare cgroup just returns 0 or -ENOMEM */
-	BUG_ON(charge);
-
 	rc = -EAGAIN;
+
 	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
 	}
 
+	/* charge against new page */
+	charge = mem_cgroup_prepare_migration(page, &mem);
+	if (charge == -ENOMEM) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+	BUG_ON(charge);
+
 	if (PageWriteback(page)) {
 		if (!force)
-			goto unlock;
+			goto uncharge;
 		wait_on_page_writeback(page);
 	}
 	/*
@@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();
-
+uncharge:
+	if (!charge)
+		mem_cgroup_end_migration(mem, page, newpage);
 unlock:
 	unlock_page(page);
 
@@ -709,8 +697,6 @@ unlock:
 	}
 
 move_newpage:
-	if (!charge)
-		mem_cgroup_end_migration(newpage);
 
 	/*
 	 * Move the new page to the LRU. If migration was not successful
--
cgit v1.2.3-70-g09d2
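
Taken together, the unmap_and_move() hunks above reduce the charge
lifecycle during migration to the following shape (a condensed sketch
of this patch's result, with all error paths other than the
charge-related ones elided):

	lock_page(page);

	/* charge PAGE_SIZE, attributed to the old page's mem_cgroup */
	charge = mem_cgroup_prepare_migration(page, &mem);
	if (charge == -ENOMEM)
		goto unlock;			/* nothing to roll back */

	/* ... try_to_unmap(), move_to_new_page(), remove_migration_ptes() ... */

	/*
	 * Commit against newpage on success, or back against oldpage on
	 * failure: mem_cgroup_end_migration() picks the target by testing
	 * oldpage->mapping, and the PCG_USED bit prevents double-counting.
	 */
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);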