author | Bob Liu <lliubbo@gmail.com> | 2012-12-11 16:00:41 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-11 17:22:22 -0800
commit | b3092b3b734f146d96ca023a75cacf78078f96d5 (patch)
tree | 56dcf17fe72ff95b5769f76265684d427a389b96 /mm/huge_memory.c
parent | fa475e517adb422cb3492e636195f9b2c0d009c8 (diff)
thp: cleanup: introduce mk_huge_pmd()
Introduce mk_huge_pmd() to simplify the code (a standalone sketch of the pattern follows the sign-offs below).
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Ni zhan Chen <nizhan.chen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
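For readers skimming the commit, here is a minimal userspace sketch of the same consolidation, outside the kernel tree. The pmd_t type and the mk_pmd(), pmd_mkdirty(), maybe_pmd_mkwrite() and pmd_mkhuge() helpers below are simplified stand-ins, not the kernel definitions; they only illustrate how the repeated three-call sequence collapses into one mk_huge_pmd() helper.

```c
/*
 * Standalone sketch of the refactor in this patch. All types and
 * helpers here are simplified placeholders for illustration only.
 */
#include <stdio.h>

typedef unsigned long pmd_t;          /* stand-in for the kernel's pmd_t */

struct vm_area_struct {
	unsigned long vm_page_prot;   /* stand-in protection bits */
	unsigned long vm_flags;
};
struct page { unsigned long pfn; };

#define VM_WRITE  0x1UL
#define PMD_DIRTY 0x2UL
#define PMD_WRITE 0x4UL
#define PMD_HUGE  0x8UL

static pmd_t mk_pmd(struct page *page, unsigned long prot)
{
	return (page->pfn << 12) | prot;
}

static pmd_t pmd_mkdirty(pmd_t pmd) { return pmd | PMD_DIRTY; }
static pmd_t pmd_mkhuge(pmd_t pmd)  { return pmd | PMD_HUGE; }

static pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_WRITE)
		pmd |= PMD_WRITE;
	return pmd;
}

/* The new helper: the same three steps, written once. */
static pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
{
	pmd_t entry;

	entry = mk_pmd(page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	entry = pmd_mkhuge(entry);
	return entry;
}

int main(void)
{
	struct vm_area_struct vma = { .vm_page_prot = 0, .vm_flags = VM_WRITE };
	struct page page = { .pfn = 42 };

	/*
	 * Before the patch every caller open-coded the three calls above;
	 * after it, each call site is a single line.
	 */
	pmd_t entry = mk_huge_pmd(&page, &vma);

	printf("entry = %#lx\n", entry);
	return 0;
}
```

The helper exists purely to avoid repeating the same mk_pmd/maybe_pmd_mkwrite/pmd_mkhuge sequence at the three call sites touched by the diff below; it does not change the resulting pmd value.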
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r-- | mm/huge_memory.c | 21 |
1 file changed, 12 insertions, 9 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 26002683a16..ea5fb93a53a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -606,6 +606,15 @@ static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 	return pmd;
 }
 
+static inline pmd_t mk_huge_pmd(struct page *page, struct vm_area_struct *vma)
+{
+	pmd_t entry;
+	entry = mk_pmd(page, vma->vm_page_prot);
+	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+	entry = pmd_mkhuge(entry);
+	return entry;
+}
+
 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long haddr, pmd_t *pmd,
@@ -629,9 +638,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		pte_free(mm, pgtable);
 	} else {
 		pmd_t entry;
-		entry = mk_pmd(page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(page, vma);
 		/*
 		 * The spinlocking to take the lru_lock inside
 		 * page_add_new_anon_rmap() acts as a full memory
@@ -951,9 +958,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	} else {
 		pmd_t entry;
 		VM_BUG_ON(!PageHead(page));
-		entry = mk_pmd(new_page, vma->vm_page_prot);
-		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		entry = pmd_mkhuge(entry);
+		entry = mk_huge_pmd(new_page, vma);
 		pmdp_clear_flush(vma, haddr, pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr);
 		set_pmd_at(mm, haddr, pmd, entry);
@@ -2000,9 +2005,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
 
-	_pmd = mk_pmd(new_page, vma->vm_page_prot);
-	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-	_pmd = pmd_mkhuge(_pmd);
+	_pmd = mk_huge_pmd(new_page, vma);
 
 	/*
 	 * spin_lock() below is not the equivalent of smp_wmb(), so