author     Rik van Riel <riel@redhat.com>  2010-08-09 17:19:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 20:45:02 -0700
commit     ad8c2ee801ad7a52d919b478d9b2c7b39a72d295
tree       bc56cc023da3467447b0aecd30c0516881d53992 /mm
parent     51b1bd2ace1595b72956224deda349efa880b693
rmap: add exclusive page to private anon_vma on swapin
On swapin it is fairly common for a page to be owned exclusively by one
process. In that case we want to add the page to the anon_vma of that
process's VMA, instead of to the root anon_vma.
This will reduce the amount of rmap searching that the swapout code needs
to do.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
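
For context on the cost the changelog mentions: at swapout time try_to_unmap() has to walk every VMA chained to the page's anon_vma. The helper below is hypothetical and for illustration only (locking omitted, and it assumes the anon_vma_chain lists used in this era of the kernel); it shows what such a walk iterates over.

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Hypothetical helper, illustration only (locking omitted): count the VMAs
 * an rmap walk such as try_to_unmap_anon() would have to visit for an
 * anonymous page, assuming the anon_vma_chain lists of this kernel era.
 */
static int rmap_vmas_to_scan(struct page *page)
{
        struct anon_vma *anon_vma = page_anon_vma(page);
        struct anon_vma_chain *avc;
        int nr = 0;

        if (!anon_vma)          /* not an anonymous page */
                return 0;

        /* One candidate VMA per chain entry hanging off this anon_vma. */
        list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
                nr++;

        return nr;
}

A page hung off the root anon_vma is chained to VMAs from every process in the fork tree, so that walk can be long; a page hung off the faulting process's private anon_vma is typically chained to just that one VMA, which is the saving this patch is after.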
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c |  4
-rw-r--r--  mm/rmap.c   | 13
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6b0c37dcfd1..6bc039486e9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2628,6 +2628,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         swp_entry_t entry;
         pte_t pte;
         struct mem_cgroup *ptr = NULL;
+        int exclusive = 0;
         int ret = 0;

         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2722,10 +2723,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                 flags &= ~FAULT_FLAG_WRITE;
+                exclusive = 1;
         }
         flush_icache_page(vma, page);
         set_pte_at(mm, address, page_table, pte);
-        page_add_anon_rmap(page, vma, address);
+        do_page_add_anon_rmap(page, vma, address, exclusive);
         /* It's better to call commit-charge after rmap is established */
         mem_cgroup_commit_charge_swapin(page, ptr);

diff --git a/mm/rmap.c b/mm/rmap.c
index 4d152a6d3a8..a7d0f548263 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -830,6 +830,17 @@ static void __page_check_anon_rmap(struct page *page,
 void page_add_anon_rmap(struct page *page,
         struct vm_area_struct *vma, unsigned long address)
 {
+        do_page_add_anon_rmap(page, vma, address, 0);
+}
+
+/*
+ * Special version of the above for do_swap_page, which often runs
+ * into pages that are exclusively owned by the current process.
+ * Everybody else should continue to use page_add_anon_rmap above.
+ */
+void do_page_add_anon_rmap(struct page *page,
+        struct vm_area_struct *vma, unsigned long address, int exclusive)
+{
         int first = atomic_inc_and_test(&page->_mapcount);
         if (first)
                 __inc_zone_page_state(page, NR_ANON_PAGES);
@@ -839,7 +850,7 @@ void page_add_anon_rmap(struct page *page,
         VM_BUG_ON(!PageLocked(page));
         VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
         if (first)
-                __page_set_anon_rmap(page, vma, address, 0);
+                __page_set_anon_rmap(page, vma, address, exclusive);
         else
                 __page_check_anon_rmap(page, vma, address);
 }
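
The new do_page_add_anon_rmap() forwards the exclusive flag into __page_set_anon_rmap(), which is not part of this diff. The sketch below shows how that flag plausibly steers the anon_vma choice; it is a paraphrase for illustration under the same era's assumptions, not the verbatim kernel function.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

/*
 * Sketch, not the verbatim kernel code: how the exclusive flag passed down
 * from do_page_add_anon_rmap() is expected to pick the anon_vma that the
 * page ends up attached to.
 */
static void __page_set_anon_rmap_sketch(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        /*
         * A page that may still be mapped by a parent or sibling process
         * must hang off the root (oldest) anon_vma, so an rmap walk started
         * from any of those processes can find every mapping.
         */
        if (!exclusive)
                anon_vma = anon_vma->root;

        /*
         * An exclusively owned page can use the faulting process's own
         * anon_vma, keeping later rmap searches to a single, short list.
         */
        page->mapping = (struct address_space *)
                        ((void *)anon_vma + PAGE_MAPPING_ANON);
        page->index = linear_page_index(vma, address);
}

In do_swap_page() the exclusive case is exactly the one the changelog describes: a write fault where reuse_swap_page() confirms the swapped-in page is not shared with any other process.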