path: root/mm/migrate.c
author	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 11:35:36 -0800
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2015-02-10 11:35:36 -0800
commit	4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree	a20c125b27740ec7b4c761b11d801108e1b316b2 /mm/migrate.c
parent	47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent	98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c | 68
1 file changed, 24 insertions(+), 44 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 2740360cd21..344cdf692fc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -746,7 +746,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 * MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
-				int remap_swapcache, enum migrate_mode mode)
+				int page_was_mapped, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;
@@ -784,7 +784,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
		newpage->mapping = NULL;
	} else {
		mem_cgroup_migrate(page, newpage, false);
-		if (remap_swapcache)
+		if (page_was_mapped)
			remove_migration_ptes(page, newpage);
		page->mapping = NULL;
	}
@@ -798,7 +798,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
-	int remap_swapcache = 1;
+	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;

	if (!trylock_page(page)) {
@@ -870,13 +870,12 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
			 * migrated but are not remapped when migration
			 * completes
			 */
-			remap_swapcache = 0;
		} else {
			goto out_unlock;
		}
	}

-	if (unlikely(balloon_page_movable(page))) {
+	if (unlikely(isolated_balloon_page(page))) {
		/*
		 * A ballooned page does not need any special attention from
		 * physical to virtual reverse mapping procedures.
@@ -910,13 +909,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
	}

	/* Establish migration ptes or remove ptes */
-	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+	if (page_mapped(page)) {
+		try_to_unmap(page,
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		page_was_mapped = 1;
+	}

skip_unmap:
	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, remap_swapcache, mode);
+		rc = move_to_new_page(newpage, page, page_was_mapped, mode);

-	if (rc && remap_swapcache)
+	if (rc && page_was_mapped)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
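
Note: the point of this hunk is that a page with no ptes (e.g. an unmapped swapcache page) now skips try_to_unmap() entirely, and the failure path only calls remove_migration_ptes() when migration ptes were actually installed; the old remap_swapcache flag conflated the two cases. Below is a minimal userspace model of the new control flow — struct page and every helper are stand-ins for illustration, not the kernel implementations:

#include <stdbool.h>

struct page { bool mapped; };

/* Stand-in helpers: only the control flow of the hunk is modelled. */
static bool page_mapped(struct page *p)           { return p->mapped; }
static void try_to_unmap(struct page *p)          { p->mapped = false; }
static void remove_migration_ptes(struct page *p) { p->mapped = true; }
static int  move_to_new_page(struct page *p)      { (void)p; return 0; }

static int unmap_and_move_model(struct page *page)
{
	int rc = -1;			/* models -EAGAIN */
	int page_was_mapped = 0;

	if (page_mapped(page)) {	/* unmap only if ptes exist */
		try_to_unmap(page);
		page_was_mapped = 1;
	}
	if (!page_mapped(page))
		rc = move_to_new_page(page);
	if (rc && page_was_mapped)	/* roll back only what we unmapped */
		remove_migration_ptes(page);
	return rc;
}

int main(void)
{
	struct page p = { .mapped = true };
	return unmap_and_move_model(&p);	/* 0: modelled success */
}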
@@ -955,17 +958,6 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
	rc = __unmap_and_move(page, newpage, force, mode);

-	if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) {
-		/*
-		 * A ballooned page has been migrated already.
-		 * Now, it's the time to wrap-up counters,
-		 * handle the page back to Buddy and return.
-		 */
-		dec_zone_page_state(page, NR_ISOLATED_ANON +
-				page_is_file_cache(page));
-		balloon_page_free(page);
-		return MIGRATEPAGE_SUCCESS;
-	}

out:
	if (rc != -EAGAIN) {
		/*
@@ -988,6 +980,9 @@ out:
	if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
		ClearPageSwapBacked(newpage);
		put_new_page(newpage, private);
+	} else if (unlikely(__is_movable_balloon_page(newpage))) {
+		/* drop our reference, page already in the balloon */
+		put_page(newpage);
	} else
		putback_lru_page(newpage);
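
Note: per the two balloon hunks in this diff, a successfully migrated target page can already belong to the balloon driver, so it must not be pushed back onto the LRU; the new branch merely drops the reference the migration code took. A small userspace model of the three disposal outcomes (all types and helpers are stand-ins, not the kernel API):

#include <stdbool.h>

struct page { int refcount; bool on_lru; bool in_balloon; };

/* Stand-ins modelling reference handling only. */
static void put_page(struct page *p)         { p->refcount--; }
static void putback_lru_page(struct page *p) { p->on_lru = true; p->refcount--; }

static void dispose_newpage(struct page *newpage, int rc,
			    void (*put_new_page)(struct page *))
{
	if (rc != 0 && put_new_page)	/* failed: caller frees its own page */
		put_new_page(newpage);
	else if (newpage->in_balloon)	/* balloon owns it; drop our ref only */
		put_page(newpage);
	else				/* default: back onto the LRU */
		putback_lru_page(newpage);
}

int main(void)
{
	struct page target = { .refcount = 1, .on_lru = false, .in_balloon = true };
	dispose_newpage(&target, 0, 0);	/* takes the put_page() branch */
	return target.refcount;		/* 0: only our reference dropped */
}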
@@ -1025,6 +1020,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
{
	int rc = 0;
	int *result = NULL;
+	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
@@ -1055,12 +1051,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

-	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+	if (page_mapped(hpage)) {
+		try_to_unmap(hpage,
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+		page_was_mapped = 1;
+	}

	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, 1, mode);
+		rc = move_to_new_page(new_hpage, hpage, page_was_mapped, mode);

-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
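
Note: this is the same page_was_mapped pattern as in __unmap_and_move() above, applied to the hugetlb path; the model sketched after that earlier hunk covers this flow unchanged. The added page_was_mapped check on the failure branch keeps remove_migration_ptes() away from a huge page that never had migration ptes installed.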
@@ -1536,27 +1536,6 @@ out:
	return err;
}

-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
-	const nodemask_t *from, unsigned long flags)
-{
-	struct vm_area_struct *vma;
-	int err = 0;
-
-	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
-		if (vma->vm_ops && vma->vm_ops->migrate) {
-			err = vma->vm_ops->migrate(vma, to, from, flags);
-			if (err)
-				break;
-		}
-	}
-	return err;
-}
-
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
@@ -1862,7 +1841,7 @@ fail_putback:
	 */
	flush_cache_range(vma, mmun_start, mmun_end);
	page_add_anon_rmap(new_page, vma, mmun_start);
-	pmdp_clear_flush(vma, mmun_start, pmd);
+	pmdp_clear_flush_notify(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	flush_tlb_range(vma, mmun_start, mmun_end);
	update_mmu_cache_pmd(vma, address, &entry);
@@ -1870,6 +1849,7 @@ fail_putback:
	if (page_count(page) != 2) {
		set_pmd_at(mm, mmun_start, pmd, orig_entry);
		flush_tlb_range(vma, mmun_start, mmun_end);
+		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
		update_mmu_cache_pmd(vma, address, &entry);
		page_remove_rmap(new_page);
		goto fail_putback;
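
Note: the last two hunks concern secondary MMUs. pmdp_clear_flush_notify() folds an mmu_notifier callout into the pmd clear so that users such as KVM or IOMMU drivers drop their own translations, and the failure path now invalidates the range again after orig_entry is restored. A compact model of the clear-then-notify pattern — every name below is a stand-in, not the kernel API:

#include <stdio.h>

typedef unsigned long pmd_t;

/* Stand-ins for the CPU TLB flush and the mmu_notifier callout. */
static void flush_tlb_model(void)  { puts("flush CPU TLB"); }
static void notify_mmu_model(void) { puts("invalidate secondary MMUs"); }

/* Clearing, flushing and notifying are bundled in one helper so a caller
 * cannot clear a pmd while secondary MMUs still hold stale translations. */
static pmd_t pmd_clear_flush_notify_model(pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	*pmdp = 0;
	flush_tlb_model();
	notify_mmu_model();
	return old;
}

int main(void)
{
	pmd_t pmd = 0x42;
	return pmd_clear_flush_notify_model(&pmd) == 0x42 ? 0 : 1;
}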