Diffstat (limited to 'mm/migrate.c')
-rw-r--r-- | mm/migrate.c | 24
1 file changed, 20 insertions, 4 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 34d8ada053e..37c73b90200 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -49,9 +49,8 @@ int isolate_lru_page(struct page *page, struct list_head *pagelist)
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page)) {
+		if (PageLRU(page) && get_page_unless_zero(page)) {
 			ret = 0;
-			get_page(page);
 			ClearPageLRU(page);
 			if (PageActive(page))
 				del_page_from_active_list(zone, page);
@@ -632,18 +631,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 			goto unlock;
 		wait_on_page_writeback(page);
 	}
-
 	/*
-	 * Establish migration ptes or remove ptes
+	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+	 * we cannot notice that anon_vma is freed while we migrates a page.
+	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
+	 * of migration. File cache pages are no problem because of page_lock()
+	 */
+	rcu_read_lock();
+	/*
+	 * This is a corner case handling.
+	 * When a new swap-cache is read into, it is linked to LRU
+	 * and treated as swapcache but has no rmap yet.
+	 * Calling try_to_unmap() against a page->mapping==NULL page is
+	 * BUG. So handle it here.
 	 */
+	if (!page->mapping)
+		goto rcu_unlock;
+	/* Establish migration ptes or remove ptes */
 	try_to_unmap(page, 1);
+
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page);
 
 	if (rc)
 		remove_migration_ptes(page, page);
+rcu_unlock:
+	rcu_read_unlock();
 
 unlock:
+
 	unlock_page(page);
 
 	if (rc != -EAGAIN) {
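
The key change in isolate_lru_page() is replacing the unconditional get_page() with get_page_unless_zero(), which takes a reference only while the page's refcount is still non-zero, so a page that is concurrently on its way to being freed is never resurrected. Below is a minimal user-space sketch of that "increment only if non-zero" idiom using C11 atomics; the obj type and the obj_get*/refcount names are illustrative stand-ins, not the kernel's implementation.

/*
 * Sketch of the idiom behind get_page_unless_zero(); names are
 * hypothetical, the logic mirrors atomic "inc unless zero".
 */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;		/* stands in for the page refcount */
};

/* Unconditional reference, analogous to get_page(). */
static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

/*
 * Conditional reference, analogous to get_page_unless_zero(): refuse to
 * bump a count that has already dropped to zero, so an object that is
 * being freed is never brought back to life.
 */
static bool obj_get_unless_zero(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 0) {
		/* On CAS failure, c is reloaded with the current count. */
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;	/* reference taken */
	}
	return false;			/* count already zero; back off */
}

The second hunk's rcu_read_lock()/rcu_read_unlock() pair addresses the complementary problem described in the added comment: once try_to_unmap() drops page->mapcount to zero, the anon_vma could otherwise be freed while migration is still using it, and the RCU read-side critical section delays that freeing until the migration path is finished.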