Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 184
1 files changed, 103 insertions, 81 deletions
diff --git a/mm/swap.c b/mm/swap.c
index bbc1ce9f946..a448db377cb 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -39,6 +39,7 @@ int page_cluster;
 
 static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
 static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
 
 /*
  * This path almost never happens for VM activity - pages are normally
@@ -201,7 +202,7 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 	}
 	if (zone)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
+	release_pages(pvec->pages, pvec->nr, pvec->cold);
 	pagevec_reinit(pvec);
 }
 
@@ -211,8 +212,9 @@ static void pagevec_move_tail_fn(struct page *page, void *arg)
 	struct zone *zone = page_zone(page);
 
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		int lru = page_lru_base_type(page);
+		enum lru_list lru = page_lru_base_type(page);
 		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
 		(*pgmoved)++;
 	}
 }
@@ -271,94 +273,27 @@ static void update_page_reclaim_stat(struct zone *zone, struct page *page,
 }
 
 /*
- * A page will go to active list either by activate_page or putback_lru_page.
- * In the activate_page case, the page hasn't active bit set. The page might
- * not in LRU list because it's isolated before it gets a chance to be moved to
- * active list. The window is small because pagevec just stores several pages.
- * For such case, we do nothing for such page.
- * In the putback_lru_page case, the page isn't in lru list but has active
- * bit set
+ * FIXME: speed this up?
  */
-static void __activate_page(struct page *page, void *arg)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	int file = page_is_file_cache(page);
-	int lru = page_lru_base_type(page);
-	bool putback = !PageLRU(page);
-
-	/* The page is isolated before it's moved to active list */
-	if (!PageLRU(page) && !PageActive(page))
-		return;
-	if ((PageLRU(page) && PageActive(page)) || PageUnevictable(page))
-		return;
-
-	if (!putback)
-		del_page_from_lru_list(zone, page, lru);
-	else
-		SetPageLRU(page);
-
-	SetPageActive(page);
-	lru += LRU_ACTIVE;
-	add_page_to_lru_list(zone, page, lru);
-
-	if (putback)
-		return;
-	__count_vm_event(PGACTIVATE);
-	update_page_reclaim_stat(zone, page, file, 1);
-}
-
-#ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
-
-static void activate_page_drain(int cpu)
-{
-	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
-
-	if (pagevec_count(pvec))
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-}
 
-void activate_page(struct page *page)
-{
+	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
-
-		page_cache_get(page);
-		if (!pagevec_add(pvec, page))
-			pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		put_cpu_var(activate_page_pvecs);
-	}
-}
+		int file = page_is_file_cache(page);
+		int lru = page_lru_base_type(page);
+		del_page_from_lru_list(zone, page, lru);
 
-/* Caller should hold zone->lru_lock */
-int putback_active_lru_page(struct zone *zone, struct page *page)
-{
-	struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+		SetPageActive(page);
+		lru += LRU_ACTIVE;
+		add_page_to_lru_list(zone, page, lru);
+		__count_vm_event(PGACTIVATE);
 
-	if (!pagevec_add(pvec, page)) {
-		spin_unlock_irq(&zone->lru_lock);
-		pagevec_lru_move_fn(pvec, __activate_page, NULL);
-		spin_lock_irq(&zone->lru_lock);
+		update_page_reclaim_stat(zone, page, file, 1);
 	}
-	put_cpu_var(activate_page_pvecs);
-	return 1;
-}
-
-#else
-static inline void activate_page_drain(int cpu)
-{
-}
-
-void activate_page(struct page *page)
-{
-	struct zone *zone = page_zone(page);
-
-	spin_lock_irq(&zone->lru_lock);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page))
-		__activate_page(page, NULL);
 	spin_unlock_irq(&zone->lru_lock);
 }
-#endif
 
 /*
  * Mark a page as having seen activity.
@@ -432,6 +367,71 @@ void add_page_to_unevictable_list(struct page *page)
 }
 
 /*
+ * If the page can not be invalidated, it is moved to the
+ * inactive list to speed up its reclaim. It is moved to the
+ * head of the list, rather than the tail, to give the flusher
+ * threads some time to write it out, as this is much more
+ * effective than the single-page writeout from reclaim.
+ *
+ * If the page isn't page_mapped and dirty/writeback, the page
+ * could reclaim asap using PG_reclaim.
+ *
+ * 1. active, mapped page -> none
+ * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
+ * 3. inactive, mapped page -> none
+ * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 5. inactive, clean -> inactive, tail
+ * 6. Others -> none
+ *
+ * In 4, why it moves inactive's head, the VM expects the page would
+ * be write it out by flusher threads as this is much more effective
+ * than the single-page writeout from reclaim.
+ */
+static void lru_deactivate_fn(struct page *page, void *arg)
+{
+	int lru, file;
+	bool active;
+	struct zone *zone = page_zone(page);
+
+	if (!PageLRU(page))
+		return;
+
+	/* Some processes are using the page */
+	if (page_mapped(page))
+		return;
+
+	active = PageActive(page);
+
+	file = page_is_file_cache(page);
+	lru = page_lru_base_type(page);
+	del_page_from_lru_list(zone, page, lru + active);
+	ClearPageActive(page);
+	ClearPageReferenced(page);
+	add_page_to_lru_list(zone, page, lru);
+
+	if (PageWriteback(page) || PageDirty(page)) {
+		/*
+		 * PG_reclaim could be raced with end_page_writeback
+		 * It can make readahead confusing. But race window
+		 * is _really_ small and it's non-critical problem.
+		 */
+		SetPageReclaim(page);
+	} else {
+		/*
+		 * The page's writeback ends up during pagevec
+		 * We moves tha page into tail of inactive.
+		 */
+		list_move_tail(&page->lru, &zone->lru[lru].list);
+		mem_cgroup_rotate_reclaimable_page(page);
+		__count_vm_event(PGROTATED);
+	}
+
+	if (active)
+		__count_vm_event(PGDEACTIVATE);
+	update_page_reclaim_stat(zone, page, file, 0);
+}
+
+/*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
  * disabled; or "cpu" is being hot-unplugged, and is already dead.
@@ -457,7 +457,29 @@ static void drain_cpu_pagevecs(int cpu)
 		pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
-	activate_page_drain(cpu);
+
+	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+}
+
+/**
+ * deactivate_page - forcefully deactivate a page
+ * @page: page to deactivate
+ *
+ * This function hints the VM that @page is a good reclaim candidate,
+ * for example if its invalidation fails due to the page being dirty
+ * or under writeback.
+ */
+void deactivate_page(struct page *page)
+{
+	if (likely(get_page_unless_zero(page))) {
+		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
+
+		if (!pagevec_add(pvec, page))
+			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+		put_cpu_var(lru_deactivate_pvecs);
+	}
+}
 
 void lru_add_drain(void)
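
The new deactivate_page() follows the per-CPU pagevec batching pattern already used for lru_add_pvecs and lru_rotate_pvecs: pages are queued cheaply in lru_deactivate_pvecs, and the pass that actually takes zone->lru_lock (pagevec_lru_move_fn()) runs only once the pagevec fills up or drain_cpu_pagevecs() drains it. Below is a minimal userspace sketch of that batching idea, assuming a 14-entry batch (the kernel's PAGEVEC_SIZE); it is not kernel code and every demo_* name is invented purely for illustration.

#include <stdio.h>

#define DEMO_PAGEVEC_SIZE 14	/* assumed to match the kernel's PAGEVEC_SIZE */

struct demo_page { int id; };

/* Stand-in for struct pagevec: a small batch of pages plus a fill count. */
struct demo_pagevec {
	unsigned long nr;
	struct demo_page *pages[DEMO_PAGEVEC_SIZE];
};

/* Like pagevec_add(): stash the page, return the number of free slots left. */
static unsigned long demo_pagevec_add(struct demo_pagevec *pvec, struct demo_page *page)
{
	pvec->pages[pvec->nr++] = page;
	return DEMO_PAGEVEC_SIZE - pvec->nr;
}

/* Like pagevec_lru_move_fn(): apply move_fn to every queued page, then reset. */
static void demo_pagevec_move(struct demo_pagevec *pvec,
			      void (*move_fn)(struct demo_page *page))
{
	for (unsigned long i = 0; i < pvec->nr; i++)
		move_fn(pvec->pages[i]);
	pvec->nr = 0;
}

static void demo_deactivate_fn(struct demo_page *page)
{
	printf("moving page %d to the inactive list\n", page->id);
}

/* Like deactivate_page(): queue cheaply, do the expensive move once per batch. */
static void demo_deactivate_page(struct demo_pagevec *pvec, struct demo_page *page)
{
	if (!demo_pagevec_add(pvec, page))
		demo_pagevec_move(pvec, demo_deactivate_fn);
}

int main(void)
{
	static struct demo_pagevec pvec;	/* stands in for per-CPU lru_deactivate_pvecs */
	struct demo_page pages[20];

	for (int i = 0; i < 20; i++) {
		pages[i].id = i;
		demo_deactivate_page(&pvec, &pages[i]);
	}
	demo_pagevec_move(&pvec, demo_deactivate_fn);	/* final drain, like lru_add_drain() */
	return 0;
}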
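
The policy applied by the new lru_deactivate_fn() boils down to the case table in its comment: pages that are off the LRU or still mapped are left alone; unmapped dirty or writeback pages go to the head of the inactive list with PG_reclaim set so they are reclaimed as soon as writeback completes; unmapped clean pages are rotated to the tail of the inactive list. The snippet below is only a self-contained restatement of that table for reference, not kernel code: the demo_* names are invented here, and whether the page was active only determines whether PGDEACTIVATE is accounted, not the destination.

#include <stdio.h>
#include <stdbool.h>

/* Destinations named after the comment above lru_deactivate_fn(). */
enum demo_action {
	DEMO_NONE,			/* leave the page where it is */
	DEMO_INACTIVE_HEAD_RECLAIM,	/* head of the inactive list, PG_reclaim set */
	DEMO_INACTIVE_TAIL,		/* tail of the inactive list */
};

/*
 * Cases from the comment:
 *  1. active, mapped            -> none
 *  2. active, dirty/writeback   -> inactive, head, PG_reclaim
 *  3. inactive, mapped          -> none
 *  4. inactive, dirty/writeback -> inactive, head, PG_reclaim
 *  5. inactive, clean           -> inactive, tail
 *  6. others (not on the LRU)   -> none
 */
static enum demo_action demo_classify(bool on_lru, bool mapped,
				      bool dirty_or_writeback)
{
	if (!on_lru || mapped)		/* cases 1, 3 and 6 */
		return DEMO_NONE;
	if (dirty_or_writeback)		/* cases 2 and 4 */
		return DEMO_INACTIVE_HEAD_RECLAIM;
	return DEMO_INACTIVE_TAIL;	/* case 5 */
}

int main(void)
{
	printf("dirty page  -> %d\n", demo_classify(true, false, true));
	printf("clean page  -> %d\n", demo_classify(true, false, false));
	printf("mapped page -> %d\n", demo_classify(true, true, true));
	return 0;
}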