From 46453a6e194a8c55fe6cf3dc8e1c4f24e2abc013 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 22 Mar 2006 00:07:58 -0800 Subject: [PATCH] mm: never ClearPageLRU released pages If vmscan finds a zero refcount page on the lru list, never ClearPageLRU it. This means the release code need not hold ->lru_lock to stabilise PageLRU, so that lock may be skipped entirely when releasing !PageLRU pages (because we know PageLRU won't have been temporarily cleared by vmscan, which was previously guaranteed by holding the lock to synchronise against vmscan). Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 4fe7e3aa02e..acb7611cd52 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1085,21 +1085,25 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); - if (!TestClearPageLRU(page)) - BUG(); list_del(&page->lru); - if (get_page_testone(page)) { + if (unlikely(get_page_testone(page))) { /* * It is being freed elsewhere */ __put_page(page); - SetPageLRU(page); list_add(&page->lru, src); continue; - } else { - list_add(&page->lru, dst); - nr_taken++; } + + /* + * Be careful not to clear PageLRU until after we're sure + * the page is not being freed elsewhere -- the page release + * code relies on it. + */ + if (!TestClearPageLRU(page)) + BUG(); + list_add(&page->lru, dst); + nr_taken++; } *scanned = scan; -- cgit v1.2.3-70-g09d2 From 8d438f96d2b8eade6cbcd8adfc22dae6f5cbd6c0 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 22 Mar 2006 00:07:59 -0800 Subject: [PATCH] mm: PageLRU no testset PG_lru is protected by zone->lru_lock. It does not need TestSet/TestClear operations. 
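An illustrative aside (not text from either patch): the rule both changes rely on is that every PG_lru update is already made with zone->lru_lock held, so the atomic test-and-set/test-and-clear flavours can be replaced by an assertion plus a plain set or clear. Sketched with the identifiers used in the diffs below:

    spin_lock_irq(&zone->lru_lock);
    BUG_ON(PageLRU(page));      /* was: if (TestSetPageLRU(page)) BUG(); */
    SetPageLRU(page);           /* lru_lock serialises all PG_lru updates */
    add_page_to_inactive_list(zone, page);
    spin_unlock_irq(&zone->lru_lock);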
Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 5 ++--- mm/swap.c | 16 ++++++++-------- mm/vmscan.c | 20 +++++++++++--------- 3 files changed, 21 insertions(+), 20 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index d52999c4333..58856c823f8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -239,10 +239,9 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags) #define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags) -#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags) #define PageLRU(page) test_bit(PG_lru, &(page)->flags) -#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags) -#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags) +#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags) +#define ClearPageLRU(page) clear_bit(PG_lru, &(page)->flags) #define PageActive(page) test_bit(PG_active, &(page)->flags) #define SetPageActive(page) set_bit(PG_active, &(page)->flags) diff --git a/mm/swap.c b/mm/swap.c index 3045a0f4c45..985324ee936 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -214,8 +214,8 @@ void fastcall __page_cache_release(struct page *page) struct zone *zone = page_zone(page); spin_lock_irqsave(&zone->lru_lock, flags); - if (!TestClearPageLRU(page)) - BUG(); + BUG_ON(!PageLRU(page)); + ClearPageLRU(page); del_page_from_lru(zone, page); spin_unlock_irqrestore(&zone->lru_lock, flags); } @@ -265,8 +265,8 @@ void release_pages(struct page **pages, int nr, int cold) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - if (!TestClearPageLRU(page)) - BUG(); + BUG_ON(!PageLRU(page)); + ClearPageLRU(page); del_page_from_lru(zone, page); } @@ -345,8 +345,8 @@ void __pagevec_lru_add(struct pagevec *pvec) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); add_page_to_inactive_list(zone, page); } if (zone) @@ -372,8 +372,8 @@ void __pagevec_lru_add_active(struct pagevec *pvec) zone = pagezone; spin_lock_irq(&zone->lru_lock); } - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); if (TestSetPageActive(page)) BUG(); add_page_to_active_list(zone, page); diff --git a/mm/vmscan.c b/mm/vmscan.c index acb7611cd52..40fb37828e8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1042,9 +1042,10 @@ int isolate_lru_page(struct page *page) if (PageLRU(page)) { struct zone *zone = page_zone(page); spin_lock_irq(&zone->lru_lock); - if (TestClearPageLRU(page)) { + if (PageLRU(page)) { ret = 1; get_page(page); + ClearPageLRU(page); if (PageActive(page)) del_page_from_active_list(zone, page); else @@ -1085,6 +1086,8 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); + BUG_ON(!PageLRU(page)); + list_del(&page->lru); if (unlikely(get_page_testone(page))) { /* @@ -1100,8 +1103,7 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, * the page is not being freed elsewhere -- the page release * code relies on it. 
*/ - if (!TestClearPageLRU(page)) - BUG(); + ClearPageLRU(page); list_add(&page->lru, dst); nr_taken++; } @@ -1156,8 +1158,8 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc) */ while (!list_empty(&page_list)) { page = lru_to_page(&page_list); - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); list_del(&page->lru); if (PageActive(page)) add_page_to_active_list(zone, page); @@ -1276,8 +1278,8 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); prefetchw_prev_lru_page(page, &l_inactive, flags); - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); if (!TestClearPageActive(page)) BUG(); list_move(&page->lru, &zone->inactive_list); @@ -1305,8 +1307,8 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) while (!list_empty(&l_active)) { page = lru_to_page(&l_active); prefetchw_prev_lru_page(page, &l_active, flags); - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); BUG_ON(!PageActive(page)); list_move(&page->lru, &zone->active_list); pgmoved++; -- cgit v1.2.3-70-g09d2 From 4c84cacfa424264f7ad5287298d3ea4a3e935278 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 22 Mar 2006 00:08:00 -0800 Subject: [PATCH] mm: PageActive no testset PG_active is protected by zone->lru_lock, it does not need TestSet/TestClear operations. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 2 -- mm/swap.c | 4 ++-- mm/vmscan.c | 5 +++-- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 58856c823f8..5d1e7bd8510 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -246,8 +246,6 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta); #define PageActive(page) test_bit(PG_active, &(page)->flags) #define SetPageActive(page) set_bit(PG_active, &(page)->flags) #define ClearPageActive(page) clear_bit(PG_active, &(page)->flags) -#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags) -#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags) #define PageSlab(page) test_bit(PG_slab, &(page)->flags) #define SetPageSlab(page) set_bit(PG_slab, &(page)->flags) diff --git a/mm/swap.c b/mm/swap.c index 985324ee936..cf88226cf96 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -374,8 +374,8 @@ void __pagevec_lru_add_active(struct pagevec *pvec) } BUG_ON(PageLRU(page)); SetPageLRU(page); - if (TestSetPageActive(page)) - BUG(); + BUG_ON(PageActive(page)); + SetPageActive(page); add_page_to_active_list(zone, page); } if (zone) diff --git a/mm/vmscan.c b/mm/vmscan.c index 40fb37828e8..8e477b1a483 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1280,8 +1280,9 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) prefetchw_prev_lru_page(page, &l_inactive, flags); BUG_ON(PageLRU(page)); SetPageLRU(page); - if (!TestClearPageActive(page)) - BUG(); + BUG_ON(!PageActive(page)); + ClearPageActive(page); + list_move(&page->lru, &zone->inactive_list); pgmoved++; if (!pagevec_add(&pvec, page)) { -- cgit v1.2.3-70-g09d2 From 7c8ee9a86340db686cd4314e9944dc9b6111bda9 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 22 Mar 2006 00:08:03 -0800 Subject: [PATCH] mm: simplify vmscan vs release refcounting The VM has an interesting race where a page refcount can drop to zero, but it 
is still on the LRU lists for a short time. This was solved by testing a 0->1 refcount transition when picking up pages from the LRU, and dropping the refcount in that case. Instead, use atomic_add_unless to ensure we never pick up a 0 refcount page from the LRU, thus a 0 refcount page will never have its refcount elevated until it is allocated again. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 19 +++++++++++-------- mm/vmscan.c | 25 +++++++++++-------------- 2 files changed, 22 insertions(+), 22 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index 498ff8778fb..b12d5c76420 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -301,17 +301,20 @@ struct page { * Drop a ref, return true if the logical refcount fell to zero (the page has * no users) */ -#define put_page_testzero(p) \ - ({ \ - BUG_ON(atomic_read(&(p)->_count) == -1);\ - atomic_add_negative(-1, &(p)->_count); \ - }) +static inline int put_page_testzero(struct page *page) +{ + BUG_ON(atomic_read(&page->_count) == -1); + return atomic_add_negative(-1, &page->_count); +} /* - * Grab a ref, return true if the page previously had a logical refcount of - * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page + * Try to grab a ref unless the page has a refcount of zero, return false if + * that is the case. */ -#define get_page_testone(p) atomic_inc_and_test(&(p)->_count) +static inline int get_page_unless_zero(struct page *page) +{ + return atomic_add_unless(&page->_count, 1, -1); +} #define set_page_count(p,v) atomic_set(&(p)->_count, (v) - 1) #define __put_page(p) atomic_dec(&(p)->_count) diff --git a/mm/vmscan.c b/mm/vmscan.c index 8e477b1a483..e21bab4deda 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1083,29 +1083,26 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, int scan = 0; while (scan++ < nr_to_scan && !list_empty(src)) { + struct list_head *target; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); BUG_ON(!PageLRU(page)); list_del(&page->lru); - if (unlikely(get_page_testone(page))) { + target = src; + if (likely(get_page_unless_zero(page))) { /* - * It is being freed elsewhere + * Be careful not to clear PageLRU until after we're + * sure the page is not being freed elsewhere -- the + * page release code relies on it. */ - __put_page(page); - list_add(&page->lru, src); - continue; - } + ClearPageLRU(page); + target = dst; + nr_taken++; + } /* else it is being freed elsewhere */ - /* - * Be careful not to clear PageLRU until after we're sure - * the page is not being freed elsewhere -- the page release - * code relies on it. - */ - ClearPageLRU(page); - list_add(&page->lru, dst); - nr_taken++; + list_add(&page->lru, target); } *scanned = scan; -- cgit v1.2.3-70-g09d2 From 8695949a1d7c99e039595db00af8e0fe4722307d Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 22 Mar 2006 00:08:18 -0800 Subject: [PATCH] Thin out scan_control: remove nr_to_scan and priority Make nr_to_scan and priority a parameter instead of putting it into scan control. This allows various small optimizations and IMHO makes the code easier to read. 
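The shape of the change, sketched for one call site (an illustration, not text from the patch; identifiers are the ones appearing in the diff):

    /* before: per-call values travel through the long-lived struct */
    sc->nr_to_scan = min(nr_active, (unsigned long)sc->swap_cluster_max);
    refill_inactive_zone(zone, sc);

    /* after: they are passed as ordinary arguments */
    nr_to_scan = min(nr_active, (unsigned long)sc->swap_cluster_max);
    refill_inactive_zone(nr_to_scan, zone, sc);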
Signed-off-by: Christoph Lameter Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 59 +++++++++++++++++++++++++---------------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index e21bab4deda..f7c4f37c3b1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -52,9 +52,6 @@ typedef enum { } pageout_t; struct scan_control { - /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */ - unsigned long nr_to_scan; - /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; @@ -63,9 +60,6 @@ struct scan_control { unsigned long nr_mapped; /* From page_state */ - /* Ask shrink_caches, or shrink_zone to scan at this priority */ - unsigned int priority; - /* This context's GFP mask */ gfp_t gfp_mask; @@ -1112,11 +1106,10 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, /* * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed */ -static void shrink_cache(struct zone *zone, struct scan_control *sc) +static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; - int max_scan = sc->nr_to_scan; pagevec_init(&pvec, 1); @@ -1192,12 +1185,11 @@ done: * But we had to alter page->flags anyway. */ static void -refill_inactive_zone(struct zone *zone, struct scan_control *sc) +refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc) { int pgmoved; int pgdeactivate = 0; int pgscanned; - int nr_pages = sc->nr_to_scan; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ LIST_HEAD(l_active); /* Pages to go onto the active_list */ @@ -1332,10 +1324,11 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ static void -shrink_zone(struct zone *zone, struct scan_control *sc) +shrink_zone(int priority, struct zone *zone, struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; + unsigned long nr_to_scan; atomic_inc(&zone->reclaim_in_progress); @@ -1343,14 +1336,14 @@ shrink_zone(struct zone *zone, struct scan_control *sc) * Add one to `nr_to_scan' just to make sure that the kernel will * slowly sift through the active list. 
*/ - zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; + zone->nr_scan_active += (zone->nr_active >> priority) + 1; nr_active = zone->nr_scan_active; if (nr_active >= sc->swap_cluster_max) zone->nr_scan_active = 0; else nr_active = 0; - zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; + zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1; nr_inactive = zone->nr_scan_inactive; if (nr_inactive >= sc->swap_cluster_max) zone->nr_scan_inactive = 0; @@ -1359,17 +1352,17 @@ shrink_zone(struct zone *zone, struct scan_control *sc) while (nr_active || nr_inactive) { if (nr_active) { - sc->nr_to_scan = min(nr_active, + nr_to_scan = min(nr_active, (unsigned long)sc->swap_cluster_max); - nr_active -= sc->nr_to_scan; - refill_inactive_zone(zone, sc); + nr_active -= nr_to_scan; + refill_inactive_zone(nr_to_scan, zone, sc); } if (nr_inactive) { - sc->nr_to_scan = min(nr_inactive, + nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); - nr_inactive -= sc->nr_to_scan; - shrink_cache(zone, sc); + nr_inactive -= nr_to_scan; + shrink_cache(nr_to_scan, zone, sc); } } @@ -1395,7 +1388,7 @@ shrink_zone(struct zone *zone, struct scan_control *sc) * scan then give up on it. */ static void -shrink_caches(struct zone **zones, struct scan_control *sc) +shrink_caches(int priority, struct zone **zones, struct scan_control *sc) { int i; @@ -1408,14 +1401,14 @@ shrink_caches(struct zone **zones, struct scan_control *sc) if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) continue; - zone->temp_priority = sc->priority; - if (zone->prev_priority > sc->priority) - zone->prev_priority = sc->priority; + zone->temp_priority = priority; + if (zone->prev_priority > priority) + zone->prev_priority = priority; - if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) + if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ - shrink_zone(zone, sc); + shrink_zone(priority, zone, sc); } } @@ -1462,11 +1455,10 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) sc.nr_mapped = read_page_state(nr_mapped); sc.nr_scanned = 0; sc.nr_reclaimed = 0; - sc.priority = priority; sc.swap_cluster_max = SWAP_CLUSTER_MAX; if (!priority) disable_swap_token(); - shrink_caches(zones, &sc); + shrink_caches(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { sc.nr_reclaimed += reclaim_state->reclaimed_slab; @@ -1629,9 +1621,8 @@ scan: zone->prev_priority = priority; sc.nr_scanned = 0; sc.nr_reclaimed = 0; - sc.priority = priority; sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; - shrink_zone(zone, &sc); + shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); @@ -1886,6 +1877,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) struct scan_control sc; cpumask_t mask; int node_id; + int priority; if (time_before(jiffies, zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) @@ -1906,7 +1898,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP); sc.nr_scanned = 0; sc.nr_reclaimed = 0; - sc.priority = ZONE_RECLAIM_PRIORITY + 1; sc.nr_mapped = read_page_state(nr_mapped); sc.gfp_mask = gfp_mask; @@ -1932,11 +1923,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) * Free memory by calling shrink zone with increasing priorities * until we have enough memory freed. 
*/ + priority = ZONE_RECLAIM_PRIORITY; do { - sc.priority--; - shrink_zone(zone, &sc); - - } while (sc.nr_reclaimed < nr_pages && sc.priority > 0); + shrink_zone(priority, zone, &sc); + priority--; + } while (priority >= 0 && sc.nr_reclaimed < nr_pages); if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { /* -- cgit v1.2.3-70-g09d2 From 179e96395b1f01e95ebe1ff5ef306b810dbbd147 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Mar 2006 00:08:18 -0800 Subject: [PATCH] vmscan: scan_control cleanup Initialise as much of scan_control as possible at the declaration site. This tidies things up a bit and assures us that all unmentioned fields are zeroed out. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 108 ++++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 62 insertions(+), 46 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index f7c4f37c3b1..5feef4d4650 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1431,13 +1431,14 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) int ret = 0; int total_scanned = 0, total_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; - struct scan_control sc; unsigned long lru_pages = 0; int i; - - sc.gfp_mask = gfp_mask; - sc.may_writepage = !laptop_mode; - sc.may_swap = 1; + struct scan_control sc = { + .gfp_mask = gfp_mask, + .may_writepage = !laptop_mode, + .swap_cluster_max = SWAP_CLUSTER_MAX, + .may_swap = 1, + }; inc_page_state(allocstall); @@ -1455,7 +1456,6 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) sc.nr_mapped = read_page_state(nr_mapped); sc.nr_scanned = 0; sc.nr_reclaimed = 0; - sc.swap_cluster_max = SWAP_CLUSTER_MAX; if (!priority) disable_swap_token(); shrink_caches(priority, zones, &sc); @@ -1478,7 +1478,8 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ - if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) { + if (total_scanned > sc.swap_cluster_max + + sc.swap_cluster_max / 2) { wakeup_pdflush(laptop_mode ? 0 : total_scanned); sc.may_writepage = 1; } @@ -1532,14 +1533,16 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order) int i; int total_scanned, total_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; - struct scan_control sc; + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .may_swap = 1, + .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX, + }; loop_again: total_scanned = 0; total_reclaimed = 0; - sc.gfp_mask = GFP_KERNEL; - sc.may_writepage = !laptop_mode; - sc.may_swap = 1; + sc.may_writepage = !laptop_mode, sc.nr_mapped = read_page_state(nr_mapped); inc_page_state(pageoutrun); @@ -1621,7 +1624,6 @@ scan: zone->prev_priority = priority; sc.nr_scanned = 0; sc.nr_reclaimed = 0; - sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX; shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, @@ -1869,46 +1871,21 @@ int zone_reclaim_interval __read_mostly = 30*HZ; /* * Try to free up some pages from this zone through reclaim. 
*/ -int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) +static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { - int nr_pages; + const int nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; - struct scan_control sc; - cpumask_t mask; - int node_id; int priority; - - if (time_before(jiffies, - zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) - return 0; - - if (!(gfp_mask & __GFP_WAIT) || - zone->all_unreclaimable || - atomic_read(&zone->reclaim_in_progress) > 0 || - (p->flags & PF_MEMALLOC)) - return 0; - - node_id = zone->zone_pgdat->node_id; - mask = node_to_cpumask(node_id); - if (!cpus_empty(mask) && node_id != numa_node_id()) - return 0; - - sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE); - sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP); - sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - sc.nr_mapped = read_page_state(nr_mapped); - sc.gfp_mask = gfp_mask; + struct scan_control sc = { + .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), + .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), + .nr_mapped = read_page_state(nr_mapped), + .swap_cluster_max = max(nr_pages, SWAP_CLUSTER_MAX), + .gfp_mask = gfp_mask, + }; disable_swap_token(); - - nr_pages = 1 << order; - if (nr_pages > SWAP_CLUSTER_MAX) - sc.swap_cluster_max = nr_pages; - else - sc.swap_cluster_max = SWAP_CLUSTER_MAX; - cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_SWAP @@ -1949,5 +1926,44 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) return sc.nr_reclaimed >= nr_pages; } + +int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) +{ + cpumask_t mask; + int node_id; + + /* + * Do not reclaim if there was a recent unsuccessful attempt at zone + * reclaim. In that case we let allocations go off node for the + * zone_reclaim_interval. Otherwise we would scan for each off-node + * page allocation. + */ + if (time_before(jiffies, + zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) + return 0; + + /* + * Avoid concurrent zone reclaims, do not reclaim in a zone that does + * not have reclaimable pages and if we should not delay the allocation + * then do not scan. + */ + if (!(gfp_mask & __GFP_WAIT) || + zone->all_unreclaimable || + atomic_read(&zone->reclaim_in_progress) > 0 || + (current->flags & PF_MEMALLOC)) + return 0; + + /* + * Only run zone reclaim on the local zone or on zones that do not + * have associated processors. This will favor the local processor + * over remote processors and spread off node memory allocations + * as wide as possible. + */ + node_id = zone->zone_pgdat->node_id; + mask = node_to_cpumask(node_id); + if (!cpus_empty(mask) && node_id != numa_node_id()) + return 0; + return __zone_reclaim(zone, gfp_mask, order); +} #endif -- cgit v1.2.3-70-g09d2 From 69e05944af39fc6c97b09380c8721e38433bd828 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Mar 2006 00:08:19 -0800 Subject: [PATCH] vmscan: use unsigned longs Turn basically everything in vmscan.c into `unsigned long'. This is to avoid the possibility that some piece of code in there might decide to operate upon more than 4G (or even 2G) of pages in one hit. This might be silly, but we'll need it one day. Cc: Christoph Lameter Cc: Nick Piggin Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 2 +- include/linux/swap.h | 8 ++-- mm/vmscan.c | 104 +++++++++++++++++++++++++++++---------------------- 3 files changed, 64 insertions(+), 50 deletions(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index e67980654c4..1850cf8bad6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1046,7 +1046,7 @@ int in_gate_area_no_task(unsigned long addr); int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); -int shrink_slab(unsigned long scanned, gfp_t gfp_mask, +unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages); void drop_pagecache(void); void drop_slab(void); diff --git a/include/linux/swap.h b/include/linux/swap.h index d572b19afb7..3dc6c89c49b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -172,8 +172,8 @@ extern int rotate_reclaimable_page(struct page *page); extern void swap_setup(void); /* linux/mm/vmscan.c */ -extern int try_to_free_pages(struct zone **, gfp_t); -extern int shrink_all_memory(int); +extern unsigned long try_to_free_pages(struct zone **, gfp_t); +extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; #ifdef CONFIG_NUMA @@ -190,11 +190,11 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) #ifdef CONFIG_MIGRATION extern int isolate_lru_page(struct page *p); -extern int putback_lru_pages(struct list_head *l); +extern unsigned long putback_lru_pages(struct list_head *l); extern int migrate_page(struct page *, struct page *); extern void migrate_page_copy(struct page *, struct page *); extern int migrate_page_remove_references(struct page *, struct page *, int); -extern int migrate_pages(struct list_head *l, struct list_head *t, +extern unsigned long migrate_pages(struct list_head *l, struct list_head *t, struct list_head *moved, struct list_head *failed); extern int fail_migrate_page(struct page *, struct page *); #else diff --git a/mm/vmscan.c b/mm/vmscan.c index 5feef4d4650..62cd7cd257e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -177,10 +177,11 @@ EXPORT_SYMBOL(remove_shrinker); * * Returns the number of slab objects which we shrunk. */ -int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) +unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, + unsigned long lru_pages) { struct shrinker *shrinker; - int ret = 0; + unsigned long ret = 0; if (scanned == 0) scanned = SWAP_CLUSTER_MAX; @@ -410,12 +411,13 @@ cannot_free: /* * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed */ -static int shrink_list(struct list_head *page_list, struct scan_control *sc) +static unsigned long shrink_list(struct list_head *page_list, + struct scan_control *sc) { LIST_HEAD(ret_pages); struct pagevec freed_pvec; int pgactivate = 0; - int reclaimed = 0; + unsigned long reclaimed = 0; cond_resched(); @@ -599,11 +601,11 @@ static inline void move_to_lru(struct page *page) * * returns the number of pages put back. */ -int putback_lru_pages(struct list_head *l) +unsigned long putback_lru_pages(struct list_head *l) { struct page *page; struct page *page2; - int count = 0; + unsigned long count = 0; list_for_each_entry_safe(page, page2, l, lru) { move_to_lru(page); @@ -848,11 +850,11 @@ EXPORT_SYMBOL(migrate_page); * * Return: Number of pages not migrated when "to" ran empty. 
*/ -int migrate_pages(struct list_head *from, struct list_head *to, +unsigned long migrate_pages(struct list_head *from, struct list_head *to, struct list_head *moved, struct list_head *failed) { - int retry; - int nr_failed = 0; + unsigned long retry; + unsigned long nr_failed = 0; int pass = 0; struct page *page; struct page *page2; @@ -1069,12 +1071,13 @@ int isolate_lru_page(struct page *page) * * returns how many pages were moved onto *@dst. */ -static int isolate_lru_pages(int nr_to_scan, struct list_head *src, - struct list_head *dst, int *scanned) +static unsigned long isolate_lru_pages(unsigned long nr_to_scan, + struct list_head *src, struct list_head *dst, + unsigned long *scanned) { - int nr_taken = 0; + unsigned long nr_taken = 0; struct page *page; - int scan = 0; + unsigned long scan = 0; while (scan++ < nr_to_scan && !list_empty(src)) { struct list_head *target; @@ -1106,20 +1109,22 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src, /* * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed */ -static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *sc) +static void shrink_cache(unsigned long max_scan, struct zone *zone, + struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; + unsigned long nr_scanned = 0; pagevec_init(&pvec, 1); lru_add_drain(); spin_lock_irq(&zone->lru_lock); - while (max_scan > 0) { + do { struct page *page; - int nr_taken; - int nr_scan; - int nr_freed; + unsigned long nr_taken; + unsigned long nr_scan; + unsigned long nr_freed; nr_taken = isolate_lru_pages(sc->swap_cluster_max, &zone->inactive_list, @@ -1131,7 +1136,7 @@ static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *s if (nr_taken == 0) goto done; - max_scan -= nr_scan; + nr_scanned += nr_scan; nr_freed = shrink_list(&page_list, sc); local_irq_disable(); @@ -1161,7 +1166,7 @@ static void shrink_cache(int max_scan, struct zone *zone, struct scan_control *s spin_lock_irq(&zone->lru_lock); } } - } + } while (nr_scanned < max_scan); spin_unlock_irq(&zone->lru_lock); done: pagevec_release(&pvec); @@ -1185,11 +1190,12 @@ done: * But we had to alter page->flags anyway. */ static void -refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc) +refill_inactive_zone(unsigned long nr_pages, struct zone *zone, + struct scan_control *sc) { - int pgmoved; + unsigned long pgmoved; int pgdeactivate = 0; - int pgscanned; + unsigned long pgscanned; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ LIST_HEAD(l_active); /* Pages to go onto the active_list */ @@ -1323,8 +1329,8 @@ refill_inactive_zone(int nr_pages, struct zone *zone, struct scan_control *sc) /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ -static void -shrink_zone(int priority, struct zone *zone, struct scan_control *sc) +static void shrink_zone(int priority, struct zone *zone, + struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; @@ -1387,8 +1393,8 @@ shrink_zone(int priority, struct zone *zone, struct scan_control *sc) * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. 
*/ -static void -shrink_caches(int priority, struct zone **zones, struct scan_control *sc) +static void shrink_caches(int priority, struct zone **zones, + struct scan_control *sc) { int i; @@ -1425,11 +1431,12 @@ shrink_caches(int priority, struct zone **zones, struct scan_control *sc) * holds filesystem locks which prevent writeout this might not work, and the * allocation attempt will fail. */ -int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) +unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) { int priority; int ret = 0; - int total_scanned = 0, total_reclaimed = 0; + unsigned long total_scanned = 0; + unsigned long total_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; int i; @@ -1525,13 +1532,15 @@ out: * the page allocator fallback scheme to ensure that aging of pages is balanced * across the zones. */ -static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order) +static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, + int order) { - int to_free = nr_pages; + unsigned long to_free = nr_pages; int all_zones_ok; int priority; int i; - int total_scanned, total_reclaimed; + unsigned long total_scanned; + unsigned long total_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, @@ -1776,22 +1785,23 @@ void wakeup_kswapd(struct zone *zone, int order) * Try to free `nr_pages' of memory, system-wide. Returns the number of freed * pages. */ -int shrink_all_memory(int nr_pages) +unsigned long shrink_all_memory(unsigned long nr_pages) { pg_data_t *pgdat; - int nr_to_free = nr_pages; - int ret = 0; + unsigned long nr_to_free = nr_pages; + unsigned long ret = 0; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; current->reclaim_state = &reclaim_state; for_each_pgdat(pgdat) { - int freed; + unsigned long freed; + freed = balance_pgdat(pgdat, nr_to_free, 0); ret += freed; nr_to_free -= freed; - if (nr_to_free <= 0) + if ((long)nr_to_free <= 0) break; } current->reclaim_state = NULL; @@ -1805,8 +1815,7 @@ int shrink_all_memory(int nr_pages) away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. 
*/ static int __devinit cpu_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) + unsigned long action, void *hcpu) { pg_data_t *pgdat; cpumask_t mask; @@ -1826,10 +1835,15 @@ static int __devinit cpu_callback(struct notifier_block *nfb, static int __init kswapd_init(void) { pg_data_t *pgdat; + swap_setup(); - for_each_pgdat(pgdat) - pgdat->kswapd - = find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL)); + for_each_pgdat(pgdat) { + pid_t pid; + + pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); + BUG_ON(pid < 0); + pgdat->kswapd = find_task_by_pid(pid); + } total_memory = nr_free_pagecache_pages(); hotcpu_notifier(cpu_callback, 0); return 0; @@ -1873,7 +1887,7 @@ int zone_reclaim_interval __read_mostly = 30*HZ; */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { - const int nr_pages = 1 << order; + const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; int priority; @@ -1881,7 +1895,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), .nr_mapped = read_page_state(nr_mapped), - .swap_cluster_max = max(nr_pages, SWAP_CLUSTER_MAX), + .swap_cluster_max = max_t(unsigned long, nr_pages, + SWAP_CLUSTER_MAX), .gfp_mask = gfp_mask, }; @@ -1966,4 +1981,3 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) return __zone_reclaim(zone, gfp_mask, order); } #endif - -- cgit v1.2.3-70-g09d2 From 05ff51376f01fd8837946a4f8144a84f6cc71c19 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Mar 2006 00:08:20 -0800 Subject: [PATCH] vmscan return nr_reclaimed Change all the vmscan functions to return the number-of-reclaimed pages and remove scan_control.nr_reclaimed. Saves ten-odd bytes of text and makes things clearer and more consistent. The patch also changes the behaviour of zone_reclaim() when it falls back to slab shrinking. Christoph says "Setting this to one means that we will rescan and shrink the slab for each allocation if we are out of zone memory and RECLAIM_SLAB is set. Plus if we do an order 0 allocation we do not go off node as intended. "We better set this to zero. This means the allocation will go offnode despite us having potentially freed lots of memory on the zone. Future allocations can then again be done from this zone."
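The calling-convention change can be sketched like this (an illustration, not text from the patch; the before/after lines are taken from the diff that follows):

    /* before: the count is accumulated in the struct and read back */
    sc.nr_reclaimed = 0;
    shrink_zone(priority, zone, &sc);
    total_reclaimed += sc.nr_reclaimed;

    /* after: each reclaim function returns the count directly */
    nr_reclaimed += shrink_zone(priority, zone, &sc);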
Cc: Nick Piggin Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 77 ++++++++++++++++++++++++++++++------------------------------- 1 file changed, 38 insertions(+), 39 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 62cd7cd257e..8f6ad13d34f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -55,9 +55,6 @@ struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; - /* Incremented by the number of pages reclaimed */ - unsigned long nr_reclaimed; - unsigned long nr_mapped; /* From page_state */ /* This context's GFP mask */ @@ -409,7 +406,7 @@ cannot_free: } /* - * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed + * shrink_list return the number of reclaimed pages */ static unsigned long shrink_list(struct list_head *page_list, struct scan_control *sc) @@ -417,7 +414,7 @@ static unsigned long shrink_list(struct list_head *page_list, LIST_HEAD(ret_pages); struct pagevec freed_pvec; int pgactivate = 0; - unsigned long reclaimed = 0; + unsigned long nr_reclaimed = 0; cond_resched(); @@ -557,7 +554,7 @@ static unsigned long shrink_list(struct list_head *page_list, free_it: unlock_page(page); - reclaimed++; + nr_reclaimed++; if (!pagevec_add(&freed_pvec, page)) __pagevec_release_nonlru(&freed_pvec); continue; @@ -575,8 +572,7 @@ keep: if (pagevec_count(&freed_pvec)) __pagevec_release_nonlru(&freed_pvec); mod_page_state(pgactivate, pgactivate); - sc->nr_reclaimed += reclaimed; - return reclaimed; + return nr_reclaimed; } #ifdef CONFIG_MIGRATION @@ -1107,14 +1103,15 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, } /* - * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed + * shrink_cache() return the number of reclaimed pages */ -static void shrink_cache(unsigned long max_scan, struct zone *zone, - struct scan_control *sc) +static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone, + struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; unsigned long nr_scanned = 0; + unsigned long nr_reclaimed = 0; pagevec_init(&pvec, 1); @@ -1138,7 +1135,7 @@ static void shrink_cache(unsigned long max_scan, struct zone *zone, nr_scanned += nr_scan; nr_freed = shrink_list(&page_list, sc); - + nr_reclaimed += nr_freed; local_irq_disable(); if (current_is_kswapd()) { __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); @@ -1170,6 +1167,7 @@ static void shrink_cache(unsigned long max_scan, struct zone *zone, spin_unlock_irq(&zone->lru_lock); done: pagevec_release(&pvec); + return nr_reclaimed; } /* @@ -1329,12 +1327,13 @@ refill_inactive_zone(unsigned long nr_pages, struct zone *zone, /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 
*/ -static void shrink_zone(int priority, struct zone *zone, - struct scan_control *sc) +static unsigned long shrink_zone(int priority, struct zone *zone, + struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; unsigned long nr_to_scan; + unsigned long nr_reclaimed = 0; atomic_inc(&zone->reclaim_in_progress); @@ -1368,13 +1367,14 @@ static void shrink_zone(int priority, struct zone *zone, nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); nr_inactive -= nr_to_scan; - shrink_cache(nr_to_scan, zone, sc); + nr_reclaimed += shrink_cache(nr_to_scan, zone, sc); } } throttle_vm_writeout(); atomic_dec(&zone->reclaim_in_progress); + return nr_reclaimed; } /* @@ -1393,9 +1393,10 @@ static void shrink_zone(int priority, struct zone *zone, * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ -static void shrink_caches(int priority, struct zone **zones, - struct scan_control *sc) +static unsigned long shrink_caches(int priority, struct zone **zones, + struct scan_control *sc) { + unsigned long nr_reclaimed = 0; int i; for (i = 0; zones[i] != NULL; i++) { @@ -1414,8 +1415,9 @@ static void shrink_caches(int priority, struct zone **zones, if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ - shrink_zone(priority, zone, sc); + nr_reclaimed += shrink_zone(priority, zone, sc); } + return nr_reclaimed; } /* @@ -1436,7 +1438,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) int priority; int ret = 0; unsigned long total_scanned = 0; - unsigned long total_reclaimed = 0; + unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; int i; @@ -1462,18 +1464,16 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) for (priority = DEF_PRIORITY; priority >= 0; priority--) { sc.nr_mapped = read_page_state(nr_mapped); sc.nr_scanned = 0; - sc.nr_reclaimed = 0; if (!priority) disable_swap_token(); - shrink_caches(priority, zones, &sc); + nr_reclaimed += shrink_caches(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { - sc.nr_reclaimed += reclaim_state->reclaimed_slab; + nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } total_scanned += sc.nr_scanned; - total_reclaimed += sc.nr_reclaimed; - if (total_reclaimed >= sc.swap_cluster_max) { + if (nr_reclaimed >= sc.swap_cluster_max) { ret = 1; goto out; } @@ -1540,7 +1540,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, int priority; int i; unsigned long total_scanned; - unsigned long total_reclaimed; + unsigned long nr_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; struct scan_control sc = { .gfp_mask = GFP_KERNEL, @@ -1550,7 +1550,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, loop_again: total_scanned = 0; - total_reclaimed = 0; + nr_reclaimed = 0; sc.may_writepage = !laptop_mode, sc.nr_mapped = read_page_state(nr_mapped); @@ -1632,13 +1632,11 @@ scan: if (zone->prev_priority > priority) zone->prev_priority = priority; sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - shrink_zone(priority, zone, &sc); + nr_reclaimed += shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); - sc.nr_reclaimed += reclaim_state->reclaimed_slab; - total_reclaimed += sc.nr_reclaimed; + nr_reclaimed += 
reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; if (zone->all_unreclaimable) continue; @@ -1651,10 +1649,10 @@ scan: * even in laptop mode */ if (total_scanned > SWAP_CLUSTER_MAX * 2 && - total_scanned > total_reclaimed+total_reclaimed/2) + total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } - if (nr_pages && to_free > total_reclaimed) + if (nr_pages && to_free > nr_reclaimed) continue; /* swsusp: need to do more work */ if (all_zones_ok) break; /* kswapd: all done */ @@ -1671,7 +1669,7 @@ scan: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ - if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages)) + if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) break; } out: @@ -1685,7 +1683,7 @@ out: goto loop_again; } - return total_reclaimed; + return nr_reclaimed; } /* @@ -1891,6 +1889,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) struct task_struct *p = current; struct reclaim_state reclaim_state; int priority; + unsigned long nr_reclaimed = 0; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), @@ -1917,11 +1916,11 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) */ priority = ZONE_RECLAIM_PRIORITY; do { - shrink_zone(priority, zone, &sc); + nr_reclaimed += shrink_zone(priority, zone, &sc); priority--; - } while (priority >= 0 && sc.nr_reclaimed < nr_pages); + } while (priority >= 0 && nr_reclaimed < nr_pages); - if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { + if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { /* * shrink_slab does not currently allow us to determine * how many pages were freed in the zone. So we just @@ -1936,10 +1935,10 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); - if (sc.nr_reclaimed == 0) + if (nr_reclaimed == 0) zone->last_unsuccessful_zone_reclaim = jiffies; - return sc.nr_reclaimed >= nr_pages; + return nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) -- cgit v1.2.3-70-g09d2 From 1742f19fa920cdd6905f0db5898524dde22ab2a4 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 22 Mar 2006 00:08:21 -0800 Subject: [PATCH] vmscan: rename functions We have: try_to_free_pages ->shrink_caches(struct zone **zones, ..) ->shrink_zone(struct zone *, ...) ->shrink_cache(struct zone *, ...) ->shrink_list(struct list_head *, ...) ->refill_inactive_list((struct zone *, ...) which is fairly irrational. Rename things so that we have try_to_free_pages ->shrink_zones(struct zone **zones, ..) ->shrink_zone(struct zone *, ...) ->shrink_inactive_list(struct zone *, ...) ->shrink_page_list(struct list_head *, ...) ->shrink_active_list(struct zone *, ...) Cc: Nick Piggin Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 8f6ad13d34f..2d5d4864de8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -298,7 +298,8 @@ static void handle_write_error(struct address_space *mapping, } /* - * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). + * pageout is called by shrink_page_list() for each dirty page. + * Calls ->writepage(). 
*/ static pageout_t pageout(struct page *page, struct address_space *mapping) { @@ -406,10 +407,10 @@ cannot_free: } /* - * shrink_list return the number of reclaimed pages + * shrink_page_list() returns the number of reclaimed pages */ -static unsigned long shrink_list(struct list_head *page_list, - struct scan_control *sc) +static unsigned long shrink_page_list(struct list_head *page_list, + struct scan_control *sc) { LIST_HEAD(ret_pages); struct pagevec freed_pvec; @@ -1103,10 +1104,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, } /* - * shrink_cache() return the number of reclaimed pages + * shrink_inactive_list() is a helper for shrink_zone(). It returns the number + * of reclaimed pages */ -static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone, - struct scan_control *sc) +static unsigned long shrink_inactive_list(unsigned long max_scan, + struct zone *zone, struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; @@ -1134,7 +1136,7 @@ static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone, goto done; nr_scanned += nr_scan; - nr_freed = shrink_list(&page_list, sc); + nr_freed = shrink_page_list(&page_list, sc); nr_reclaimed += nr_freed; local_irq_disable(); if (current_is_kswapd()) { @@ -1187,9 +1189,8 @@ done: * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. */ -static void -refill_inactive_zone(unsigned long nr_pages, struct zone *zone, - struct scan_control *sc) +static void shrink_active_list(unsigned long nr_pages, struct zone *zone, + struct scan_control *sc) { unsigned long pgmoved; int pgdeactivate = 0; @@ -1360,14 +1361,15 @@ static unsigned long shrink_zone(int priority, struct zone *zone, nr_to_scan = min(nr_active, (unsigned long)sc->swap_cluster_max); nr_active -= nr_to_scan; - refill_inactive_zone(nr_to_scan, zone, sc); + shrink_active_list(nr_to_scan, zone, sc); } if (nr_inactive) { nr_to_scan = min(nr_inactive, (unsigned long)sc->swap_cluster_max); nr_inactive -= nr_to_scan; - nr_reclaimed += shrink_cache(nr_to_scan, zone, sc); + nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, + sc); } } @@ -1393,7 +1395,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone, * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ -static unsigned long shrink_caches(int priority, struct zone **zones, +static unsigned long shrink_zones(int priority, struct zone **zones, struct scan_control *sc) { unsigned long nr_reclaimed = 0; @@ -1466,7 +1468,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) sc.nr_scanned = 0; if (!priority) disable_swap_token(); - nr_reclaimed += shrink_caches(priority, zones, &sc); + nr_reclaimed += shrink_zones(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { nr_reclaimed += reclaim_state->reclaimed_slab; -- cgit v1.2.3-70-g09d2 From 7fb2d46d396b2491818f8e43b01049b3234e6c07 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 22 Mar 2006 00:08:22 -0800 Subject: [PATCH] zone_reclaim: additional comments and cleanup Add some comments to explain how zone reclaim works. And it fixes the following issues: - PF_SWAPWRITE needs to be set for RECLAIM_SWAP to be able to write out pages to swap. Currently RECLAIM_SWAP may not do that. 
- remove setting nr_reclaimed pages after slab reclaim since the slab shrinking code does not use that and the nr_reclaimed pages is just right for the intended follow up action. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 2d5d4864de8..c712b946e4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1887,6 +1887,7 @@ int zone_reclaim_interval __read_mostly = 30*HZ; */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { + /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; @@ -1924,9 +1925,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { /* - * shrink_slab does not currently allow us to determine - * how many pages were freed in the zone. So we just - * shake the slab and then go offnode for a single allocation. + * shrink_slab() does not currently allow us to determine how + * many pages were freed in this zone. So we just shake the slab + * a bit and then go off node for this particular allocation + * despite possibly having freed enough memory to allocate in + * this zone. If we freed local memory then the next + * allocations will be local again. * * shrink_slab will free memory on all zones and may take * a long time. @@ -1937,8 +1941,14 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); - if (nr_reclaimed == 0) + if (nr_reclaimed == 0) { + /* + * We were unable to reclaim enough pages to stay on node. We + * now allow off node accesses for a certain time period before + * trying again to reclaim pages from the local zone. + */ zone->last_unsuccessful_zone_reclaim = jiffies; + } return nr_reclaimed >= nr_pages; } -- cgit v1.2.3-70-g09d2 From c9b02d970c385a253edb36c87643b0df706b50b4 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 22 Mar 2006 00:08:23 -0800 Subject: [PATCH] mm: isolate_lru_pages() scan count fix In isolate_lru_pages(), *scanned reports one more scan because the scan counter is increased one more time on exit of the while-loop. Change the while-loop to for-loop to fix it. Signed-off-by: Nick Piggin Signed-off-by: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index c712b946e4f..85e95f44502 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1074,9 +1074,9 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, { unsigned long nr_taken = 0; struct page *page; - unsigned long scan = 0; + unsigned long scan; - while (scan++ < nr_to_scan && !list_empty(src)) { + for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { struct list_head *target; page = lru_to_page(src); prefetchw_prev_lru_page(page, src, flags); -- cgit v1.2.3-70-g09d2 From fb8d14e172a29ba5ac69a73b61196be86fdfc3e1 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Wed, 22 Mar 2006 00:08:28 -0800 Subject: [PATCH] mm: shrink_inactive_list() nr_scan accounting fix In shrink_inactive_list(), nr_scan is not accounted when nr_taken is 0. But 0 pages taken does not mean 0 pages scanned.
Move the goto statement below the accounting code to fix it. Signed-off-by: Wu Fengguang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 85e95f44502..486184d2b50 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1132,9 +1132,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, zone->pages_scanned += nr_scan; spin_unlock_irq(&zone->lru_lock); - if (nr_taken == 0) - goto done; - nr_scanned += nr_scan; nr_freed = shrink_page_list(&page_list, sc); nr_reclaimed += nr_freed; @@ -1146,6 +1143,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, __mod_page_state_zone(zone, pgscan_direct, nr_scan); __mod_page_state_zone(zone, pgsteal, nr_freed); + if (nr_taken == 0) + goto done; + spin_lock(&zone->lru_lock); /* * Put back any unfreeable pages. @@ -1166,8 +1166,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan, } } } while (nr_scanned < max_scan); - spin_unlock_irq(&zone->lru_lock); + spin_unlock(&zone->lru_lock); done: + local_irq_enable(); pagevec_release(&pvec); return nr_reclaimed; } -- cgit v1.2.3-70-g09d2 From 0f8053a509ceba4a077a50ea7b77039b5559b428 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Wed, 22 Mar 2006 00:08:33 -0800 Subject: [PATCH] mm: make __put_page internal Remove __put_page from outside the core mm/. It is dangerous because it does not handle compound pages nicely, and misses 1->0 transitions. If a user later appears that really needs the extra speed we can reevaluate. Signed-off-by: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 - mm/filemap.c | 2 ++ mm/internal.h | 11 +++++++++++ mm/vmscan.c | 2 ++ 4 files changed, 15 insertions(+), 1 deletion(-) (limited to 'mm/vmscan.c') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1850cf8bad6..9b3cdfc8046 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -308,7 +308,6 @@ static inline int get_page_unless_zero(struct page *page) } #define set_page_count(p,v) atomic_set(&(p)->_count, (v)) -#define __put_page(p) atomic_dec(&(p)->_count) extern void FASTCALL(__page_cache_release(struct page *)); diff --git a/mm/filemap.c b/mm/filemap.c index 44da3d47699..e8f58f7dd7a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -30,6 +30,8 @@ #include #include #include "filemap.h" +#include "internal.h" + /* * FIXME: remove all knowledge of the buffer layer from the core VM */ diff --git a/mm/internal.h b/mm/internal.h index 17256bb2f4e..e3042db2a2d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -8,6 +8,10 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ +#ifndef __MM_INTERNAL_H +#define __MM_INTERNAL_H + +#include static inline void set_page_refs(struct page *page, int order) { @@ -26,5 +30,12 @@ static inline void set_page_refs(struct page *page, int order) #endif /* CONFIG_MMU */ } +static inline void __put_page(struct page *page) +{ + atomic_dec(&page->_count); +} + extern void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order); + +#endif diff --git a/mm/vmscan.c b/mm/vmscan.c index 486184d2b50..3914a94aa90 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -39,6 +39,8 @@ #include +#include "internal.h" + /* possible outcome of pageout() */ typedef enum { /* failed to write page out, page is locked */ -- cgit v1.2.3-70-g09d2 From 6e5ef1a96e6e3b123da56292bc35017c8c401491 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 22 Mar 2006 00:08:45 -0800 Subject: [PATCH] vmscan: remove obsolete checks from shrink_list() and fix unlikely in refill_inactive_zone() As suggested by Marcelo: 1. The optimization introduced recently for not calling page_referenced() during zone reclaim makes two additional checks in shrink_list unnecessary. 2. The if (unlikely(sc->may_swap)) in refill_inactive_zone is optimized for the zone_reclaim case. However, most people's systems only do swap. Undo that. Signed-off-by: Christoph Lameter Cc: Marcelo Tosatti Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index 3914a94aa90..f713e9f6ac7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -460,12 +460,9 @@ static unsigned long shrink_page_list(struct list_head *page_list, * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ - if (PageAnon(page) && !PageSwapCache(page)) { - if (!sc->may_swap) - goto keep_locked; + if (PageAnon(page) && !PageSwapCache(page)) if (!add_to_swap(page, GFP_ATOMIC)) goto activate_locked; - } #endif /* CONFIG_SWAP */ mapping = page_mapping(page); @@ -477,12 +474,6 @@ static unsigned long shrink_page_list(struct list_head *page_list, * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - /* - * No unmapping if we do not swap - */ - if (!sc->may_swap) - goto keep_locked; - switch (try_to_unmap(page, 0)) { case SWAP_FAIL: goto activate_locked; @@ -1205,7 +1196,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, struct pagevec pvec; int reclaim_mapped = 0; - if (unlikely(sc->may_swap)) { + if (sc->may_swap) { long mapped_ratio; long distress; long swap_tendency; -- cgit v1.2.3-70-g09d2 From 248a0301e703cbf781aa02a91bcfc6da75870dd7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 22 Mar 2006 00:09:04 -0800 Subject: [PATCH] mm: make shrink_all_memory try harder Make shrink_all_memory() repeat the attempts to free more memory if there seem to be no pages to free. Signed-off-by: Rafael J.
Wysocki Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'mm/vmscan.c') diff --git a/mm/vmscan.c b/mm/vmscan.c index f713e9f6ac7..548e023c193 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -1784,11 +1785,13 @@ unsigned long shrink_all_memory(unsigned long nr_pages) pg_data_t *pgdat; unsigned long nr_to_free = nr_pages; unsigned long ret = 0; + unsigned retry = 2; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; current->reclaim_state = &reclaim_state; +repeat: for_each_pgdat(pgdat) { unsigned long freed; @@ -1798,6 +1801,10 @@ unsigned long shrink_all_memory(unsigned long nr_pages) if ((long)nr_to_free <= 0) break; } + if (retry-- && ret < nr_pages) { + blk_congestion_wait(WRITE, HZ/5); + goto repeat; + } current->reclaim_state = NULL; return ret; } -- cgit v1.2.3-70-g09d2 From b20a35035f983f4ac7e29c4a68f30e43510007e0 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 22 Mar 2006 00:09:12 -0800 Subject: [PATCH] page migration reorg Centralize the page migration functions in anticipation of additional tinkering. Creates a new file mm/migrate.c 1. Extract buffer_migrate_page() from fs/buffer.c 2. Extract central migration code from vmscan.c 3. Extract some components from mempolicy.c 4. Export pageout() and remove_from_swap() from vmscan.c 5. Make it possible to configure NUMA systems without page migration and non-NUMA systems with page migration. I had to so some #ifdeffing in mempolicy.c that may need a cleanup. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/buffer.c | 62 ----- fs/xfs/linux-2.6/xfs_buf.c | 1 + include/linux/migrate.h | 36 +++ include/linux/swap.h | 34 ++- mm/Kconfig | 6 + mm/Makefile | 2 + mm/mempolicy.c | 113 ++------ mm/migrate.c | 655 +++++++++++++++++++++++++++++++++++++++++++++ mm/swap_state.c | 1 + mm/vmscan.c | 491 +-------------------------------- 10 files changed, 741 insertions(+), 660 deletions(-) create mode 100644 include/linux/migrate.h create mode 100644 mm/migrate.c (limited to 'mm/vmscan.c') diff --git a/fs/buffer.c b/fs/buffer.c index a9b39940200..1d3683d496f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3050,68 +3050,6 @@ asmlinkage long sys_bdflush(int func, long data) return 0; } -/* - * Migration function for pages with buffers. This function can only be used - * if the underlying filesystem guarantees that no other references to "page" - * exist. 
- */ -#ifdef CONFIG_MIGRATION -int buffer_migrate_page(struct page *newpage, struct page *page) -{ - struct address_space *mapping = page->mapping; - struct buffer_head *bh, *head; - int rc; - - if (!mapping) - return -EAGAIN; - - if (!page_has_buffers(page)) - return migrate_page(newpage, page); - - head = page_buffers(page); - - rc = migrate_page_remove_references(newpage, page, 3); - if (rc) - return rc; - - bh = head; - do { - get_bh(bh); - lock_buffer(bh); - bh = bh->b_this_page; - - } while (bh != head); - - ClearPagePrivate(page); - set_page_private(newpage, page_private(page)); - set_page_private(page, 0); - put_page(page); - get_page(newpage); - - bh = head; - do { - set_bh_page(bh, newpage, bh_offset(bh)); - bh = bh->b_this_page; - - } while (bh != head); - - SetPagePrivate(newpage); - - migrate_page_copy(newpage, page); - - bh = head; - do { - unlock_buffer(bh); - put_bh(bh); - bh = bh->b_this_page; - - } while (bh != head); - - return 0; -} -EXPORT_SYMBOL(buffer_migrate_page); -#endif - /* * Buffer-head allocation */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index bfb4f2917bb..8cdfa415165 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "xfs_linux.h" STATIC kmem_zone_t *xfs_buf_zone; diff --git a/include/linux/migrate.h b/include/linux/migrate.h new file mode 100644 index 00000000000..7d09962c3c0 --- /dev/null +++ b/include/linux/migrate.h @@ -0,0 +1,36 @@ +#ifndef _LINUX_MIGRATE_H +#define _LINUX_MIGRATE_H + +#include +#include + +#ifdef CONFIG_MIGRATION +extern int isolate_lru_page(struct page *p, struct list_head *pagelist); +extern int putback_lru_pages(struct list_head *l); +extern int migrate_page(struct page *, struct page *); +extern void migrate_page_copy(struct page *, struct page *); +extern int migrate_page_remove_references(struct page *, struct page *, int); +extern int migrate_pages(struct list_head *l, struct list_head *t, + struct list_head *moved, struct list_head *failed); +int migrate_pages_to(struct list_head *pagelist, + struct vm_area_struct *vma, int dest); +extern int fail_migrate_page(struct page *, struct page *); + +extern int migrate_prep(void); + +#else + +static inline int isolate_lru_page(struct page *p, struct list_head *list) + { return -ENOSYS; } +static inline int putback_lru_pages(struct list_head *l) { return 0; } +static inline int migrate_pages(struct list_head *l, struct list_head *t, + struct list_head *moved, struct list_head *failed) { return -ENOSYS; } + +static inline int migrate_prep(void) { return -ENOSYS; } + +/* Possible settings for the migrate_page() method in address_operations */ +#define migrate_page NULL +#define fail_migrate_page NULL + +#endif /* CONFIG_MIGRATION */ +#endif /* _LINUX_MIGRATE_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 3dc6c89c49b..12415dd9445 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -175,6 +175,21 @@ extern void swap_setup(void); extern unsigned long try_to_free_pages(struct zone **, gfp_t); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; +extern int remove_mapping(struct address_space *mapping, struct page *page); + +/* possible outcome of pageout() */ +typedef enum { + /* failed to write page out, page is locked */ + PAGE_KEEP, + /* move page to the active list, page is locked */ + PAGE_ACTIVATE, + /* page has been sent to the disk successfully, page is unlocked */ + PAGE_SUCCESS, + /* page is clean 
and locked */ + PAGE_CLEAN, +} pageout_t; + +extern pageout_t pageout(struct page *page, struct address_space *mapping); #ifdef CONFIG_NUMA extern int zone_reclaim_mode; @@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) } #endif -#ifdef CONFIG_MIGRATION -extern int isolate_lru_page(struct page *p); -extern unsigned long putback_lru_pages(struct list_head *l); -extern int migrate_page(struct page *, struct page *); -extern void migrate_page_copy(struct page *, struct page *); -extern int migrate_page_remove_references(struct page *, struct page *, int); -extern unsigned long migrate_pages(struct list_head *l, struct list_head *t, - struct list_head *moved, struct list_head *failed); -extern int fail_migrate_page(struct page *, struct page *); -#else -static inline int isolate_lru_page(struct page *p) { return -ENOSYS; } -static inline int putback_lru_pages(struct list_head *l) { return 0; } -static inline int migrate_pages(struct list_head *l, struct list_head *t, - struct list_head *moved, struct list_head *failed) { return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL -#define fail_migrate_page NULL -#endif - #ifdef CONFIG_MMU /* linux/mm/shmem.c */ extern int shmem_unuse(swp_entry_t entry, struct page *page); diff --git a/mm/Kconfig b/mm/Kconfig index a9cb80ae640..bd80460360d 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -137,5 +137,11 @@ config SPLIT_PTLOCK_CPUS # support for page migration # config MIGRATION + bool "Page migration" def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM depends on SWAP + help + Allows the migration of the physical location of pages of processes + while the virtual addresses are not changed. This is useful for + example on NUMA systems to put pages nearer to the processors accessing + the page. diff --git a/mm/Makefile b/mm/Makefile index 9aa03fa1dcc..f10c753dce6 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -22,3 +22,5 @@ obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o +obj-$(CONFIG_MIGRATION) += migrate.o + diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 96195dcb62e..e93cc740c22 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -86,6 +86,7 @@ #include #include #include +#include #include #include @@ -95,9 +96,6 @@ #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */ #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2) /* Gather statistics */ -/* The number of pages to migrate per call to migrate_pages() */ -#define MIGRATE_CHUNK_SIZE 256 - static struct kmem_cache *policy_cache; static struct kmem_cache *sn_cache; @@ -331,17 +329,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct vm_area_struct *first, *vma, *prev; if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { - /* Must have swap device for migration */ - if (nr_swap_pages <= 0) - return ERR_PTR(-ENODEV); - /* - * Clear the LRU lists so pages can be isolated. - * Note that pages may be moved off the LRU after we have - * drained them. Those pages will fail to migrate like other - * pages that may be busy. 
- */ - lru_add_drain_all(); + err = migrate_prep(); + if (err) + return ERR_PTR(err); } first = find_vma(mm, start); @@ -550,92 +541,18 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask, return err; } +#ifdef CONFIG_MIGRATION /* * page migration */ - static void migrate_page_add(struct page *page, struct list_head *pagelist, unsigned long flags) { /* * Avoid migrating a page that is shared with others. */ - if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) { - if (isolate_lru_page(page)) - list_add_tail(&page->lru, pagelist); - } -} - -/* - * Migrate the list 'pagelist' of pages to a certain destination. - * - * Specify destination with either non-NULL vma or dest_node >= 0 - * Return the number of pages not migrated or error code - */ -static int migrate_pages_to(struct list_head *pagelist, - struct vm_area_struct *vma, int dest) -{ - LIST_HEAD(newlist); - LIST_HEAD(moved); - LIST_HEAD(failed); - int err = 0; - unsigned long offset = 0; - int nr_pages; - struct page *page; - struct list_head *p; - -redo: - nr_pages = 0; - list_for_each(p, pagelist) { - if (vma) { - /* - * The address passed to alloc_page_vma is used to - * generate the proper interleave behavior. We fake - * the address here by an increasing offset in order - * to get the proper distribution of pages. - * - * No decision has been made as to which page - * a certain old page is moved to so we cannot - * specify the correct address. - */ - page = alloc_page_vma(GFP_HIGHUSER, vma, - offset + vma->vm_start); - offset += PAGE_SIZE; - } - else - page = alloc_pages_node(dest, GFP_HIGHUSER, 0); - - if (!page) { - err = -ENOMEM; - goto out; - } - list_add_tail(&page->lru, &newlist); - nr_pages++; - if (nr_pages > MIGRATE_CHUNK_SIZE) - break; - } - err = migrate_pages(pagelist, &newlist, &moved, &failed); - - putback_lru_pages(&moved); /* Call release pages instead ?? */ - - if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist)) - goto redo; -out: - /* Return leftover allocated pages */ - while (!list_empty(&newlist)) { - page = list_entry(newlist.next, struct page, lru); - list_del(&page->lru); - __free_page(page); - } - list_splice(&failed, pagelist); - if (err < 0) - return err; - - /* Calculate number of leftover pages */ - nr_pages = 0; - list_for_each(p, pagelist) - nr_pages++; - return nr_pages; + if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) + isolate_lru_page(page, pagelist); } /* @@ -742,8 +659,23 @@ int do_migrate_pages(struct mm_struct *mm, if (err < 0) return err; return busy; + } +#else + +static void migrate_page_add(struct page *page, struct list_head *pagelist, + unsigned long flags) +{ +} + +int do_migrate_pages(struct mm_struct *mm, + const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) +{ + return -ENOSYS; +} +#endif + long do_mbind(unsigned long start, unsigned long len, unsigned long mode, nodemask_t *nmask, unsigned long flags) { @@ -808,6 +740,7 @@ long do_mbind(unsigned long start, unsigned long len, if (!err && nr_failed && (flags & MPOL_MF_STRICT)) err = -EIO; } + if (!list_empty(&pagelist)) putback_lru_pages(&pagelist); diff --git a/mm/migrate.c b/mm/migrate.c new file mode 100644 index 00000000000..09f6e4aa87f --- /dev/null +++ b/mm/migrate.c @@ -0,0 +1,655 @@ +/* + * Memory Migration functionality - linux/mm/migration.c + * + * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter + * + * Page migration was first developed in the context of the memory hotplug + * project. 
The main authors of the migration code are: + * + * IWAMOTO Toshihiro + * Hirokazu Takahashi + * Dave Hansen + * Christoph Lameter + */ + +#include +#include +#include +#include +#include /* for try_to_release_page(), + buffer_heads_over_limit */ +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +#include "internal.h" + +/* The maximum number of pages to take off the LRU for migration */ +#define MIGRATE_CHUNK_SIZE 256 + +#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) + +/* + * Isolate one page from the LRU lists. If successful put it onto + * the indicated list with elevated page count. + * + * Result: + * -EBUSY: page not on LRU list + * 0: page removed from LRU list and added to the specified list. + */ +int isolate_lru_page(struct page *page, struct list_head *pagelist) +{ + int ret = -EBUSY; + + if (PageLRU(page)) { + struct zone *zone = page_zone(page); + + spin_lock_irq(&zone->lru_lock); + if (PageLRU(page)) { + ret = 0; + get_page(page); + ClearPageLRU(page); + if (PageActive(page)) + del_page_from_active_list(zone, page); + else + del_page_from_inactive_list(zone, page); + list_add_tail(&page->lru, pagelist); + } + spin_unlock_irq(&zone->lru_lock); + } + return ret; +} + +/* + * migrate_prep() needs to be called after we have compiled the list of pages + * to be migrated using isolate_lru_page() but before we begin a series of calls + * to migrate_pages(). + */ +int migrate_prep(void) +{ + /* Must have swap device for migration */ + if (nr_swap_pages <= 0) + return -ENODEV; + + /* + * Clear the LRU lists so pages can be isolated. + * Note that pages may be moved off the LRU after we have + * drained them. Those pages will fail to migrate like other + * pages that may be busy. + */ + lru_add_drain_all(); + + return 0; +} + +static inline void move_to_lru(struct page *page) +{ + list_del(&page->lru); + if (PageActive(page)) { + /* + * lru_cache_add_active checks that + * the PG_active bit is off. + */ + ClearPageActive(page); + lru_cache_add_active(page); + } else { + lru_cache_add(page); + } + put_page(page); +} + +/* + * Add isolated pages on the list back to the LRU. + * + * returns the number of pages put back. 
+ */ +int putback_lru_pages(struct list_head *l) +{ + struct page *page; + struct page *page2; + int count = 0; + + list_for_each_entry_safe(page, page2, l, lru) { + move_to_lru(page); + count++; + } + return count; +} + +/* + * Non migratable page + */ +int fail_migrate_page(struct page *newpage, struct page *page) +{ + return -EIO; +} +EXPORT_SYMBOL(fail_migrate_page); + +/* + * swapout a single page + * page is locked upon entry, unlocked on exit + */ +static int swap_page(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + + if (page_mapped(page) && mapping) + if (try_to_unmap(page, 1) != SWAP_SUCCESS) + goto unlock_retry; + + if (PageDirty(page)) { + /* Page is dirty, try to write it out here */ + switch(pageout(page, mapping)) { + case PAGE_KEEP: + case PAGE_ACTIVATE: + goto unlock_retry; + + case PAGE_SUCCESS: + goto retry; + + case PAGE_CLEAN: + ; /* try to free the page below */ + } + } + + if (PagePrivate(page)) { + if (!try_to_release_page(page, GFP_KERNEL) || + (!mapping && page_count(page) == 1)) + goto unlock_retry; + } + + if (remove_mapping(mapping, page)) { + /* Success */ + unlock_page(page); + return 0; + } + +unlock_retry: + unlock_page(page); + +retry: + return -EAGAIN; +} +EXPORT_SYMBOL(swap_page); + +/* + * Remove references for a page and establish the new page with the correct + * basic settings to be able to stop accesses to the page. + */ +int migrate_page_remove_references(struct page *newpage, + struct page *page, int nr_refs) +{ + struct address_space *mapping = page_mapping(page); + struct page **radix_pointer; + + /* + * Avoid doing any of the following work if the page count + * indicates that the page is in use or truncate has removed + * the page. + */ + if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) + return -EAGAIN; + + /* + * Establish swap ptes for anonymous pages or destroy pte + * maps for files. + * + * In order to reestablish file backed mappings the fault handlers + * will take the radix tree_lock which may then be used to stop + * processses from accessing this page until the new page is ready. + * + * A process accessing via a swap pte (an anonymous page) will take a + * page_lock on the old page which will block the process until the + * migration attempt is complete. At that time the PageSwapCache bit + * will be examined. If the page was migrated then the PageSwapCache + * bit will be clear and the operation to retrieve the page will be + * retried which will find the new page in the radix tree. Then a new + * direct mapping may be generated based on the radix tree contents. + * + * If the page was not migrated then the PageSwapCache bit + * is still set and the operation may continue. + */ + if (try_to_unmap(page, 1) == SWAP_FAIL) + /* A vma has VM_LOCKED set -> permanent failure */ + return -EPERM; + + /* + * Give up if we were unable to remove all mappings. + */ + if (page_mapcount(page)) + return -EAGAIN; + + write_lock_irq(&mapping->tree_lock); + + radix_pointer = (struct page **)radix_tree_lookup_slot( + &mapping->page_tree, + page_index(page)); + + if (!page_mapping(page) || page_count(page) != nr_refs || + *radix_pointer != page) { + write_unlock_irq(&mapping->tree_lock); + return 1; + } + + /* + * Now we know that no one else is looking at the page. + * + * Certain minimal information about a page must be available + * in order for other subsystems to properly handle the page if they + * find it through the radix tree update before we are finished + * copying the page. 
+ */ + get_page(newpage); + newpage->index = page->index; + newpage->mapping = page->mapping; + if (PageSwapCache(page)) { + SetPageSwapCache(newpage); + set_page_private(newpage, page_private(page)); + } + + *radix_pointer = newpage; + __put_page(page); + write_unlock_irq(&mapping->tree_lock); + + return 0; +} +EXPORT_SYMBOL(migrate_page_remove_references); + +/* + * Copy the page to its new location + */ +void migrate_page_copy(struct page *newpage, struct page *page) +{ + copy_highpage(newpage, page); + + if (PageError(page)) + SetPageError(newpage); + if (PageReferenced(page)) + SetPageReferenced(newpage); + if (PageUptodate(page)) + SetPageUptodate(newpage); + if (PageActive(page)) + SetPageActive(newpage); + if (PageChecked(page)) + SetPageChecked(newpage); + if (PageMappedToDisk(page)) + SetPageMappedToDisk(newpage); + + if (PageDirty(page)) { + clear_page_dirty_for_io(page); + set_page_dirty(newpage); + } + + ClearPageSwapCache(page); + ClearPageActive(page); + ClearPagePrivate(page); + set_page_private(page, 0); + page->mapping = NULL; + + /* + * If any waiters have accumulated on the new page then + * wake them up. + */ + if (PageWriteback(newpage)) + end_page_writeback(newpage); +} +EXPORT_SYMBOL(migrate_page_copy); + +/* + * Common logic to directly migrate a single page suitable for + * pages that do not use PagePrivate. + * + * Pages are locked upon entry and exit. + */ +int migrate_page(struct page *newpage, struct page *page) +{ + int rc; + + BUG_ON(PageWriteback(page)); /* Writeback must be complete */ + + rc = migrate_page_remove_references(newpage, page, 2); + + if (rc) + return rc; + + migrate_page_copy(newpage, page); + + /* + * Remove auxiliary swap entries and replace + * them with real ptes. + * + * Note that a real pte entry will allow processes that are not + * waiting on the page lock to use the new page via the page tables + * before the new page is unlocked. + */ + remove_from_swap(newpage); + return 0; +} +EXPORT_SYMBOL(migrate_page); + +/* + * migrate_pages + * + * Two lists are passed to this function. The first list + * contains the pages isolated from the LRU to be migrated. + * The second list contains new pages that the pages isolated + * can be moved to. If the second list is NULL then all + * pages are swapped out. + * + * The function returns after 10 attempts or if no pages + * are movable anymore because to has become empty + * or no retryable pages exist anymore. + * + * Return: Number of pages not migrated when "to" ran empty. + */ +int migrate_pages(struct list_head *from, struct list_head *to, + struct list_head *moved, struct list_head *failed) +{ + int retry; + int nr_failed = 0; + int pass = 0; + struct page *page; + struct page *page2; + int swapwrite = current->flags & PF_SWAPWRITE; + int rc; + + if (!swapwrite) + current->flags |= PF_SWAPWRITE; + +redo: + retry = 0; + + list_for_each_entry_safe(page, page2, from, lru) { + struct page *newpage = NULL; + struct address_space *mapping; + + cond_resched(); + + rc = 0; + if (page_count(page) == 1) + /* page was freed from under us. So we are done. */ + goto next; + + if (to && list_empty(to)) + break; + + /* + * Skip locked pages during the first two passes to give the + * functions holding the lock time to release the page. Later we + * use lock_page() to have a higher chance of acquiring the + * lock. 
+ */ + rc = -EAGAIN; + if (pass > 2) + lock_page(page); + else + if (TestSetPageLocked(page)) + goto next; + + /* + * Only wait on writeback if we have already done a pass where + * we we may have triggered writeouts for lots of pages. + */ + if (pass > 0) { + wait_on_page_writeback(page); + } else { + if (PageWriteback(page)) + goto unlock_page; + } + + /* + * Anonymous pages must have swap cache references otherwise + * the information contained in the page maps cannot be + * preserved. + */ + if (PageAnon(page) && !PageSwapCache(page)) { + if (!add_to_swap(page, GFP_KERNEL)) { + rc = -ENOMEM; + goto unlock_page; + } + } + + if (!to) { + rc = swap_page(page); + goto next; + } + + newpage = lru_to_page(to); + lock_page(newpage); + + /* + * Pages are properly locked and writeback is complete. + * Try to migrate the page. + */ + mapping = page_mapping(page); + if (!mapping) + goto unlock_both; + + if (mapping->a_ops->migratepage) { + /* + * Most pages have a mapping and most filesystems + * should provide a migration function. Anonymous + * pages are part of swap space which also has its + * own migration function. This is the most common + * path for page migration. + */ + rc = mapping->a_ops->migratepage(newpage, page); + goto unlock_both; + } + + /* + * Default handling if a filesystem does not provide + * a migration function. We can only migrate clean + * pages so try to write out any dirty pages first. + */ + if (PageDirty(page)) { + switch (pageout(page, mapping)) { + case PAGE_KEEP: + case PAGE_ACTIVATE: + goto unlock_both; + + case PAGE_SUCCESS: + unlock_page(newpage); + goto next; + + case PAGE_CLEAN: + ; /* try to migrate the page below */ + } + } + + /* + * Buffers are managed in a filesystem specific way. + * We must have no buffers or drop them. + */ + if (!page_has_buffers(page) || + try_to_release_page(page, GFP_KERNEL)) { + rc = migrate_page(newpage, page); + goto unlock_both; + } + + /* + * On early passes with mapped pages simply + * retry. There may be a lock held for some + * buffers that may go away. Later + * swap them out. + */ + if (pass > 4) { + /* + * Persistently unable to drop buffers..... As a + * measure of last resort we fall back to + * swap_page(). + */ + unlock_page(newpage); + newpage = NULL; + rc = swap_page(page); + goto next; + } + +unlock_both: + unlock_page(newpage); + +unlock_page: + unlock_page(page); + +next: + if (rc == -EAGAIN) { + retry++; + } else if (rc) { + /* Permanent failure */ + list_move(&page->lru, failed); + nr_failed++; + } else { + if (newpage) { + /* Successful migration. Return page to LRU */ + move_to_lru(newpage); + } + list_move(&page->lru, moved); + } + } + if (retry && pass++ < 10) + goto redo; + + if (!swapwrite) + current->flags &= ~PF_SWAPWRITE; + + return nr_failed + retry; +} + +/* + * Migration function for pages with buffers. This function can only be used + * if the underlying filesystem guarantees that no other references to "page" + * exist. 
+ */ +int buffer_migrate_page(struct page *newpage, struct page *page) +{ + struct address_space *mapping = page->mapping; + struct buffer_head *bh, *head; + int rc; + + if (!mapping) + return -EAGAIN; + + if (!page_has_buffers(page)) + return migrate_page(newpage, page); + + head = page_buffers(page); + + rc = migrate_page_remove_references(newpage, page, 3); + + if (rc) + return rc; + + bh = head; + do { + get_bh(bh); + lock_buffer(bh); + bh = bh->b_this_page; + + } while (bh != head); + + ClearPagePrivate(page); + set_page_private(newpage, page_private(page)); + set_page_private(page, 0); + put_page(page); + get_page(newpage); + + bh = head; + do { + set_bh_page(bh, newpage, bh_offset(bh)); + bh = bh->b_this_page; + + } while (bh != head); + + SetPagePrivate(newpage); + + migrate_page_copy(newpage, page); + + bh = head; + do { + unlock_buffer(bh); + put_bh(bh); + bh = bh->b_this_page; + + } while (bh != head); + + return 0; +} +EXPORT_SYMBOL(buffer_migrate_page); + +/* + * Migrate the list 'pagelist' of pages to a certain destination. + * + * Specify destination with either non-NULL vma or dest_node >= 0 + * Return the number of pages not migrated or error code + */ +int migrate_pages_to(struct list_head *pagelist, + struct vm_area_struct *vma, int dest) +{ + LIST_HEAD(newlist); + LIST_HEAD(moved); + LIST_HEAD(failed); + int err = 0; + unsigned long offset = 0; + int nr_pages; + struct page *page; + struct list_head *p; + +redo: + nr_pages = 0; + list_for_each(p, pagelist) { + if (vma) { + /* + * The address passed to alloc_page_vma is used to + * generate the proper interleave behavior. We fake + * the address here by an increasing offset in order + * to get the proper distribution of pages. + * + * No decision has been made as to which page + * a certain old page is moved to so we cannot + * specify the correct address. + */ + page = alloc_page_vma(GFP_HIGHUSER, vma, + offset + vma->vm_start); + offset += PAGE_SIZE; + } + else + page = alloc_pages_node(dest, GFP_HIGHUSER, 0); + + if (!page) { + err = -ENOMEM; + goto out; + } + list_add_tail(&page->lru, &newlist); + nr_pages++; + if (nr_pages > MIGRATE_CHUNK_SIZE) + break; + } + err = migrate_pages(pagelist, &newlist, &moved, &failed); + + putback_lru_pages(&moved); /* Call release pages instead ?? 
*/ + + if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist)) + goto redo; +out: + /* Return leftover allocated pages */ + while (!list_empty(&newlist)) { + page = list_entry(newlist.next, struct page, lru); + list_del(&page->lru); + __free_page(page); + } + list_splice(&failed, pagelist); + if (err < 0) + return err; + + /* Calculate number of leftover pages */ + nr_pages = 0; + list_for_each(p, pagelist) + nr_pages++; + return nr_pages; +} diff --git a/mm/swap_state.c b/mm/swap_state.c index db8a3d3e163..d7af296833f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -15,6 +15,7 @@ #include #include #include +#include #include diff --git a/mm/vmscan.c b/mm/vmscan.c index 548e023c193..fd572bbdc9f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -42,18 +42,6 @@ #include "internal.h" -/* possible outcome of pageout() */ -typedef enum { - /* failed to write page out, page is locked */ - PAGE_KEEP, - /* move page to the active list, page is locked */ - PAGE_ACTIVATE, - /* page has been sent to the disk successfully, page is unlocked */ - PAGE_SUCCESS, - /* page is clean and locked */ - PAGE_CLEAN, -} pageout_t; - struct scan_control { /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; @@ -304,7 +292,7 @@ static void handle_write_error(struct address_space *mapping, * pageout is called by shrink_page_list() for each dirty page. * Calls ->writepage(). */ -static pageout_t pageout(struct page *page, struct address_space *mapping) +pageout_t pageout(struct page *page, struct address_space *mapping) { /* * If the page is dirty, only perform writeback if that write @@ -372,7 +360,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) return PAGE_CLEAN; } -static int remove_mapping(struct address_space *mapping, struct page *page) +int remove_mapping(struct address_space *mapping, struct page *page) { if (!mapping) return 0; /* truncate got there first */ @@ -570,481 +558,6 @@ keep: return nr_reclaimed; } -#ifdef CONFIG_MIGRATION -static inline void move_to_lru(struct page *page) -{ - list_del(&page->lru); - if (PageActive(page)) { - /* - * lru_cache_add_active checks that - * the PG_active bit is off. - */ - ClearPageActive(page); - lru_cache_add_active(page); - } else { - lru_cache_add(page); - } - put_page(page); -} - -/* - * Add isolated pages on the list back to the LRU. - * - * returns the number of pages put back. 
- */ -unsigned long putback_lru_pages(struct list_head *l) -{ - struct page *page; - struct page *page2; - unsigned long count = 0; - - list_for_each_entry_safe(page, page2, l, lru) { - move_to_lru(page); - count++; - } - return count; -} - -/* - * Non migratable page - */ -int fail_migrate_page(struct page *newpage, struct page *page) -{ - return -EIO; -} -EXPORT_SYMBOL(fail_migrate_page); - -/* - * swapout a single page - * page is locked upon entry, unlocked on exit - */ -static int swap_page(struct page *page) -{ - struct address_space *mapping = page_mapping(page); - - if (page_mapped(page) && mapping) - if (try_to_unmap(page, 1) != SWAP_SUCCESS) - goto unlock_retry; - - if (PageDirty(page)) { - /* Page is dirty, try to write it out here */ - switch(pageout(page, mapping)) { - case PAGE_KEEP: - case PAGE_ACTIVATE: - goto unlock_retry; - - case PAGE_SUCCESS: - goto retry; - - case PAGE_CLEAN: - ; /* try to free the page below */ - } - } - - if (PagePrivate(page)) { - if (!try_to_release_page(page, GFP_KERNEL) || - (!mapping && page_count(page) == 1)) - goto unlock_retry; - } - - if (remove_mapping(mapping, page)) { - /* Success */ - unlock_page(page); - return 0; - } - -unlock_retry: - unlock_page(page); - -retry: - return -EAGAIN; -} -EXPORT_SYMBOL(swap_page); - -/* - * Page migration was first developed in the context of the memory hotplug - * project. The main authors of the migration code are: - * - * IWAMOTO Toshihiro - * Hirokazu Takahashi - * Dave Hansen - * Christoph Lameter - */ - -/* - * Remove references for a page and establish the new page with the correct - * basic settings to be able to stop accesses to the page. - */ -int migrate_page_remove_references(struct page *newpage, - struct page *page, int nr_refs) -{ - struct address_space *mapping = page_mapping(page); - struct page **radix_pointer; - - /* - * Avoid doing any of the following work if the page count - * indicates that the page is in use or truncate has removed - * the page. - */ - if (!mapping || page_mapcount(page) + nr_refs != page_count(page)) - return -EAGAIN; - - /* - * Establish swap ptes for anonymous pages or destroy pte - * maps for files. - * - * In order to reestablish file backed mappings the fault handlers - * will take the radix tree_lock which may then be used to stop - * processses from accessing this page until the new page is ready. - * - * A process accessing via a swap pte (an anonymous page) will take a - * page_lock on the old page which will block the process until the - * migration attempt is complete. At that time the PageSwapCache bit - * will be examined. If the page was migrated then the PageSwapCache - * bit will be clear and the operation to retrieve the page will be - * retried which will find the new page in the radix tree. Then a new - * direct mapping may be generated based on the radix tree contents. - * - * If the page was not migrated then the PageSwapCache bit - * is still set and the operation may continue. - */ - if (try_to_unmap(page, 1) == SWAP_FAIL) - /* A vma has VM_LOCKED set -> Permanent failure */ - return -EPERM; - - /* - * Give up if we were unable to remove all mappings. 
- */ - if (page_mapcount(page)) - return -EAGAIN; - - write_lock_irq(&mapping->tree_lock); - - radix_pointer = (struct page **)radix_tree_lookup_slot( - &mapping->page_tree, - page_index(page)); - - if (!page_mapping(page) || page_count(page) != nr_refs || - *radix_pointer != page) { - write_unlock_irq(&mapping->tree_lock); - return -EAGAIN; - } - - /* - * Now we know that no one else is looking at the page. - * - * Certain minimal information about a page must be available - * in order for other subsystems to properly handle the page if they - * find it through the radix tree update before we are finished - * copying the page. - */ - get_page(newpage); - newpage->index = page->index; - newpage->mapping = page->mapping; - if (PageSwapCache(page)) { - SetPageSwapCache(newpage); - set_page_private(newpage, page_private(page)); - } - - *radix_pointer = newpage; - __put_page(page); - write_unlock_irq(&mapping->tree_lock); - - return 0; -} -EXPORT_SYMBOL(migrate_page_remove_references); - -/* - * Copy the page to its new location - */ -void migrate_page_copy(struct page *newpage, struct page *page) -{ - copy_highpage(newpage, page); - - if (PageError(page)) - SetPageError(newpage); - if (PageReferenced(page)) - SetPageReferenced(newpage); - if (PageUptodate(page)) - SetPageUptodate(newpage); - if (PageActive(page)) - SetPageActive(newpage); - if (PageChecked(page)) - SetPageChecked(newpage); - if (PageMappedToDisk(page)) - SetPageMappedToDisk(newpage); - - if (PageDirty(page)) { - clear_page_dirty_for_io(page); - set_page_dirty(newpage); - } - - ClearPageSwapCache(page); - ClearPageActive(page); - ClearPagePrivate(page); - set_page_private(page, 0); - page->mapping = NULL; - - /* - * If any waiters have accumulated on the new page then - * wake them up. - */ - if (PageWriteback(newpage)) - end_page_writeback(newpage); -} -EXPORT_SYMBOL(migrate_page_copy); - -/* - * Common logic to directly migrate a single page suitable for - * pages that do not use PagePrivate. - * - * Pages are locked upon entry and exit. - */ -int migrate_page(struct page *newpage, struct page *page) -{ - int rc; - - BUG_ON(PageWriteback(page)); /* Writeback must be complete */ - - rc = migrate_page_remove_references(newpage, page, 2); - - if (rc) - return rc; - - migrate_page_copy(newpage, page); - - /* - * Remove auxiliary swap entries and replace - * them with real ptes. - * - * Note that a real pte entry will allow processes that are not - * waiting on the page lock to use the new page via the page tables - * before the new page is unlocked. - */ - remove_from_swap(newpage); - return 0; -} -EXPORT_SYMBOL(migrate_page); - -/* - * migrate_pages - * - * Two lists are passed to this function. The first list - * contains the pages isolated from the LRU to be migrated. - * The second list contains new pages that the pages isolated - * can be moved to. If the second list is NULL then all - * pages are swapped out. - * - * The function returns after 10 attempts or if no pages - * are movable anymore because to has become empty - * or no retryable pages exist anymore. - * - * Return: Number of pages not migrated when "to" ran empty. 
- */ -unsigned long migrate_pages(struct list_head *from, struct list_head *to, - struct list_head *moved, struct list_head *failed) -{ - unsigned long retry; - unsigned long nr_failed = 0; - int pass = 0; - struct page *page; - struct page *page2; - int swapwrite = current->flags & PF_SWAPWRITE; - int rc; - - if (!swapwrite) - current->flags |= PF_SWAPWRITE; - -redo: - retry = 0; - - list_for_each_entry_safe(page, page2, from, lru) { - struct page *newpage = NULL; - struct address_space *mapping; - - cond_resched(); - - rc = 0; - if (page_count(page) == 1) - /* page was freed from under us. So we are done. */ - goto next; - - if (to && list_empty(to)) - break; - - /* - * Skip locked pages during the first two passes to give the - * functions holding the lock time to release the page. Later we - * use lock_page() to have a higher chance of acquiring the - * lock. - */ - rc = -EAGAIN; - if (pass > 2) - lock_page(page); - else - if (TestSetPageLocked(page)) - goto next; - - /* - * Only wait on writeback if we have already done a pass where - * we we may have triggered writeouts for lots of pages. - */ - if (pass > 0) { - wait_on_page_writeback(page); - } else { - if (PageWriteback(page)) - goto unlock_page; - } - - /* - * Anonymous pages must have swap cache references otherwise - * the information contained in the page maps cannot be - * preserved. - */ - if (PageAnon(page) && !PageSwapCache(page)) { - if (!add_to_swap(page, GFP_KERNEL)) { - rc = -ENOMEM; - goto unlock_page; - } - } - - if (!to) { - rc = swap_page(page); - goto next; - } - - newpage = lru_to_page(to); - lock_page(newpage); - - /* - * Pages are properly locked and writeback is complete. - * Try to migrate the page. - */ - mapping = page_mapping(page); - if (!mapping) - goto unlock_both; - - if (mapping->a_ops->migratepage) { - /* - * Most pages have a mapping and most filesystems - * should provide a migration function. Anonymous - * pages are part of swap space which also has its - * own migration function. This is the most common - * path for page migration. - */ - rc = mapping->a_ops->migratepage(newpage, page); - goto unlock_both; - } - - /* - * Default handling if a filesystem does not provide - * a migration function. We can only migrate clean - * pages so try to write out any dirty pages first. - */ - if (PageDirty(page)) { - switch (pageout(page, mapping)) { - case PAGE_KEEP: - case PAGE_ACTIVATE: - goto unlock_both; - - case PAGE_SUCCESS: - unlock_page(newpage); - goto next; - - case PAGE_CLEAN: - ; /* try to migrate the page below */ - } - } - - /* - * Buffers are managed in a filesystem specific way. - * We must have no buffers or drop them. - */ - if (!page_has_buffers(page) || - try_to_release_page(page, GFP_KERNEL)) { - rc = migrate_page(newpage, page); - goto unlock_both; - } - - /* - * On early passes with mapped pages simply - * retry. There may be a lock held for some - * buffers that may go away. Later - * swap them out. - */ - if (pass > 4) { - /* - * Persistently unable to drop buffers..... As a - * measure of last resort we fall back to - * swap_page(). - */ - unlock_page(newpage); - newpage = NULL; - rc = swap_page(page); - goto next; - } - -unlock_both: - unlock_page(newpage); - -unlock_page: - unlock_page(page); - -next: - if (rc == -EAGAIN) { - retry++; - } else if (rc) { - /* Permanent failure */ - list_move(&page->lru, failed); - nr_failed++; - } else { - if (newpage) { - /* Successful migration. 
Return page to LRU */ - move_to_lru(newpage); - } - list_move(&page->lru, moved); - } - } - if (retry && pass++ < 10) - goto redo; - - if (!swapwrite) - current->flags &= ~PF_SWAPWRITE; - - return nr_failed + retry; -} - -/* - * Isolate one page from the LRU lists and put it on the - * indicated list with elevated refcount. - * - * Result: - * 0 = page not on LRU list - * 1 = page removed from LRU list and added to the specified list. - */ -int isolate_lru_page(struct page *page) -{ - int ret = 0; - - if (PageLRU(page)) { - struct zone *zone = page_zone(page); - spin_lock_irq(&zone->lru_lock); - if (PageLRU(page)) { - ret = 1; - get_page(page); - ClearPageLRU(page); - if (PageActive(page)) - del_page_from_active_list(zone, page); - else - del_page_from_inactive_list(zone, page); - } - spin_unlock_irq(&zone->lru_lock); - } - - return ret; -} -#endif - /* * zone->lru_lock is heavily contended. Some of the functions that * shrink the lists perform better by taking out a batch of pages -- cgit v1.2.3-70-g09d2