author    Mel Gorman <mel@csn.ul.ie>    2007-10-16 01:25:49 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 09:42:59 -0700
commit    535131e6925b4a95f321148ad7293f496e0e58d7 (patch)
tree      fdd49e29f89eb6db3ba2b5ba7df7b059de95a91f /mm/page_alloc.c
parent    b2a0ac8875a0a3b9f0739b60526f8c5977d2200f (diff)
Choose pages from the per-cpu list based on migration type
The freelists for each migrate type can slowly become polluted due to the per-cpu list. Consider what happens when the following occurs:

1. A 2^(MAX_ORDER-1) block is reserved for __GFP_MOVABLE pages
2. An order-0 page is allocated from the newly reserved block
3. The page is freed and placed on the per-cpu list
4. alloc_page() is called with GFP_KERNEL as the gfp_mask
5. The per-cpu list is used to satisfy the allocation

This results in a kernel page sitting in the middle of a migratable region.

This patch prevents this leak from occurring by storing the MIGRATE_ type of the page in page->private. On allocation, only a page of the desired type will be returned; otherwise more pages are allocated. This may temporarily allow a per-cpu list to go over the pcp->high limit, but it will be corrected on the next free. Care is taken to preserve the hotness of recently freed pages. The additional code is not measurably slower for the workloads we've tested.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
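The core of the fix is small enough to mirror outside the kernel. Below is a minimal user-space sketch of the match-by-type scan this patch introduces; the list helpers, struct page layout, and MIGRATE_ constants here are simplified stand-ins, not the kernel's real definitions:

/*
 * Minimal user-space sketch of the per-cpu list logic this patch adds.
 * Everything here is a simplified stand-in for the kernel types.
 */
#include <stdio.h>
#include <stddef.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE };

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct page {
	unsigned long private;		/* stores the migrate type, as in the patch */
	struct list_head lru;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Scan the pcp list for a page whose stashed type matches; NULL if none. */
static struct page *pick_page(struct list_head *pcp, int migratetype)
{
	struct list_head *pos;

	for (pos = pcp->next; pos != pcp; pos = pos->next) {
		struct page *page = container_of(pos, struct page, lru);
		if (page->private == (unsigned long)migratetype) {
			list_del(&page->lru);
			return page;
		}
	}
	return NULL;	/* the kernel would refill via rmqueue_bulk() here */
}

int main(void)
{
	struct list_head pcp;
	struct page unmovable = { MIGRATE_UNMOVABLE };
	struct page movable = { MIGRATE_MOVABLE };

	list_init(&pcp);
	list_add(&unmovable.lru, &pcp);
	list_add(&movable.lru, &pcp);	/* movable now sits at the head */

	/* A GFP_KERNEL-style request skips the movable page at the head. */
	struct page *p = pick_page(&pcp, MIGRATE_UNMOVABLE);
	printf("picked page with type %lu\n", p ? p->private : (unsigned long)-1);
	return 0;
}

Compiled and run, this prints type 0 (MIGRATE_UNMOVABLE): the movable page is skipped even though it sits at the head of the list, which is exactly the pollution the patch prevents.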
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   28
1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d54ecf41b44..e3e726bd285 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -760,7 +760,8 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
struct page *page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
break;
- list_add_tail(&page->lru, list);
+ list_add(&page->lru, list);
+ set_page_private(page, migratetype);
}
spin_unlock(&zone->lock);
return i;
@@ -887,6 +888,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
local_irq_save(flags);
__count_vm_event(PGFREE);
list_add(&page->lru, &pcp->list);
+ set_page_private(page, get_pageblock_migratetype(page));
pcp->count++;
if (pcp->count >= pcp->high) {
free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -951,9 +953,27 @@ again:
if (unlikely(!pcp->count))
goto failed;
}
- page = list_entry(pcp->list.next, struct page, lru);
- list_del(&page->lru);
- pcp->count--;
+ /* Find a page of the appropriate migrate type */
+ list_for_each_entry(page, &pcp->list, lru) {
+ if (page_private(page) == migratetype) {
+ list_del(&page->lru);
+ pcp->count--;
+ break;
+ }
+ }
+
+ /*
+ * Check if a page of the appropriate migrate type
+ * was found. If not, allocate more to the pcp list
+ */
+ if (&page->lru == &pcp->list) {
+ pcp->count += rmqueue_bulk(zone, 0,
+ pcp->batch, &pcp->list, migratetype);
+ page = list_entry(pcp->list.next, struct page, lru);
+ VM_BUG_ON(page_private(page) != migratetype);
+ list_del(&page->lru);
+ pcp->count--;
+ }
} else {
spin_lock_irqsave(&zone->lock, flags);
page = __rmqueue(zone, order, migratetype);
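For reference, the page_private()/set_page_private() helpers used above are thin accessors over the page->private field. As defined in include/linux/mm.h of this era (quoted from memory, so treat as approximate):

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

Note also why rmqueue_bulk() now uses list_add() instead of list_add_tail(): after a refill, the allocation path takes the page at the head of the pcp list and asserts its type with VM_BUG_ON(), so adding at the head guarantees that page is one of the freshly pulled pages of the requested migratetype.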