path: root/mm/page_alloc.c
author	Minchan Kim <minchan@kernel.org>	2013-02-22 16:33:58 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 17:50:15 -0800
commit	194159fbcc0d6ac1351837d3cd7a27a4af0219a6 (patch)
tree	a5a960a4c8698001db50a987015131bb5d256c6e /mm/page_alloc.c
parent	c60514b6314137a9505c60966fda2094b22a2fda (diff)
mm: remove MIGRATE_ISOLATE check in hotpath
Several functions test MIGRATE_ISOLATE, and some of them are in the hot path, but MIGRATE_ISOLATE is only used when CONFIG_MEMORY_ISOLATION is enabled (i.e. by CMA, memory hotplug and memory failure), which is not a common config option. So let's not add unnecessary overhead and code when CONFIG_MEMORY_ISOLATION is not enabled.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
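For reference, the is_migrate_isolate() and is_migrate_isolate_page() helpers the patch switches to are expected to be defined along these lines; this is a sketch modelled on include/linux/page-isolation.h as introduced by the parent commit, and the exact in-tree definitions may differ slightly. The point is that with CONFIG_MEMORY_ISOLATION disabled they return a constant false, so the compiler can eliminate the isolation branches from the hot paths.

/*
 * Sketch of the page-isolation helpers referenced in this patch
 * (assumed shape, based on include/linux/page-isolation.h).
 */
#ifdef CONFIG_MEMORY_ISOLATION
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
/* Without CONFIG_MEMORY_ISOLATION the checks compile away entirely. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif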
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07fe78d01ff..e3fb290194c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -673,7 +673,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
__free_one_page(page, zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
- if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
+ if (likely(!is_migrate_isolate_page(page))) {
__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
if (is_migrate_cma(mt))
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
@@ -691,7 +691,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
zone->pages_scanned = 0;
__free_one_page(page, zone, order, migratetype);
- if (unlikely(migratetype != MIGRATE_ISOLATE))
+ if (unlikely(!is_migrate_isolate(migratetype)))
__mod_zone_freepage_state(zone, 1 << order, migratetype);
spin_unlock(&zone->lock);
}
@@ -923,7 +923,9 @@ static int fallbacks[MIGRATE_TYPES][4] = {
[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
#endif
[MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
+#ifdef CONFIG_MEMORY_ISOLATION
[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
+#endif
};
/*
@@ -1149,7 +1151,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add_tail(&page->lru, list);
if (IS_ENABLED(CONFIG_CMA)) {
mt = get_pageblock_migratetype(page);
- if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+ if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
mt = migratetype;
}
set_freepage_migratetype(page, mt);
@@ -1333,7 +1335,7 @@ void free_hot_cold_page(struct page *page, int cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, 0, migratetype);
goto out;
}
@@ -1407,7 +1409,7 @@ static int __isolate_free_page(struct page *page, unsigned int order)
zone = page_zone(page);
mt = get_pageblock_migratetype(page);
- if (mt != MIGRATE_ISOLATE) {
+ if (!is_migrate_isolate(mt)) {
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
@@ -1426,7 +1428,7 @@ static int __isolate_free_page(struct page *page, unsigned int order)
struct page *endpage = page + (1 << order) - 1;
for (; page < endpage; page += pageblock_nr_pages) {
int mt = get_pageblock_migratetype(page);
- if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
set_pageblock_migratetype(page,
MIGRATE_MOVABLE);
}
@@ -2904,7 +2906,9 @@ static void show_migration_types(unsigned char type)
#ifdef CONFIG_CMA
[MIGRATE_CMA] = 'C',
#endif
+#ifdef CONFIG_MEMORY_ISOLATION
[MIGRATE_ISOLATE] = 'I',
+#endif
};
char tmp[MIGRATE_TYPES + 1];
char *p = tmp;