Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 37 +++++++------------------------------
 1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8aec4d4601e..ac8fc51825b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -174,18 +174,13 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 					PB_migrate, PB_migrate_end);
 }
 
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
+static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
 
-	/* Cluster high-order atomic allocations together */
-	if (unlikely(order > 0) &&
-			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
-		return MIGRATE_HIGHATOMIC;
-
 	/* Cluster based on mobility */
 	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
@@ -706,11 +701,10 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_HIGHATOMIC, MIGRATE_RESERVE },
-	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
-	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
 };
 
 /*
@@ -804,9 +798,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	int current_order;
 	struct page *page;
 	int migratetype, i;
-	int nonatomic_fallback_atomic = 0;
 
-retry:
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
 						--current_order) {
@@ -816,14 +808,6 @@ retry:
 			/* MIGRATE_RESERVE handled later if necessary */
 			if (migratetype == MIGRATE_RESERVE)
 				continue;
-			/*
-			 * Make it hard to fallback to blocks used for
-			 * high-order atomic allocations
-			 */
-			if (migratetype == MIGRATE_HIGHATOMIC &&
-					start_migratetype != MIGRATE_UNMOVABLE &&
-					!nonatomic_fallback_atomic)
-				continue;
 
 			area = &(zone->free_area[current_order]);
 			if (list_empty(&area->free_list[migratetype]))
@@ -849,8 +833,7 @@ retry:
 							start_migratetype);
 
 			/* Claim the whole block if over half of it is free */
-			if ((pages << current_order) >= (1 << (MAX_ORDER-2)) &&
-					migratetype != MIGRATE_HIGHATOMIC)
+			if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
 				set_pageblock_migratetype(page,
 							start_migratetype);
 
@@ -872,12 +855,6 @@ retry:
 		}
 	}
 
-	/* Allow fallback to high-order atomic blocks if memory is that low */
-	if (!nonatomic_fallback_atomic) {
-		nonatomic_fallback_atomic = 1;
-		goto retry;
-	}
-
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
@@ -1112,7 +1089,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	struct page *page;
 	int cold = !!(gfp_flags & __GFP_COLD);
 	int cpu;
-	int migratetype = allocflags_to_migratetype(gfp_flags, order);
+	int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
 	cpu = get_cpu();
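
A note on the surviving logic: allocflags_to_migratetype() now derives the migrate type purely from the two mobility bits in the GFP mask, packing __GFP_MOVABLE into bit 1 and __GFP_RECLAIMABLE into bit 0 of the result. A minimal userspace sketch of that bit trick follows; the flag values below are stand-ins for illustration, not the real constants from include/linux/gfp.h:

#include <stdio.h>

/* Stand-in flag values for illustration only; the real __GFP_*
 * constants are defined in include/linux/gfp.h and differ. */
#define __GFP_RECLAIMABLE 0x1u
#define __GFP_MOVABLE     0x2u

/* Mirrors the migrate-type order the bit trick assumes:
 * UNMOVABLE = 0, RECLAIMABLE = 1, MOVABLE = 2. */
enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

static int flags_to_migratetype(unsigned int gfp_flags)
{
	/* __GFP_MOVABLE selects bit 1 of the result,
	 * __GFP_RECLAIMABLE selects bit 0. */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("none        -> %d\n", flags_to_migratetype(0));
	printf("reclaimable -> %d\n", flags_to_migratetype(__GFP_RECLAIMABLE));
	printf("movable     -> %d\n", flags_to_migratetype(__GFP_MOVABLE));
	return 0;
}

Both bits set would yield index 3, which is why the kernel version keeps the WARN_ON: that slot belongs to MIGRATE_RESERVE, not a mobility class.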
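The trimmed fallbacks[] table is the other half of the mechanism: when the preferred free list is empty, __rmqueue_fallback() walks the row for the requested type left to right, skipping MIGRATE_RESERVE, which is only tapped as a last resort via __rmqueue_smallest(). A self-contained sketch of that preference walk; the table is copied from the hunk above, but the walk itself is simplified and ignores per-order free lists:

#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
       MIGRATE_RESERVE, MIGRATE_TYPES };

static const char *names[MIGRATE_TYPES] = {
	"UNMOVABLE", "RECLAIMABLE", "MOVABLE", "RESERVE",
};

/* Fallback preference order after the patch, as in the diff above. */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE },
};

int main(void)
{
	/* Print the order each class tries when its own lists are empty.
	 * As in the hunk above, MIGRATE_RESERVE is skipped during the
	 * walk and only used once every other option is exhausted. */
	for (int t = MIGRATE_UNMOVABLE; t <= MIGRATE_MOVABLE; t++) {
		printf("%s:", names[t]);
		for (int i = 0; i < MIGRATE_TYPES - 1; i++) {
			if (fallbacks[t][i] == MIGRATE_RESERVE)
				continue; /* handled later if necessary */
			printf(" %s", names[fallbacks[t][i]]);
		}
		printf(" (then RESERVE)\n");
	}
	return 0;
}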