Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  255
1 file changed, 142 insertions(+), 113 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 37f97628706..21bf292b642 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
unsigned long end_pfn = zone_end_pfn(zone);
unsigned long pfn;
- zone->compact_cached_migrate_pfn = start_pfn;
+ zone->compact_cached_migrate_pfn[0] = start_pfn;
+ zone->compact_cached_migrate_pfn[1] = start_pfn;
zone->compact_cached_free_pfn = end_pfn;
zone->compact_blockskip_flush = false;
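The cached migrate scanner position becomes a two-slot array because async
compaction skips pageblocks that sync compaction still has to visit, so the two
modes can no longer share one resume position. A minimal, runnable userspace C
sketch of the intended layout — struct zone_sim and reset_cache() are
illustrative names, not kernel API:

#include <stdio.h>

/* slot 0: where async compaction resumes; slot 1: where sync resumes */
struct zone_sim {
	unsigned long compact_cached_migrate_pfn[2];
	unsigned long compact_cached_free_pfn;
};

static void reset_cache(struct zone_sim *z, unsigned long start_pfn,
			unsigned long end_pfn)
{
	/* mirrors __reset_isolation_suitable(): both modes restart anew */
	z->compact_cached_migrate_pfn[0] = start_pfn;
	z->compact_cached_migrate_pfn[1] = start_pfn;
	z->compact_cached_free_pfn = end_pfn;
}

int main(void)
{
	struct zone_sim z;

	reset_cache(&z, 0, 262144);
	printf("async resumes at %lu, sync at %lu\n",
	       z.compact_cached_migrate_pfn[0],
	       z.compact_cached_migrate_pfn[1]);
	return 0;
}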
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
*/
static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
- bool migrate_scanner)
+ bool set_unsuitable, bool migrate_scanner)
{
struct zone *zone = cc->zone;
+ unsigned long pfn;
if (cc->ignore_skip_hint)
return;
@@ -141,20 +143,32 @@ static void update_pageblock_skip(struct compact_control *cc,
if (!page)
return;
- if (!nr_isolated) {
- unsigned long pfn = page_to_pfn(page);
+ if (nr_isolated)
+ return;
+
+ /*
+ * Only skip pageblocks when all forms of compaction are expected to
+ * fail in the near future.
+ */
+ if (set_unsuitable)
set_pageblock_skip(page);
- /* Update where compaction should restart */
- if (migrate_scanner) {
- if (!cc->finished_update_migrate &&
- pfn > zone->compact_cached_migrate_pfn)
- zone->compact_cached_migrate_pfn = pfn;
- } else {
- if (!cc->finished_update_free &&
- pfn < zone->compact_cached_free_pfn)
- zone->compact_cached_free_pfn = pfn;
- }
+ pfn = page_to_pfn(page);
+
+ /* Update where async and sync compaction should restart */
+ if (migrate_scanner) {
+ if (cc->finished_update_migrate)
+ return;
+ if (pfn > zone->compact_cached_migrate_pfn[0])
+ zone->compact_cached_migrate_pfn[0] = pfn;
+ if (cc->mode != MIGRATE_ASYNC &&
+ pfn > zone->compact_cached_migrate_pfn[1])
+ zone->compact_cached_migrate_pfn[1] = pfn;
+ } else {
+ if (cc->finished_update_free)
+ return;
+ if (pfn < zone->compact_cached_free_pfn)
+ zone->compact_cached_free_pfn = pfn;
}
}
#else
@@ -166,7 +180,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
- bool migrate_scanner)
+ bool set_unsuitable, bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
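The set_unsuitable split above is the heart of the skip-hint change: the cached
resume pfn always advances past a fully scanned pageblock, but the persistent
per-pageblock skip bit is set only when the block is hopeless for every
compaction mode. A reduced, runnable userspace sketch — update_skip() is an
illustrative stand-in for update_pageblock_skip(), with the per-mode pfn slots
folded into one:

#include <stdbool.h>
#include <stdio.h>

static void update_skip(bool set_unsuitable, bool *skip_bit,
			unsigned long *cached_pfn, unsigned long pfn)
{
	if (set_unsuitable)
		*skip_bit = true;	/* hopeless for every compaction mode */
	if (pfn > *cached_pfn)
		*cached_pfn = pfn;	/* resume position still advances */
}

int main(void)
{
	bool skip = false;
	unsigned long cached = 0;

	/* async-only failure: the cache advances, but the bit stays clear
	 * so a later sync pass still tries this pageblock */
	update_skip(false, &skip, &cached, 512);
	printf("skip=%d cached=%lu\n", skip, cached);	/* skip=0 cached=512 */
	return 0;
}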
@@ -195,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
}
/* async aborts if taking too long or contended */
- if (!cc->sync) {
+ if (cc->mode == MIGRATE_ASYNC) {
cc->contended = true;
return false;
}
@@ -208,10 +222,28 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
return true;
}
-static inline bool compact_trylock_irqsave(spinlock_t *lock,
- unsigned long *flags, struct compact_control *cc)
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and either reschedules in sync compaction or aborts async
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
+ * is used where no lock is concerned.
+ *
+ * Returns false when no scheduling was needed, or sync compaction rescheduled.
+ * Returns true when async compaction should abort.
+ */
+static inline bool compact_should_abort(struct compact_control *cc)
{
- return compact_checklock_irqsave(lock, flags, false, cc);
+ /* async compaction treats need_resched() as contention and aborts */
+ if (need_resched()) {
+ if (cc->mode == MIGRATE_ASYNC) {
+ cc->contended = true;
+ return true;
+ }
+
+ cond_resched();
+ }
+
+ return false;
}
/* Returns true if the page is within a block suitable for migration to */
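compact_should_abort() generalises the old lockless cond_resched() to be
mode-aware: async compaction treats scheduling pressure as contention and bails
out, while sync compaction just yields and carries on. A runnable userspace
analogue — the sim_* names are stand-ins for kernel facilities, and
need_resched() is simulated by a function that always reports pressure:

#include <stdbool.h>
#include <stdio.h>

enum sim_mode { SIM_ASYNC, SIM_SYNC };

struct sim_cc {
	enum sim_mode mode;
	bool contended;
};

/* stand-in for need_resched(): pretend the scheduler wants the CPU back */
static bool sim_need_resched(void)
{
	return true;
}

static bool should_abort(struct sim_cc *cc)
{
	if (sim_need_resched()) {
		if (cc->mode == SIM_ASYNC) {
			cc->contended = true;	/* record why we gave up */
			return true;
		}
		/* sync mode would cond_resched() here and keep going */
	}
	return false;
}

int main(void)
{
	struct sim_cc a = { SIM_ASYNC, false };
	struct sim_cc s = { SIM_SYNC, false };

	printf("async aborts: %d\n", should_abort(&a));	/* prints 1 */
	printf("sync aborts:  %d\n", should_abort(&s));	/* prints 0 */
	return 0;
}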
@@ -329,7 +361,8 @@ isolate_fail:
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
- update_pageblock_skip(cc, valid_page, total_isolated, false);
+ update_pageblock_skip(cc, valid_page, total_isolated, true,
+ false);
count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
@@ -464,8 +497,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
- bool skipped_async_unsuitable = false;
- const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
+ bool set_unsuitable = true;
+ const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
+ ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0);
/*
@@ -475,7 +509,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
*/
while (unlikely(too_many_isolated(zone))) {
/* async migration should just abort */
- if (!cc->sync)
+ if (cc->mode == MIGRATE_ASYNC)
return 0;
congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -484,8 +518,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
return 0;
}
+ if (compact_should_abort(cc))
+ return 0;
+
/* Time to isolate some pages for migration */
- cond_resched();
for (; low_pfn < end_pfn; low_pfn++) {
/* give a chance to irqs before checking need_resched() */
if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
@@ -540,9 +576,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
* the minimum amount of work satisfies the allocation
*/
mt = get_pageblock_migratetype(page);
- if (!cc->sync && !migrate_async_suitable(mt)) {
- cc->finished_update_migrate = true;
- skipped_async_unsuitable = true;
+ if (cc->mode == MIGRATE_ASYNC &&
+ !migrate_async_suitable(mt)) {
+ set_unsuitable = false;
goto next_pageblock;
}
}
@@ -646,11 +682,10 @@ next_pageblock:
/*
* Update the pageblock-skip information and cached scanner pfn,
* if the whole pageblock was scanned without isolating any page.
- * This is not done when pageblock was skipped due to being unsuitable
- * for async compaction, so that eventual sync compaction can try.
*/
- if (low_pfn == end_pfn && !skipped_async_unsuitable)
- update_pageblock_skip(cc, valid_page, nr_isolated, true);
+ if (low_pfn == end_pfn)
+ update_pageblock_skip(cc, valid_page, nr_isolated,
+ set_unsuitable, true);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -671,44 +706,48 @@ static void isolate_freepages(struct zone *zone,
struct compact_control *cc)
{
struct page *page;
- unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+ unsigned long block_start_pfn; /* start of current pageblock */
+ unsigned long block_end_pfn; /* end of current pageblock */
+ unsigned long low_pfn; /* lowest pfn scanner is able to scan */
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
/*
* Initialise the free scanner. The starting point is where we last
- * scanned from (or the end of the zone if starting). The low point
- * is the end of the pageblock the migration scanner is using.
+ * successfully isolated from, zone-cached value, or the end of the
+ * zone when isolating for the first time. We need this aligned to
+ * the pageblock boundary, because we do
+ * block_start_pfn -= pageblock_nr_pages in the for loop.
+ * For the ending point, take care when isolating in the last pageblock of
+ * a zone which ends in the middle of a pageblock.
+ * The low boundary is the end of the pageblock the migration scanner
+ * is using.
*/
- pfn = cc->free_pfn;
+ block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+ block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
+ zone_end_pfn(zone));
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
/*
- * Take care that if the migration scanner is at the end of the zone
- * that the free scanner does not accidentally move to the next zone
- * in the next isolation cycle.
- */
- high_pfn = min(low_pfn, pfn);
-
- z_end_pfn = zone_end_pfn(zone);
-
- /*
* Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate
* and free page scanners meet or enough free pages are isolated.
*/
- for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
- pfn -= pageblock_nr_pages) {
+ for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
+ block_end_pfn = block_start_pfn,
+ block_start_pfn -= pageblock_nr_pages) {
unsigned long isolated;
/*
* This can iterate a massively long zone without finding any
* suitable migration targets, so periodically check if we need
- * to schedule.
+ * to schedule, or even abort async compaction.
*/
- cond_resched();
+ if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+ && compact_should_abort(cc))
+ break;
- if (!pfn_valid(pfn))
+ if (!pfn_valid(block_start_pfn))
continue;
/*
@@ -718,7 +757,7 @@ static void isolate_freepages(struct zone *zone,
* i.e. it's possible that all pages within a zones range of
* pages do not belong to a single zone.
*/
- page = pfn_to_page(pfn);
+ page = pfn_to_page(block_start_pfn);
if (page_zone(page) != zone)
continue;
@@ -731,29 +770,26 @@ static void isolate_freepages(struct zone *zone,
continue;
/* Found a block suitable for isolating free pages from */
- isolated = 0;
+ cc->free_pfn = block_start_pfn;
+ isolated = isolate_freepages_block(cc, block_start_pfn,
+ block_end_pfn, freelist, false);
+ nr_freepages += isolated;
/*
- * As pfn may not start aligned, pfn+pageblock_nr_page
- * may cross a MAX_ORDER_NR_PAGES boundary and miss
- * a pfn_valid check. Ensure isolate_freepages_block()
- * only scans within a pageblock
+ * Set a flag that we successfully isolated in this pageblock.
+ * In the next loop iteration, zone->compact_cached_free_pfn
+ * will not be updated and thus it will effectively contain the
+ * highest pageblock we isolated pages from.
*/
- end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
- end_pfn = min(end_pfn, z_end_pfn);
- isolated = isolate_freepages_block(cc, pfn, end_pfn,
- freelist, false);
- nr_freepages += isolated;
+ if (isolated)
+ cc->finished_update_free = true;
/*
- * Record the highest PFN we isolated pages from. When next
- * looking for free pages, the search will restart here as
- * page migration may have returned some pages to the allocator
+ * isolate_freepages_block() might have aborted due to async
+ * compaction being contended
*/
- if (isolated) {
- cc->finished_update_free = true;
- high_pfn = max(high_pfn, pfn);
- }
+ if (cc->contended)
+ break;
}
/* split_free_page does not map the pages */
@@ -763,10 +799,9 @@ static void isolate_freepages(struct zone *zone,
* If we crossed the migrate scanner, we want to keep it that way
* so that compact_finished() may detect this
*/
- if (pfn < low_pfn)
- cc->free_pfn = max(pfn, zone->zone_start_pfn);
- else
- cc->free_pfn = high_pfn;
+ if (block_start_pfn < low_pfn)
+ cc->free_pfn = cc->migrate_pfn;
+
cc->nr_freepages = nr_freepages;
}
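The rewritten isolate_freepages() walks whole pageblocks backwards, keeping
block_start_pfn aligned so the "-= pageblock_nr_pages" step never drifts, and
capping block_end_pfn for a zone that ends mid-pageblock. A runnable userspace
sketch of just the iteration arithmetic — PAGEBLOCK_NR_PAGES and the pfn values
are assumed for illustration, and the kernel's pfn_valid/suitability checks are
omitted:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed: 2MB blocks of 4K pages */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long zone_end_pfn = 10000;	/* zone ends mid-pageblock */
	unsigned long free_pfn = 9800;		/* cached free scanner pfn */
	unsigned long low_pfn = 1024;		/* migrate scanner boundary */

	/* round down so "-= PAGEBLOCK_NR_PAGES" keeps the start aligned */
	unsigned long block_start = free_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	/* cap the first block's end for zones ending mid-pageblock */
	unsigned long block_end = min_ul(block_start + PAGEBLOCK_NR_PAGES,
					 zone_end_pfn);

	for (; block_start >= low_pfn;
	     block_end = block_start, block_start -= PAGEBLOCK_NR_PAGES)
		printf("scan pageblock [%lu, %lu)\n", block_start, block_end);

	return 0;
}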
@@ -781,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
- /* Isolate free pages if necessary */
+ /*
+ * Isolate free pages if necessary, and if we are not aborting due to
+ * contention.
+ */
if (list_empty(&cc->freepages)) {
- isolate_freepages(cc->zone, cc);
+ if (!cc->contended)
+ isolate_freepages(cc->zone, cc);
if (list_empty(&cc->freepages))
return NULL;
@@ -797,23 +836,16 @@ static struct page *compaction_alloc(struct page *migratepage,
}
/*
- * We cannot control nr_migratepages and nr_freepages fully when migration is
- * running as migrate_pages() has no knowledge of compact_control. When
- * migration is complete, we count the number of pages on the lists by hand.
+ * This is a migrate-callback that "frees" freepages back to the isolated
+ * freelist. All pages on the freelist are from the same zone, so there is no
+ * special handling needed for NUMA.
*/
-static void update_nr_listpages(struct compact_control *cc)
+static void compaction_free(struct page *page, unsigned long data)
{
- int nr_migratepages = 0;
- int nr_freepages = 0;
- struct page *page;
-
- list_for_each_entry(page, &cc->migratepages, lru)
- nr_migratepages++;
- list_for_each_entry(page, &cc->freepages, lru)
- nr_freepages++;
+ struct compact_control *cc = (struct compact_control *)data;
- cc->nr_migratepages = nr_migratepages;
- cc->nr_freepages = nr_freepages;
+ list_add(&page->lru, &cc->freepages);
+ cc->nr_freepages++;
}
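update_nr_listpages() can go away because the new free callback keeps the
counts exact while migrate_pages() runs: every destination page handed out by
compaction_alloc() that migration does not consume comes back through
compaction_free(). A runnable userspace analogue of that alloc/free symmetry —
struct pool and the pool_* helpers are illustrative, not kernel API:

#include <stdio.h>

struct node { struct node *next; };
struct pool { struct node *head; int nr; };

/* analogue of compaction_alloc(): hand out a free page, count down */
static struct node *pool_alloc(struct pool *p)
{
	struct node *n = p->head;

	if (!n)
		return NULL;
	p->head = n->next;
	p->nr--;
	return n;
}

/* analogue of compaction_free(): an unused destination page goes back on
 * the list and the count goes back up -- no post-hoc recount needed */
static void pool_free(struct pool *p, struct node *n)
{
	n->next = p->head;
	p->head = n;
	p->nr++;
}

int main(void)
{
	struct node n0 = { NULL };
	struct pool p = { &n0, 1 };
	struct node *n = pool_alloc(&p);

	pool_free(&p, n);		/* migration didn't need it */
	printf("nr=%d\n", p.nr);	/* 1 -- counts never went stale */
	return 0;
}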
/* possible outcome of isolate_migratepages */
@@ -860,13 +892,14 @@ static int compact_finished(struct zone *zone,
unsigned int order;
unsigned long watermark;
- if (fatal_signal_pending(current))
+ if (cc->contended || fatal_signal_pending(current))
return COMPACT_PARTIAL;
/* Compaction run completes if the migrate and free scanner meet */
if (cc->free_pfn <= cc->migrate_pfn) {
/* Let the next compaction start anew. */
- zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+ zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+ zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
zone->compact_cached_free_pfn = zone_end_pfn(zone);
/*
@@ -966,6 +999,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
int ret;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = zone_end_pfn(zone);
+ const bool sync = cc->mode != MIGRATE_ASYNC;
ret = compaction_suitable(zone, cc->order);
switch (ret) {
@@ -991,7 +1025,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
* information on where the scanners should start but check that it
* is initialised by ensuring the values are within zone boundaries.
*/
- cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+ cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
cc->free_pfn = zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -999,7 +1033,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
cc->migrate_pfn = start_pfn;
- zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+ zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+ zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
}
trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
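compact_zone() selects its resume slot with compact_cached_migrate_pfn[sync],
relying on the C guarantee that a bool converts to exactly 0 or 1. A tiny
standalone sketch — the MODE_* constants are stand-ins for enum migrate_mode
values, chosen only so MIGRATE_ASYNC compares unequal:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* [0] = async resume pfn, [1] = sync resume pfn */
	unsigned long cached[2] = { 4096, 1024 };
	const int MODE_ASYNC = 0, MODE_SYNC = 2;
	int mode = MODE_SYNC;

	const bool sync = (mode != MODE_ASYNC);	/* bool indexes as 0 or 1 */

	printf("migrate scanner resumes at %lu\n", cached[sync]); /* 1024 */
	return 0;
}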
@@ -1007,7 +1042,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
migrate_prep_local();
while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
- unsigned long nr_migrate, nr_remaining;
int err;
switch (isolate_migratepages(zone, cc)) {
@@ -1022,21 +1056,20 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
;
}
- nr_migrate = cc->nr_migratepages;
+ if (!cc->nr_migratepages)
+ continue;
+
err = migrate_pages(&cc->migratepages, compaction_alloc,
- (unsigned long)cc,
- cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+ compaction_free, (unsigned long)cc, cc->mode,
MR_COMPACTION);
- update_nr_listpages(cc);
- nr_remaining = cc->nr_migratepages;
- trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
- nr_remaining);
+ trace_mm_compaction_migratepages(cc->nr_migratepages, err,
+ &cc->migratepages);
- /* Release isolated pages not migrated */
+ /* All pages were either migrated or will be released */
+ cc->nr_migratepages = 0;
if (err) {
putback_movable_pages(&cc->migratepages);
- cc->nr_migratepages = 0;
/*
* migrate_pages() may return -ENOMEM when scanners meet
* and we want compact_finished() to detect it
@@ -1058,9 +1091,8 @@ out:
return ret;
}
-static unsigned long compact_zone_order(struct zone *zone,
- int order, gfp_t gfp_mask,
- bool sync, bool *contended)
+static unsigned long compact_zone_order(struct zone *zone, int order,
+ gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
{
unsigned long ret;
struct compact_control cc = {
@@ -1069,7 +1101,7 @@ static unsigned long compact_zone_order(struct zone *zone,
.order = order,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
- .sync = sync,
+ .mode = mode,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -1091,7 +1123,7 @@ int sysctl_extfrag_threshold = 500;
* @order: The order of the current allocation
* @gfp_mask: The GFP mask of the current allocation
* @nodemask: The allowed nodes to allocate from
- * @sync: Whether migration is synchronous or not
+ * @mode: The migration mode for async, sync light, or sync migration
* @contended: Return value that is true if compaction was aborted due to lock contention
* @page: Optionally capture a free page of the requested order during compaction
*
@@ -1099,7 +1131,7 @@ int sysctl_extfrag_threshold = 500;
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync, bool *contended)
+ enum migrate_mode mode, bool *contended)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1124,7 +1156,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;
- status = compact_zone_order(zone, order, gfp_mask, sync,
+ status = compact_zone_order(zone, order, gfp_mask, mode,
contended);
rc = max(status, rc);
@@ -1163,9 +1195,6 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (zone_watermark_ok(zone, cc->order,
low_wmark_pages(zone), 0, 0))
compaction_defer_reset(zone, cc->order, false);
- /* Currently async compaction is never deferred. */
- else if (cc->sync)
- defer_compaction(zone, cc->order);
}
VM_BUG_ON(!list_empty(&cc->freepages));
@@ -1177,7 +1206,7 @@ void compact_pgdat(pg_data_t *pgdat, int order)
{
struct compact_control cc = {
.order = order,
- .sync = false,
+ .mode = MIGRATE_ASYNC,
};
if (!order)
@@ -1190,7 +1219,7 @@ static void compact_node(int nid)
{
struct compact_control cc = {
.order = -1,
- .sync = true,
+ .mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
};