Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  104
1 files changed, 73 insertions, 31 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dff71150966..68c569fcbb6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -30,18 +30,44 @@
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
- * coelesce naturally under reasonable reclaim pressure and those which
+ * coalesce naturally under reasonable reclaim pressure and those which
  * will not.
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works. Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and page allocator never
+	 * implicitly change migration type of MIGRATE_CMA pageblock.
+	 *
+	 * The way to use it is to change migratetype of a range of
+	 * pageblocks to MIGRATE_CMA which can be done by
+	 * __free_pageblock_cma() function. What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should biggest page be bigger then
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone)	zone->min_cma_pages
+#else
+# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
 	return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are refeferenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+};
+
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
+	struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	struct zone *zone;
+#endif
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@ struct lruvec {
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
 /* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
-};
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -347,6 +373,13 @@ struct zone {
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
 #endif
+#ifdef CONFIG_CMA
+	/*
+	 * CMA needs to increase watermark levels during the allocation
+	 * process to make sure that the system is not starved.
+	 */
+	unsigned long		min_cma_pages;
+#endif
 	struct free_area	free_area[MAX_ORDER];
 
 #ifndef CONFIG_SPARSEMEM
@@ -374,8 +407,6 @@ struct zone {
 	spinlock_t		lru_lock;
 	struct lruvec		lruvec;
 
-	struct zone_reclaim_stat reclaim_stat;
-
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
 
@@ -663,7 +694,7 @@ typedef struct pglist_data {
 					     range, including holes */
 	int node_id;
 	wait_queue_head_t kswapd_wait;
-	struct task_struct *kswapd;
+	struct task_struct *kswapd;	/* Protected by lock_memory_hotplug() */
 	int kswapd_max_order;
 	enum zone_type classzone_idx;
 } pg_data_t;
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone,
 						     unsigned long start_pfn,
 						     unsigned long size,
 						     enum memmap_context context);
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+	return lruvec->zone;
+#else
+	return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
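A note on the new migratetype layout: MIGRATE_RESERVE now aliases MIGRATE_PCPTYPES and MIGRATE_CMA only exists when CONFIG_CMA is set, so callers are expected to test pageblock types through is_migrate_cma() rather than comparing hard-coded numeric values. The sketch below only illustrates that check under the stated assumption that CMA pageblocks serve movable allocations exclusively; can_use_pageblock() is a hypothetical helper, not code from this patch.

#include <linux/mmzone.h>

/*
 * Illustrative sketch, not taken from this patch: keep CMA pageblocks
 * for movable allocations only, and let every other pageblock type
 * follow its usual fallback rules.  The helper name is hypothetical.
 */
static bool can_use_pageblock(int block_migratetype, int request_migratetype)
{
	/* is_migrate_cma() compiles to false when CONFIG_CMA is off. */
	if (is_migrate_cma(block_migratetype))
		return request_migratetype == MIGRATE_MOVABLE;

	return true;
}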
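The ISOLATE_* values are renumbered after ISOLATE_INACTIVE and ISOLATE_ACTIVE are dropped, but each remains a distinct power-of-two bit, so combined modes can still be built with | and queried with &. A hedged sketch of that bitmask usage follows; wants_clean_unmapped() is a hypothetical helper, not part of this patch.

#include <linux/mmzone.h>

/*
 * Illustration only: the remaining isolation flags are independent
 * bits in an isolate_mode_t, so each one can be tested on its own.
 */
static bool wants_clean_unmapped(isolate_mode_t mode)
{
	return (mode & ISOLATE_CLEAN) && (mode & ISOLATE_UNMAPPED);
}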
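The lruvec_zone() helper added at the end of the header relies on struct lruvec being embedded directly in struct zone when CONFIG_CGROUP_MEM_RES_CTLR is off, so container_of() can walk back to the owning zone; with memcg enabled, per-memcg lruvecs live outside the zone and carry the explicit zone back-pointer that lruvec_init() fills in. Below is a minimal, self-contained sketch of the container_of() arithmetic; the demo_* names are illustrative, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for a struct lruvec embedded in a struct zone. */
struct demo_lruvec { int dummy; };
struct demo_zone {
	const char *name;
	struct demo_lruvec lruvec;	/* embedded, as in !memcg builds */
};

/* Same pointer arithmetic the kernel's container_of() performs. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct demo_zone zone = { .name = "Normal" };
	struct demo_lruvec *lruvec = &zone.lruvec;

	/* Recover the enclosing zone from the embedded member's address. */
	struct demo_zone *owner = demo_container_of(lruvec, struct demo_zone, lruvec);

	printf("lruvec belongs to zone %s\n", owner->name);
	return 0;
}

Running this prints the zone name recovered purely from the embedded member's address, which is the same trick the !memcg branch of lruvec_zone() depends on.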