-rw-r--r--  include/linux/mm_inline.h | 26
-rw-r--r--  include/linux/mmzone.h    | 16
-rw-r--r--  mm/page_alloc.c           |  6
-rw-r--r--  mm/vmscan.c               | 22
4 files changed, 35 insertions, 35 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 4e3478e7192..8f84d2e53d0 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,21 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
 	struct lruvec *lruvec;
 
-	lruvec = mem_cgroup_lru_add_list(zone, page, l);
-	list_add(&page->lru, &lruvec->lists[l]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
+	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	list_add(&page->lru, &lruvec->lists[lru]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
 }
 
 static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, l);
+	mem_cgroup_lru_del_list(page, lru);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
@@ -57,21 +57,21 @@ static inline enum lru_list page_lru_base_type(struct page *page)
 
 static inline void del_page_from_lru(struct zone *zone, struct page *page)
 {
-	enum lru_list l;
+	enum lru_list lru;
 
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
-		l = LRU_UNEVICTABLE;
+		lru = LRU_UNEVICTABLE;
 	} else {
-		l = page_lru_base_type(page);
+		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			__ClearPageActive(page);
-			l += LRU_ACTIVE;
+			lru += LRU_ACTIVE;
 		}
 	}
-	mem_cgroup_lru_del_list(page, l);
+	mem_cgroup_lru_del_list(page, lru);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
 }
 
 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2038b90ca6e..650ba2fb330 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -140,23 +140,23 @@ enum lru_list {
 	NR_LRU_LISTS
 };
 
-#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
 
-#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
 
-static inline int is_file_lru(enum lru_list l)
+static inline int is_file_lru(enum lru_list lru)
 {
-	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_active_lru(enum lru_list l)
+static inline int is_active_lru(enum lru_list lru)
 {
-	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
 }
 
-static inline int is_unevictable_lru(enum lru_list l)
+static inline int is_unevictable_lru(enum lru_list lru)
 {
-	return (l == LRU_UNEVICTABLE);
+	return (lru == LRU_UNEVICTABLE);
 }
 
 struct lruvec {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb5723c491f..0027d8f4a1b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4262,7 +4262,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		struct zone *zone = pgdat->node_zones + j;
 		unsigned long size, realsize, memmap_pages;
-		enum lru_list l;
+		enum lru_list lru;
 
 		size = zone_spanned_pages_in_node(nid, j, zones_size);
 		realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4312,8 +4312,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		for_each_lru(l)
-			INIT_LIST_HEAD(&zone->lruvec.lists[l]);
+		for_each_lru(lru)
+			INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
 		zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7724fb8e749..01466bf783f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1920,7 +1920,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	unsigned long ap, fp;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	u64 fraction[2], denominator;
-	enum lru_list l;
+	enum lru_list lru;
 	int noswap = 0;
 	bool force_scan = false;
 
@@ -2010,18 +2010,18 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
 out:
-	for_each_evictable_lru(l) {
-		int file = is_file_lru(l);
+	for_each_evictable_lru(lru) {
+		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = zone_nr_lru_pages(mz, l);
+		scan = zone_nr_lru_pages(mz, lru);
 		if (priority || noswap) {
 			scan >>= priority;
 			if (!scan && force_scan)
 				scan = SWAP_CLUSTER_MAX;
 			scan = div64_u64(scan * fraction[file], denominator);
 		}
-		nr[l] = scan;
+		nr[lru] = scan;
 	}
 }
 
@@ -2097,7 +2097,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
 {
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
-	enum lru_list l;
+	enum lru_list lru;
 	unsigned long nr_reclaimed, nr_scanned;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
 	struct blk_plug plug;
@@ -2110,13 +2110,13 @@ restart:
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 					nr[LRU_INACTIVE_FILE]) {
-		for_each_evictable_lru(l) {
-			if (nr[l]) {
+		for_each_evictable_lru(lru) {
+			if (nr[lru]) {
 				nr_to_scan = min_t(unsigned long,
-						   nr[l], SWAP_CLUSTER_MAX);
-				nr[l] -= nr_to_scan;
+						   nr[lru], SWAP_CLUSTER_MAX);
+				nr[lru] -= nr_to_scan;
 
-				nr_reclaimed += shrink_list(l, nr_to_scan,
+				nr_reclaimed += shrink_list(lru, nr_to_scan,
 							    mz, sc, priority);
 			}
 		}
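
For reference, here is a small standalone sketch (not part of the commit) of how the renamed iterator macros and helpers read at a call site after the l -> lru change. The enum values and the for_each_lru()/for_each_evictable_lru()/is_file_lru()/is_active_lru() definitions mirror the include/linux/mmzone.h hunks above, simplified to plain enumerators so the snippet builds in userspace; main() and printf() are illustrative only.

/* Userspace mock of the renamed LRU helpers from mmzone.h (simplified). */
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON,	/* 0 */
	LRU_ACTIVE_ANON,	/* 1 */
	LRU_INACTIVE_FILE,	/* 2 */
	LRU_ACTIVE_FILE,	/* 3 */
	LRU_UNEVICTABLE,	/* 4 */
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

int main(void)
{
	enum lru_list lru;

	/* Walk only the evictable lists, as get_scan_count() does above. */
	for_each_evictable_lru(lru)
		printf("lru %d: file=%d active=%d\n",
		       lru, is_file_lru(lru), is_active_lru(lru));
	return 0;
}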