From e3c40f379a144f35e53864a2cd970e238071afd7 Mon Sep 17 00:00:00 2001
From: Daniel Kiper
Date: Tue, 24 May 2011 17:12:33 -0700
Subject: mm: pfn_to_section_nr()/section_nr_to_pfn() is valid only in CONFIG_SPARSEMEM context

pfn_to_section_nr()/section_nr_to_pfn() are valid only in a CONFIG_SPARSEMEM
context. Move them to the proper place.

Signed-off-by: Daniel Kiper
Cc: Dave Hansen
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux/mmzone.h')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e56f835274c..d7152002e18 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -928,9 +928,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #define pfn_to_nid(pfn) (0)
 #endif
 
-#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
-#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
-
 #ifdef CONFIG_SPARSEMEM
 
 /*
@@ -956,6 +953,9 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #error Allocator MAX_ORDER exceeds SECTION_SIZE
 #endif
 
+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+
 struct page;
 struct page_cgroup;
 struct mem_section {
--
cgit v1.2.3-70-g09d2

From a539f3533b78e39a22723d6d3e1e11b6c14454d9 Mon Sep 17 00:00:00 2001
From: Daniel Kiper
Date: Tue, 24 May 2011 17:12:51 -0700
Subject: mm: add SECTION_ALIGN_UP() and SECTION_ALIGN_DOWN() macro

Add the SECTION_ALIGN_UP() and SECTION_ALIGN_DOWN() macros, which align a
given pfn to the upper and lower section boundary, respectively. Required
for the latest memory hotplug support for the Xen balloon driver.

Signed-off-by: Daniel Kiper
Reviewed-by: Konrad Rzeszutek Wilk David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'include/linux/mmzone.h')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d7152002e18..217bcf6bca7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -956,6 +956,9 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
 #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
 
+#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
+#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
+
 struct page;
 struct page_cgroup;
 struct mem_section {
--
cgit v1.2.3-70-g09d2
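
As a quick illustration of the section arithmetic introduced by the two patches
above, the following stand-alone sketch exercises the four macros.
PFN_SECTION_SHIFT = 18 is only an assumed example value here; in the kernel it
is derived from SECTION_SIZE_BITS and PAGE_SHIFT and differs per architecture.

/*
 * Stand-alone sketch, not kernel code: PFN_SECTION_SHIFT = 18 is an
 * assumed example value used only for this demonstration.
 */
#include <stdio.h>

#define PFN_SECTION_SHIFT	18
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

#define pfn_to_section_nr(pfn)	((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec)	((sec) << PFN_SECTION_SHIFT)
#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

int main(void)
{
	unsigned long pfn = 0x40123;	/* arbitrary pfn inside section 1 */

	printf("section nr:   %lu\n", pfn_to_section_nr(pfn));                      /* 1       */
	printf("section base: 0x%lx\n", section_nr_to_pfn(pfn_to_section_nr(pfn))); /* 0x40000 */
	printf("align down:   0x%lx\n", SECTION_ALIGN_DOWN(pfn));                   /* 0x40000 */
	printf("align up:     0x%lx\n", SECTION_ALIGN_UP(pfn));                     /* 0x80000 */
	return 0;
}

With these values, aligning down drops a pfn to the first page of its section,
while aligning up advances to the first page of the next section unless the pfn
is already section-aligned.
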
From 7b7bf499f79de3f6c85a340c8453a78789523f85 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 May 2011 13:21:14 +0100
Subject: ARM: 6913/1: sparsemem: allow pfn_valid to be overridden when using SPARSEMEM

In commit eb33575c ("[ARM] Double check memmap is actually valid with a
memmap has unexpected holes V2"), a new function, memmap_valid_within,
was introduced to mmzone.h so that holes in the memmap which pass
pfn_valid in SPARSEMEM configurations can be detected and avoided.

The fix to this problem checks that the pfn <-> page linkages are correct
by calculating the page for the pfn and then checking that page_to_pfn on
that page returns the original pfn. Unfortunately, in SPARSEMEM
configurations, this results in reading from the page flags to determine
the correct section. Since the memmap here has been freed, junk is read
from memory and the check is no longer robust.

In the best case, reading from /proc/pagetypeinfo will give you the wrong
answer. In the worst case, you get SEGVs, Kernel OOPses and hung CPUs.
Furthermore, ioremap implementations that use pfn_valid to disallow the
remapping of normal memory will break.

This patch allows architectures to provide their own pfn_valid function
instead of using the default implementation used by sparsemem. The
architecture-specific version is aware of the memmap state and will return
false when passed a pfn for a freed page within a valid section.

Acked-by: Mel Gorman
Acked-by: Catalin Marinas
Tested-by: H Hartley Sweeten
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/Kconfig            | 3 +++
 arch/arm/include/asm/page.h | 2 +-
 arch/arm/mm/init.c          | 4 +++-
 include/linux/mmzone.h      | 2 ++
 4 files changed, 9 insertions(+), 2 deletions(-)

(limited to 'include/linux/mmzone.h')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7275009686e..5be55d950ab 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1516,6 +1516,9 @@ config ARCH_SPARSEMEM_DEFAULT
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool ARCH_SPARSEMEM_ENABLE
 
+config HAVE_ARCH_PFN_VALID
+	def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
+
 config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index f51a69595f6..ac75d084888 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -197,7 +197,7 @@ typedef unsigned long pgprot_t;
 
 typedef struct page *pgtable_t;
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 extern int pfn_valid(unsigned long);
 #endif
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea146f0..bbc3346e8bc 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,13 +273,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
 	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
+#ifndef CONFIG_SPARSEMEM
 static void arm_memory_present(void)
 {
 }
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 217bcf6bca7..261f299c944 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1056,12 +1056,14 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn)
 	return __nr_to_section(pfn_to_section_nr(pfn));
 }
 
+#ifndef CONFIG_HAVE_ARCH_PFN_VALID
 static inline int pfn_valid(unsigned long pfn)
 {
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
 }
+#endif
 
 static inline int pfn_present(unsigned long pfn)
 {
--
cgit v1.2.3-70-g09d2
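
Read together, the Kconfig, page.h and mmzone.h hunks above boil down to the
compile-time dispatch sketched below; this is a condensed illustration, not a
verbatim copy of either file.

/* Condensed sketch of the pfn_valid() selection added by this patch. */
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
/*
 * The architecture supplies the definition (ARM consults memblock above),
 * so pfns whose memmap has been freed are reported as invalid.
 */
extern int pfn_valid(unsigned long pfn);
#else
/* Generic sparsemem fallback: only checks that the section exists. */
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif
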
From 246e87a9393448c20873bc5dee64be68ed559e24 Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Thu, 26 May 2011 16:25:34 -0700
Subject: memcg: fix get_scan_count() for small targets

During memory reclaim we determine the number of pages to be scanned per
zone as

	(anon + file) >> priority.

Assume

	scan = (anon + file) >> priority.

If scan < SWAP_CLUSTER_MAX, the scan will be skipped for this time and
priority gets higher. This has some problems.

  1. This increases priority by 1 without any scan.
     To do a scan at this priority, the amount of pages should be larger
     than 512M. If pages >> priority < SWAP_CLUSTER_MAX, it's recorded and
     the scan will be batched later. (But we lose 1 priority.)
     If the memory size is below 16M, pages >> priority is 0 and there is
     no scan at DEF_PRIORITY, forever.

  2. If zone->all_unreclaimable == true, it's scanned only when
     priority == 0. So, x86's ZONE_DMA will never be recovered until the
     user of the pages frees memory by itself.

  3. With memcg, the limit of memory can be small. When using a small
     memcg, it gets priority < DEF_PRIORITY-2 very easily and needs to
     call wait_iff_congested(). To do a scan before priority=9, 64MB of
     memory should be used.

Then, this patch tries to scan SWAP_CLUSTER_MAX pages by force when

  1. the target is small enough.
  2. it's kswapd or memcg reclaim.

Then we can avoid a rapid priority drop and may be able to recover
all_unreclaimable in small zones. And this patch removes nr_saved_scan.
This will allow scanning at this priority even when pages >> priority is
very small.

Signed-off-by: KAMEZAWA Hiroyuki
Acked-by: Ying Han
Cc: Balbir Singh
Cc: KOSAKI Motohiro
Cc: Daisuke Nishimura
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/mmzone.h |  5 -----
 mm/page_alloc.c        |  4 +---
 mm/vmscan.c            | 60 +++++++++++++++++++++++++++-----------------------
 3 files changed, 34 insertions(+), 35 deletions(-)

(limited to 'include/linux/mmzone.h')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 217bcf6bca7..29312bdf119 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -273,11 +273,6 @@ struct zone_reclaim_stat {
 	 */
 	unsigned long		recent_rotated[2];
 	unsigned long		recent_scanned[2];
-
-	/*
-	 * accumulated for batching
-	 */
-	unsigned long		nr_saved_scan[NR_LRU_LISTS];
 };
 
 struct zone {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a00f17c3bf..a4e1db3f198 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4323,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 
 		zone->zone_pgdat = pgdat;
 		zone_pcp_init(zone);
-		for_each_lru(l) {
+		for_each_lru(l)
 			INIT_LIST_HEAD(&zone->lru[l].list);
-			zone->reclaim_stat.nr_saved_scan[l] = 0;
-		}
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
 		zone->reclaim_stat.recent_scanned[0] = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b0875871820..2e8fbacd874 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1717,26 +1717,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-				       unsigned long *nr_saved_scan)
-{
-	unsigned long nr;
-
-	*nr_saved_scan += nr_to_scan;
-	nr = *nr_saved_scan;
-
-	if (nr >= SWAP_CLUSTER_MAX)
-		*nr_saved_scan = 0;
-	else
-		nr = 0;
-
-	return nr;
-}
-
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	u64 fraction[2], denominator;
 	enum lru_list l;
 	int noswap = 0;
+	int force_scan = 0;
+
+
+	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+		/* kswapd does zone balancing and need to scan this zone */
+		if (scanning_global_lru(sc) && current_is_kswapd())
+			force_scan = 1;
+		/* memcg may have small limit and need to avoid priority drop */
+		if (!scanning_global_lru(sc))
+			force_scan = 1;
+	}
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		goto out;
 	}
 
-	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
 	if (scanning_global_lru(sc)) {
 		free = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
@@ -1836,8 +1827,23 @@ out:
 			scan >>= priority;
 			scan = div64_u64(scan * fraction[file], denominator);
 		}
-		nr[l] = nr_scan_try_batch(scan,
-					  &reclaim_stat->nr_saved_scan[l]);
+
+		/*
+		 * If zone is small or memcg is small, nr[l] can be 0.
+		 * This results no-scan on this priority and priority drop down.
+		 * For global direct reclaim, it can visit next zone and tend
+		 * not to have problems. For global kswapd, it's for zone
+		 * balancing and it need to scan a small amounts. When using
+		 * memcg, priority drop can cause big latency. So, it's better
+		 * to scan small amount. See may_noscan above.
+		 */
+		if (!scan && force_scan) {
+			if (file)
+				scan = SWAP_CLUSTER_MAX;
+			else if (!noswap)
+				scan = SWAP_CLUSTER_MAX;
+		}
+		nr[l] = scan;
 	}
 }
--
cgit v1.2.3-70-g09d2
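
To make the new get_scan_count() behaviour easier to follow, here is a
simplified, stand-alone model of the force_scan decision; the wrapper function
and its boolean parameters are invented for illustration (they stand in for
scanning_global_lru(sc) and current_is_kswapd()), and the per-LRU fraction
arithmetic of the real code is omitted.

#include <stdbool.h>

#define SWAP_CLUSTER_MAX	32UL	/* same value the kernel uses */

/* Simplified model of the logic added above; not the kernel function. */
unsigned long scan_target(unsigned long anon, unsigned long file,
			  int priority, bool global, bool kswapd)
{
	unsigned long scan = (anon + file) >> priority;
	bool force_scan = false;

	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
		/* kswapd balances this zone and must keep making progress */
		if (global && kswapd)
			force_scan = true;
		/* a small memcg should not drop priority just to get a scan */
		if (!global)
			force_scan = true;
	}

	/* instead of batching into nr_saved_scan, scan a minimum burst */
	if (!scan && force_scan)
		scan = SWAP_CLUSTER_MAX;

	return scan;
}

For a target below 16MB at DEF_PRIORITY (12), (anon + file) >> priority is 0,
so the old code never scanned it; with force_scan the same call now returns
SWAP_CLUSTER_MAX for kswapd and memcg reclaim.
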