From a5ec39507129a086d8838228ac1ca0a2eab38f91 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 7 May 2010 14:54:55 +0900 Subject: sh: convert kexec crash kernel management to LMB. This migrates the crash kernel handling off of bootmem and over to LMB. Signed-off-by: Paul Mundt --- arch/sh/include/asm/kexec.h | 8 +++++++ arch/sh/kernel/machine_kexec.c | 51 +++++++++++++++++++++++++++++++++++++++++- arch/sh/kernel/setup.c | 43 ----------------------------------- 3 files changed, 58 insertions(+), 44 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/include/asm/kexec.h b/arch/sh/include/asm/kexec.h index 765a5e1660f..ad6ef8a275e 100644 --- a/arch/sh/include/asm/kexec.h +++ b/arch/sh/include/asm/kexec.h @@ -26,6 +26,10 @@ /* The native architecture */ #define KEXEC_ARCH KEXEC_ARCH_SH +#ifdef CONFIG_KEXEC +/* arch/sh/kernel/machine_kexec.c */ +void reserve_crashkernel(void); + static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { @@ -59,4 +63,8 @@ static inline void crash_setup_regs(struct pt_regs *newregs, newregs->pc = (unsigned long)current_text_addr(); } } +#else +static inline void reserve_crashkernel(void) { } +#endif /* CONFIG_KEXEC */ + #endif /* __ASM_SH_KEXEC_H */ diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 7672141c841..f0f049caa6e 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -8,7 +8,6 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ - #include #include #include @@ -16,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -148,3 +148,52 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); #endif } + +void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + int ret; + + /* this is necessary because of lmb_phys_mem_size() */ + lmb_analyze(); + + ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(), + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + } + + if (crashk_res.end == crashk_res.start) + goto disable; + + crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1); + if (!crashk_res.start) { + crashk_res.start = lmb_alloc(crash_size, PAGE_SIZE); + if (!crashk_res.start) { + pr_err("crashkernel allocation failed\n"); + goto disable; + } + } else { + ret = lmb_reserve(crashk_res.start, crash_size); + if (unlikely(ret < 0)) { + pr_err("crashkernel reservation failed - " + "memory is in use\n"); + goto disable; + } + } + + pr_info("Reserving %ldMB of memory at %ldMB " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start >> 20), + (unsigned long)(lmb_phys_mem_size() >> 20)); + + crashk_res.end = crashk_res.start + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); + + return; + +disable: + crashk_res.start = crashk_res.end = 0; +} diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 9c7f7811af7..d67a8a38690 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -145,49 +145,6 @@ static void __init register_bootmem_low_pages(void) free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages)); } -#ifdef CONFIG_KEXEC -static void __init reserve_crashkernel(void) -{ - unsigned long long free_mem; - unsigned long long crash_size, crash_base; - void *vp; - int ret; - - free_mem = ((unsigned long 
long)max_low_pfn - min_low_pfn) << PAGE_SHIFT; - - ret = parse_crashkernel(boot_command_line, free_mem, - &crash_size, &crash_base); - if (ret == 0 && crash_size) { - if (crash_base <= 0) { - vp = alloc_bootmem_nopanic(crash_size); - if (!vp) { - printk(KERN_INFO "crashkernel allocation " - "failed\n"); - return; - } - crash_base = __pa(vp); - } else if (reserve_bootmem(crash_base, crash_size, - BOOTMEM_EXCLUSIVE) < 0) { - printk(KERN_INFO "crashkernel reservation failed - " - "memory is in use\n"); - return; - } - - printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " - "for crashkernel (System RAM: %ldMB)\n", - (unsigned long)(crash_size >> 20), - (unsigned long)(crash_base >> 20), - (unsigned long)(free_mem >> 20)); - crashk_res.start = crash_base; - crashk_res.end = crash_base + crash_size - 1; - insert_resource(&iomem_resource, &crashk_res); - } -} -#else -static inline void __init reserve_crashkernel(void) -{} -#endif - static void __init check_for_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD -- cgit v1.2.3-70-g09d2 From 36fa06d6b5ef1874d012c04b23d5b8982b1f8dea Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 7 May 2010 15:10:07 +0900 Subject: sh: convert initrd reservation to LMB. This switches over from bootmem -> LMB for the initrd area reservation. Signed-off-by: Paul Mundt --- arch/sh/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh') diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index d67a8a38690..08001729f5e 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -192,7 +192,7 @@ static void __init check_for_initrd(void) initrd_start = (unsigned long)__va(__pa(start)); initrd_end = initrd_start + INITRD_SIZE; - reserve_bootmem(__pa(initrd_start), INITRD_SIZE, BOOTMEM_DEFAULT); + lmb_reserve(__pa(initrd_start), INITRD_SIZE); return; -- cgit v1.2.3-70-g09d2 From 080e71e13d99d850875c0335c364766162eae7c6 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 7 May 2010 15:10:42 +0900 Subject: sh: bump up extra LMB reservations in bootmem init. This bumps the extra LMB reservations up in the initialization order so that they're accounted for prior to iterating over the region list. This ensures that reservations are visible both within the LMB and bootmem context. Signed-off-by: Paul Mundt --- arch/sh/kernel/setup.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 08001729f5e..e3f0da7b865 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -275,6 +275,12 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) __add_active_range(0, start_pfn, end_pfn); } + /* + * Handle additional early reservations + */ + check_for_initrd(); + reserve_crashkernel(); + /* * Add all physical memory to the bootmem map and mark each * area as present. @@ -290,10 +296,6 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) node_set_online(0); sparse_memory_present_with_active_regions(0); - - check_for_initrd(); - - reserve_crashkernel(); } #ifndef CONFIG_NEED_MULTIPLE_NODES -- cgit v1.2.3-70-g09d2 From 19d8f84f86af867abee174be8bf1e4941a59143d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 10 May 2010 15:39:05 +0900 Subject: sh: enable LMB region setup via machvec. This plugs in a memory init callback in the machvec to permit boards to wire up various bits of memory directly into LMB. A generic machvec implementation is provided that simply wraps around the normal Kconfig-derived memory start/size.
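For a board with more than one memory bank, the new hook makes it possible to register each bank with LMB explicitly instead of relying on generic_mem_init(). A minimal sketch follows; the board name, second-bank base, and size are hypothetical, while lmb_add(), the mv_mem_init member, and the __initmv machvec registration come from this patch and the existing machvec code:

#include <linux/init.h>
#include <linux/lmb.h>
#include <asm/machvec.h>

static void __init myboard_mem_init(void)
{
	/* the primary bank, exactly what generic_mem_init() registers */
	lmb_add(__MEMORY_START, __MEMORY_SIZE);

	/* hypothetical second bank: 64MB at 0x10000000 */
	lmb_add(0x10000000, 64 << 20);
}

static struct sh_machine_vector mv_myboard __initmv = {
	.mv_name	= "myboard",
	.mv_mem_init	= myboard_mem_init,
};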
Signed-off-by: Paul Mundt --- arch/sh/include/asm/io_generic.h | 1 + arch/sh/include/asm/machvec.h | 2 ++ arch/sh/include/asm/mmzone.h | 1 - arch/sh/kernel/machvec.c | 1 + arch/sh/kernel/setup.c | 26 +++++++++++++------------- arch/sh/mm/init.c | 8 +++++++- 6 files changed, 24 insertions(+), 15 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/include/asm/io_generic.h b/arch/sh/include/asm/io_generic.h index 1e5d375f55d..491df93cbf8 100644 --- a/arch/sh/include/asm/io_generic.h +++ b/arch/sh/include/asm/io_generic.h @@ -38,5 +38,6 @@ void IO_CONCAT(__IO_PREFIX,iounmap)(void *addr); void __iomem *IO_CONCAT(__IO_PREFIX,ioport_map)(unsigned long addr, unsigned int size); void IO_CONCAT(__IO_PREFIX,ioport_unmap)(void __iomem *addr); +void IO_CONCAT(__IO_PREFIX,mem_init)(void); #undef __IO_PREFIX diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h index 9c30955630f..bc0218cb72e 100644 --- a/arch/sh/include/asm/machvec.h +++ b/arch/sh/include/asm/machvec.h @@ -49,6 +49,8 @@ struct sh_machine_vector { int (*mv_clk_init)(void); int (*mv_mode_pins)(void); + + void (*mv_mem_init)(void); }; extern struct sh_machine_vector sh_mv; diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 7f5363b29ba..94f04b2f4fb 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -42,7 +42,6 @@ setup_bootmem_node(int nid, unsigned long start, unsigned long end) void __init plat_mem_setup(void); /* arch/sh/kernel/setup.c */ -void __init setup_bootmem_allocator(unsigned long start_pfn); void __init __add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index 1652340ba3f..85cfaf916fd 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -131,6 +131,7 @@ void __init sh_mv_setup(void) mv_set(ioport_unmap); mv_set(irq_demux); mv_set(mode_pins); + mv_set(mem_init); if (!sh_mv.mv_nr_irqs) sh_mv.mv_nr_irqs = NR_IRQS; diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index e3f0da7b865..2c9ab284276 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -4,7 +4,7 @@ * This file handles the architecture-dependent parts of initialization * * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2010 Paul Mundt */ #include #include @@ -41,6 +41,7 @@ #include #include #include +#include /* * Initialize loops_per_jiffy as 10000000 (1000MIPS). @@ -247,7 +248,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, add_active_range(nid, start_pfn, end_pfn); } -void __init setup_bootmem_allocator(unsigned long free_pfn) +void __init do_init_bootmem(void) { unsigned long bootmap_size; unsigned long bootmap_pages, bootmem_paddr; @@ -298,12 +299,9 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) sparse_memory_present_with_active_regions(0); } -#ifndef CONFIG_NEED_MULTIPLE_NODES static void __init setup_memory(void) { unsigned long start_pfn; - u64 base = min_low_pfn << PAGE_SHIFT; - u64 size = (max_low_pfn << PAGE_SHIFT) - base; /* * Partially used pages are not usable - thus @@ -311,8 +309,6 @@ static void __init setup_memory(void) */ start_pfn = PFN_UP(__pa(_end)); - lmb_add(base, size); - /* * Reserve the kernel text and * Reserve the bootmem bitmap. 
We do this in two steps (first step @@ -333,11 +329,9 @@ static void __init setup_memory(void) lmb_analyze(); lmb_dump_all(); - setup_bootmem_allocator(start_pfn); + do_init_bootmem(); + plat_mem_setup(); } -#else -extern void __init setup_memory(void); -#endif /* * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by @@ -358,7 +352,11 @@ static int __init parse_elfcorehdr(char *arg) early_param("elfcorehdr", parse_elfcorehdr); #endif -void __init __attribute__ ((weak)) plat_early_device_setup(void) +void __init __weak plat_early_device_setup(void) +{ +} + +void __init __weak plat_mem_setup(void) { } @@ -426,7 +424,10 @@ void __init setup_arch(char **cmdline_p) /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); + lmb_init(); + sh_mv_setup(); + sh_mv.mv_mem_init(); /* * Find the highest page frame number we have available @@ -442,7 +443,6 @@ void __init setup_arch(char **cmdline_p) nodes_clear(node_online_map); pmb_init(); - lmb_init(); setup_memory(); sparse_init(); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index c505de61a5c..9c5400b02f4 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -2,7 +2,7 @@ * linux/arch/sh/mm/init.c * * Copyright (C) 1999 Niibe Yutaka - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2010 Paul Mundt * * Based on linux/arch/i386/mm/init.c: * Copyright (C) 1995 Linus Torvalds @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,11 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); pgd_t swapper_pg_dir[PTRS_PER_PGD]; +void __init generic_mem_init(void) +{ + lmb_add(__MEMORY_START, __MEMORY_SIZE); +} + #ifdef CONFIG_MMU static pte_t *__get_pte_phys(unsigned long addr) { -- cgit v1.2.3-70-g09d2 From 5e2ff328c0668794ff408a4632f5b8a62827571f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 10 May 2010 20:17:25 +0900 Subject: sh: rework memory limits to work with LMB. This reworks the memory limit handling to tie in through the available LMB infrastructure. This requires a bit of reordering as we need to have all of the LMB reservations taken care of prior to establishing the limits. While we're at it, the crash kernel reservation semantics are reworked so that we allocate from the bottom up and reduce the risk of having to disable the memory limit due to a clash with the crash kernel reservation. 
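To make the reworked semantics concrete, here is an illustrative walk-through, assuming a single 128MB bank at 0x08000000 and "mem=64M crashkernel=16M" on the command line (all numbers hypothetical; the calls are the ones in the hunks below):

	/* early_parse_mem(): memory_limit = PAGE_ALIGN(memparse(p, &p)) = 0x04000000 */

	/* reserve_crashkernel(): cap the search below the limited region */
	unsigned long max = lmb_end_of_DRAM() - memory_limit;	/* 0x10000000 - 0x04000000 = 0x0c000000 */
	crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);

Since __lmb_alloc_base() only hands out memory below 'max', the 16MB reservation normally lands inside the limited region and the limit survives. Only when the final reservation still ends above the limit (a fixed crashkernel=size@offset base placed up high, say) does the fallback fire and drop the limit:

	if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
		memory_limit = 0;
		pr_info("Disabled memory limit for crashkernel\n");
	}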
Signed-off-by: Paul Mundt --- arch/sh/include/asm/page.h | 2 +- arch/sh/kernel/machine_kexec.c | 20 ++++++++++---- arch/sh/kernel/setup.c | 63 +++++++++++++++++------------------------- 3 files changed, 40 insertions(+), 45 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 0152c040f6c..fb703d120d0 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -49,7 +49,7 @@ extern unsigned long shm_align_mask; extern unsigned long max_low_pfn, min_low_pfn; -extern unsigned long memory_start, memory_end; +extern unsigned long memory_start, memory_end, memory_limit; static inline unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2) diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index f0f049caa6e..7f68fc0e89e 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -169,7 +169,8 @@ void __init reserve_crashkernel(void) crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1); if (!crashk_res.start) { - crashk_res.start = lmb_alloc(crash_size, PAGE_SIZE); + unsigned long max = lmb_end_of_DRAM() - memory_limit; + crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max); if (!crashk_res.start) { pr_err("crashkernel allocation failed\n"); goto disable; @@ -183,15 +184,22 @@ void __init reserve_crashkernel(void) } } - pr_info("Reserving %ldMB of memory at %ldMB " + crashk_res.end = crashk_res.start + crash_size - 1; + + /* + * Crash kernel trumps memory limit + */ + if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) { + memory_limit = 0; + pr_info("Disabled memory limit for crashkernel\n"); + } + + pr_info("Reserving %ldMB of memory at 0x%08lx " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), - (unsigned long)(crashk_res.start >> 20), + (unsigned long)(crashk_res.start), (unsigned long)(lmb_phys_mem_size() >> 20)); - crashk_res.end = crashk_res.start + crash_size - 1; - insert_resource(&iomem_resource, &crashk_res); - return; disable: diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 2c9ab284276..f6a2db12ad7 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -95,6 +95,7 @@ unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_end = 0; EXPORT_SYMBOL(memory_end); +unsigned long memory_limit = 0; static struct resource mem_resources[MAX_NUMNODES]; @@ -102,21 +103,12 @@ int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; static int __init early_parse_mem(char *p) { - unsigned long size; + if (!p) + return 1; - memory_start = (unsigned long)__va(__MEMORY_START); - size = memparse(p, &p); - - if (size > __MEMORY_SIZE) { - printk(KERN_ERR - "Using mem= to increase the size of kernel memory " - "is not allowed.\n" - " Recompile the kernel with the correct value for " - "CONFIG_MEMORY_SIZE.\n"); - return 0; - } + memory_limit = PAGE_ALIGN(memparse(p, &p)); - memory_end = memory_start + size; + pr_notice("Memory limited to %ldMB\n", memory_limit >> 20); return 0; } @@ -252,7 +244,7 @@ void __init do_init_bootmem(void) { unsigned long bootmap_size; unsigned long bootmap_pages, bootmem_paddr; - u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT; + u64 total_pages = lmb_phys_mem_size() >> PAGE_SHIFT; int i; bootmap_pages = bootmem_bootmap_pages(total_pages); @@ -276,12 +268,6 @@ void __init do_init_bootmem(void) __add_active_range(0, start_pfn, end_pfn); } - /* - * Handle additional early reservations - */ - check_for_initrd(); - 
reserve_crashkernel(); - /* * Add all physical memory to the bootmem map and mark each * area as present. @@ -299,7 +285,7 @@ void __init do_init_bootmem(void) sparse_memory_present_with_active_regions(0); } -static void __init setup_memory(void) +static void __init early_reserve_mem(void) { unsigned long start_pfn; @@ -326,11 +312,11 @@ static void __init setup_memory(void) if (CONFIG_ZERO_PAGE_OFFSET != 0) lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); - lmb_analyze(); - lmb_dump_all(); - - do_init_bootmem(); - plat_mem_setup(); + /* + * Handle additional early reservations + */ + check_for_initrd(); + reserve_crashkernel(); } /* @@ -397,10 +383,6 @@ void __init setup_arch(char **cmdline_p) bss_resource.start = virt_to_phys(__bss_start); bss_resource.end = virt_to_phys(_ebss)-1; - memory_start = (unsigned long)__va(__MEMORY_START); - if (!memory_end) - memory_end = memory_start + __MEMORY_SIZE; - #ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); #else @@ -417,8 +399,6 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); - uncached_init(); - plat_early_device_setup(); /* Let earlyprintk output early console messages */ @@ -429,21 +409,28 @@ void __init setup_arch(char **cmdline_p) sh_mv_setup(); sh_mv.mv_mem_init(); - /* - * Find the highest page frame number we have available - */ - max_pfn = PFN_DOWN(__pa(memory_end)); + early_reserve_mem(); + + lmb_enforce_memory_limit(memory_limit); + lmb_analyze(); + + lmb_dump_all(); /* * Determine low and high memory ranges: */ - max_low_pfn = max_pfn; + max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; min_low_pfn = __MEMORY_START >> PAGE_SHIFT; nodes_clear(node_online_map); + memory_start = (unsigned long)__va(__MEMORY_START); + memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size()); + + uncached_init(); pmb_init(); - setup_memory(); + do_init_bootmem(); + plat_mem_setup(); sparse_init(); #ifdef CONFIG_DUMMY_CONSOLE -- cgit v1.2.3-70-g09d2 From 4bc277ac9cae60e11fe2e557e4ea4acb56d3dc9a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 11 May 2010 13:32:19 +0900 Subject: sh: bootmem refactoring. This reworks much of the bootmem setup and initialization code, allowing us to get rid of duplicate work between the NUMA and non-NUMA cases. The end result is a much more flexible interface for supporting more complex topologies (fake NUMA, highmem, etc.) that is entirely LMB-backed. This is an incremental step toward further NUMA work, as well as toward gradually migrating off of bootmem entirely.
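Since a fair amount of code moves between files here, the resulting boot flow is easier to follow condensed in one place. A sketch of the non-NUMA path after this patch, condensed from the hunks below (error handling and the CONFIG_NEED_MULTIPLE_NODES details omitted):

	setup_arch()
	    paging_init()				/* now in arch/sh/mm/init.c */
		lmb_init();
		sh_mv.mv_mem_init();			/* board registers its RAM with LMB */
		early_reserve_mem();			/* kernel text, zero page, initrd, crash kernel */
		lmb_enforce_memory_limit(memory_limit);
		lmb_analyze();
		do_init_bootmem();
		    __add_active_range(0, ...);		/* one call per LMB memory region */
		    allocate_pgdat(0);			/* all of system RAM in node 0 for now */
		    plat_mem_setup();
		    bootmem_init_one_node(nid);		/* per-node bitmap + LMB reservations */
		    sparse_init();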
Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmzone.h | 2 + arch/sh/include/asm/setup.h | 1 + arch/sh/kernel/setup.c | 143 ++----------------------------------- arch/sh/mm/init.c | 165 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 169 insertions(+), 142 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 94f04b2f4fb..8887baff5ef 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -44,6 +44,8 @@ void __init plat_mem_setup(void); /* arch/sh/kernel/setup.c */ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); +/* arch/sh/mm/init.c */ +void __init allocate_pgdat(unsigned int nid); #endif /* __KERNEL__ */ #endif /* __ASM_SH_MMZONE_H */ diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h index 4758325bb24..01fa17a3d75 100644 --- a/arch/sh/include/asm/setup.h +++ b/arch/sh/include/asm/setup.h @@ -19,6 +19,7 @@ #define COMMAND_LINE ((char *) (PARAM+0x100)) void sh_mv_setup(void); +void check_for_initrd(void); #endif /* __KERNEL__ */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index f6a2db12ad7..61404ed0144 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -114,31 +114,7 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); -/* - * Register fully available low RAM pages with the bootmem allocator. - */ -static void __init register_bootmem_low_pages(void) -{ - unsigned long curr_pfn, last_pfn, pages; - - /* - * We are rounding up the start address of usable memory: - */ - curr_pfn = PFN_UP(__MEMORY_START); - - /* - * ... and at the end of the usable range downwards: - */ - last_pfn = PFN_DOWN(__pa(memory_end)); - - if (last_pfn > max_low_pfn) - last_pfn = max_low_pfn; - - pages = last_pfn - curr_pfn; - free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages)); -} - -static void __init check_for_initrd(void) +void __init check_for_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start, end; @@ -240,85 +216,6 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, add_active_range(nid, start_pfn, end_pfn); } -void __init do_init_bootmem(void) -{ - unsigned long bootmap_size; - unsigned long bootmap_pages, bootmem_paddr; - u64 total_pages = lmb_phys_mem_size() >> PAGE_SHIFT; - int i; - - bootmap_pages = bootmem_bootmap_pages(total_pages); - - bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); - - /* - * Find a proper area for the bootmem bitmap. After this - * bootstrap step all allocations (until the page allocator - * is intact) must be done via bootmem_alloc(). - */ - bootmap_size = init_bootmem_node(NODE_DATA(0), - bootmem_paddr >> PAGE_SHIFT, - min_low_pfn, max_low_pfn); - - /* Add active regions with valid PFNs. */ - for (i = 0; i < lmb.memory.cnt; i++) { - unsigned long start_pfn, end_pfn; - start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); - __add_active_range(0, start_pfn, end_pfn); - } - - /* - * Add all physical memory to the bootmem map and mark each - * area as present. - */ - register_bootmem_low_pages(); - - /* Reserve the sections we're already using. 
*/ - for (i = 0; i < lmb.reserved.cnt; i++) - reserve_bootmem(lmb.reserved.region[i].base, - lmb_size_bytes(&lmb.reserved, i), - BOOTMEM_DEFAULT); - - node_set_online(0); - - sparse_memory_present_with_active_regions(0); -} - -static void __init early_reserve_mem(void) -{ - unsigned long start_pfn; - - /* - * Partially used pages are not usable - thus - * we are rounding upwards: - */ - start_pfn = PFN_UP(__pa(_end)); - - /* - * Reserve the kernel text and - * Reserve the bootmem bitmap. We do this in two steps (first step - * was init_bootmem()), because this catches the (definitely buggy) - * case of us accidentally initializing the bootmem allocator with - * an invalid RAM area. - */ - lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, - (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - - (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); - - /* - * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. - */ - if (CONFIG_ZERO_PAGE_OFFSET != 0) - lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); - - /* - * Handle additional early reservations - */ - check_for_initrd(); - reserve_crashkernel(); -} - /* * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by * is_kdump_kernel() to determine if we are booting after a panic. Hence @@ -342,10 +239,6 @@ void __init __weak plat_early_device_setup(void) { } -void __init __weak plat_mem_setup(void) -{ -} - void __init setup_arch(char **cmdline_p) { enable_mmu(); @@ -401,44 +294,16 @@ void __init setup_arch(char **cmdline_p) plat_early_device_setup(); - /* Let earlyprintk output early console messages */ - early_platform_driver_probe("earlyprintk", 1, 1); - - lmb_init(); - sh_mv_setup(); - sh_mv.mv_mem_init(); - - early_reserve_mem(); - lmb_enforce_memory_limit(memory_limit); - lmb_analyze(); - - lmb_dump_all(); - - /* - * Determine low and high memory ranges: - */ - max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; - min_low_pfn = __MEMORY_START >> PAGE_SHIFT; - - nodes_clear(node_online_map); - - memory_start = (unsigned long)__va(__MEMORY_START); - memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size()); + /* Let earlyprintk output early console messages */ + early_platform_driver_probe("earlyprintk", 1, 1); - uncached_init(); - pmb_init(); - do_init_bootmem(); - plat_mem_setup(); - sparse_init(); + paging_init(); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif - paging_init(); - - ioremap_fixed_init(); /* Perform the machine specific initialisation */ if (likely(sh_mv.mv_setup)) diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 9c5400b02f4..7f3cb5254ab 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -17,11 +17,14 @@ #include #include #include +#include #include #include +#include #include #include #include +#include #include #include @@ -33,6 +36,11 @@ void __init generic_mem_init(void) lmb_add(__MEMORY_START, __MEMORY_SIZE); } +void __init __weak plat_mem_setup(void) +{ + /* Nothing to see here, move along. 
*/ +} + #ifdef CONFIG_MMU static pte_t *__get_pte_phys(unsigned long addr) { @@ -158,15 +166,166 @@ void __init page_table_range_init(unsigned long start, unsigned long end, } #endif /* CONFIG_MMU */ -/* - * paging_init() sets up the page tables - */ +void __init allocate_pgdat(unsigned int nid) +{ + unsigned long start_pfn, end_pfn; +#ifdef CONFIG_NEED_MULTIPLE_NODES + unsigned long phys; +#endif + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + +#ifdef CONFIG_NEED_MULTIPLE_NODES + phys = __lmb_alloc_base(sizeof(struct pglist_data), + SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); + /* Retry with all of system memory */ + if (!phys) + phys = __lmb_alloc_base(sizeof(struct pglist_data), + SMP_CACHE_BYTES, lmb_end_of_DRAM()); + if (!phys) + panic("Can't allocate pgdat for node %d\n", nid); + + NODE_DATA(nid) = __va(phys); + memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); + + NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; +#endif + + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +static void __init bootmem_init_one_node(unsigned int nid) +{ + unsigned long total_pages, paddr; + unsigned long end_pfn; + struct pglist_data *p; + int i; + + p = NODE_DATA(nid); + + /* Nothing to do.. */ + if (!p->node_spanned_pages) + return; + + end_pfn = p->node_start_pfn + p->node_spanned_pages; + + total_pages = bootmem_bootmap_pages(p->node_spanned_pages); + + paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE); + if (!paddr) + panic("Can't allocate bootmap for nid[%d]\n", nid); + + init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); + + free_bootmem_with_active_regions(nid, end_pfn); + + /* + * XXX Handle initial reservations for the system memory node + * only for the moment, we'll refactor this later for handling + * reservations in other nodes. + */ + if (nid == 0) { + /* Reserve the sections we're already using. */ + for (i = 0; i < lmb.reserved.cnt; i++) + reserve_bootmem(lmb.reserved.region[i].base, + lmb_size_bytes(&lmb.reserved, i), + BOOTMEM_DEFAULT); + } + + sparse_memory_present_with_active_regions(nid); +} + +static void __init do_init_bootmem(void) +{ + int i; + + /* Add active regions with valid PFNs. */ + for (i = 0; i < lmb.memory.cnt; i++) { + unsigned long start_pfn, end_pfn; + start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + __add_active_range(0, start_pfn, end_pfn); + } + + /* All of system RAM sits in node 0 for the non-NUMA case */ + allocate_pgdat(0); + node_set_online(0); + + plat_mem_setup(); + + for_each_online_node(i) + bootmem_init_one_node(i); + + sparse_init(); +} + +static void __init early_reserve_mem(void) +{ + unsigned long start_pfn; + + /* + * Partially used pages are not usable - thus + * we are rounding upwards: + */ + start_pfn = PFN_UP(__pa(_end)); + + /* + * Reserve the kernel text and Reserve the bootmem bitmap. We do + * this in two steps (first step was init_bootmem()), because + * this catches the (definitely buggy) case of us accidentally + * initializing the bootmem allocator with an invalid RAM area. + */ + lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - + (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); + + /* + * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. 
+ */ + if (CONFIG_ZERO_PAGE_OFFSET != 0) + lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); + + /* + * Handle additional early reservations + */ + check_for_initrd(); + reserve_crashkernel(); +} + void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long vaddr, end; int nid; + lmb_init(); + + sh_mv.mv_mem_init(); + + early_reserve_mem(); + + lmb_enforce_memory_limit(memory_limit); + lmb_analyze(); + + lmb_dump_all(); + + /* + * Determine low and high memory ranges: + */ + max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; + min_low_pfn = __MEMORY_START >> PAGE_SHIFT; + + nodes_clear(node_online_map); + + memory_start = (unsigned long)__va(__MEMORY_START); + memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size()); + + uncached_init(); + pmb_init(); + do_init_bootmem(); + ioremap_fixed_init(); + /* We don't need to map the kernel through the TLB, as * it is permanatly mapped using P1. So clear the * entire pgd. */ -- cgit v1.2.3-70-g09d2 From dfbca89987b74c34d9b1a2414b0e5ccee65347e0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 11 May 2010 13:50:29 +0900 Subject: sh: Reject small mappings for PMB bolting. The minimum section size for the PMB is 16M, so just always error out early if the specified size is too small. This permits us to unconditionally call in to pmb_bolt_mapping() with variable sizes without wasting a TLB and cache flush for the range. Signed-off-by: Paul Mundt --- arch/sh/mm/pmb.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/sh') diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index e9f5384f3f1..18623ba751b 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c @@ -341,6 +341,8 @@ int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, unsigned long flags, pmb_flags; int i, mapped; + if (size < SZ_16M) + return -EINVAL; if (!pmb_addr_valid(vaddr, size)) return -EFAULT; if (pmb_mapping_exists(vaddr, phys, size)) -- cgit v1.2.3-70-g09d2 From 21823259a70b7a2a21eea1d48c25a6f38896dd11 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 11 May 2010 13:52:50 +0900 Subject: sh: Ensure active regions have a backing PMB entry. In the NUMA or memory hot-add case where system memory has been partitioned up, we immediately run in to a situation where the existing PMB entry doesn't cover the new range (primarily as a result of the entry size being shrunk to match the node size early in the initialization). In order to fix this up it's necessary to preload a PMB mapping for the new range prior to activation in order to circumvent reset by MMU. 
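Taken together with the previous patch, the effect is that any sufficiently large range picks up a bolted PMB entry when it is registered, while undersized requests fail fast. A hypothetical illustration (node base and sizes are made up; the signature matches the pmb_bolt_mapping() call added below):

	/* 64MB hot-added node at 0x0c000000: gets a backing PMB mapping */
	pmb_bolt_mapping((unsigned long)__va(0x0c000000), 0x0c000000,
			 64 << 20, PAGE_KERNEL);

	/* 4MB range: below the 16MB PMB section minimum, rejected with
	   -EINVAL before any TLB or cache flushing is done */
	pmb_bolt_mapping((unsigned long)__va(0x0c000000), 0x0c000000,
			 4 << 20, PAGE_KERNEL);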
Signed-off-by: Paul Mundt --- arch/sh/kernel/setup.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/sh') diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 61404ed0144..57bd93838f1 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -191,13 +191,18 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn) { struct resource *res = &mem_resources[nid]; + unsigned long start, end; WARN_ON(res->name); /* max one active range per node for now */ + start = start_pfn << PAGE_SHIFT; + end = end_pfn << PAGE_SHIFT; + res->name = "System RAM"; - res->start = start_pfn << PAGE_SHIFT; - res->end = (end_pfn << PAGE_SHIFT) - 1; + res->start = start; + res->end = end - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + if (request_resource(&iomem_resource, res)) { pr_err("unable to request memory_resource 0x%lx 0x%lx\n", start_pfn, end_pfn); @@ -213,6 +218,14 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, request_resource(res, &data_resource); request_resource(res, &bss_resource); + /* + * Also make sure that there is a PMB mapping that covers this + * range before we attempt to activate it, to avoid reset by MMU. + * We can hit this path with NUMA or memory hot-add. + */ + pmb_bolt_mapping((unsigned long)__va(start), start, end - start, + PAGE_KERNEL); + add_active_range(nid, start_pfn, end_pfn); } -- cgit v1.2.3-70-g09d2
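With the series applied, a crash kernel is still requested on the command line in the usual way, but the reservation is now satisfied out of LMB. Illustrative invocations against the hypothetical 128MB-at-0x08000000 layout used above (values made up):

	crashkernel=16M		reserve_crashkernel() picks a base via __lmb_alloc_base()
	crashkernel=16M@160M	fixed physical base at 0x0a000000, reserved with lmb_reserve()

A successful reservation is then logged in the format used by the final version of reserve_crashkernel(), e.g.:

	Reserving 16MB of memory at 0x0b000000 for crashkernel (System RAM: 128MB)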