author      Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-05-19 15:09:05 +1000
committer   Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>    2008-05-19 15:09:05 +1000
commit      c203e45f069af47ca7623e4dcd8c00bfba2722e4 (patch)
tree        4563115b6565dcfd97015c1c9366fb3d07cabf19 /arch/powerpc/mm
parent      a94477da38e0b261a7ecea71f4c95a3bcd5be69c (diff)
parent      b8291ad07a7f3b5b990900f0001198ac23ba893e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c   |  28
-rw-r--r--   arch/powerpc/mm/init_64.c         |  10
-rw-r--r--   arch/powerpc/mm/mem.c             |  30
-rw-r--r--   arch/powerpc/mm/slb.c             |  43
-rw-r--r--   arch/powerpc/mm/slb_low.S         |  16
5 files changed, 98 insertions, 29 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2b5a399f6fa..0f2d239d94c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
 	}
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* We try to use 16M pages for vmemmap if that is supported
+	 * and we have at least 1G of RAM at boot
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+	    lmb_phys_mem_size() >= 0x40000000)
+		mmu_vmemmap_psize = MMU_PAGE_16M;
+	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+		mmu_vmemmap_psize = MMU_PAGE_64K;
+	else
+		mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-	       "virtual = %d, io = %d\n",
+	       "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ", vmemmap = %d"
+#endif
+	       "\n",
 	       mmu_psize_defs[mmu_linear_psize].shift,
 	       mmu_psize_defs[mmu_virtual_psize].shift,
-	       mmu_psize_defs[mmu_io_psize].shift);
+	       mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	       ,mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+	       );
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Init large page size. Currently, we pick 16M or 1M depending
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c5ac532a016..6aa65375abf 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,6 +19,8 @@
  *
  */
 
+#undef DEBUG
+
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -208,12 +210,12 @@ int __meminit vmemmap_populated(unsigned long start, int page_size)
 }
 
 int __meminit vmemmap_populate(struct page *start_page,
-			unsigned long nr_pages, int node)
+			       unsigned long nr_pages, int node)
 {
 	unsigned long mode_rw;
 	unsigned long start = (unsigned long)start_page;
 	unsigned long end = (unsigned long)(start_page + nr_pages);
-	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
@@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
 			start, p, __pa(p));
 
 		mapped = htab_bolt_mapping(start, start + page_size,
-					   __pa(p), mode_rw, mmu_linear_psize,
+					   __pa(p), mode_rw, mmu_vmemmap_psize,
 					   mmu_kernel_ssize);
 		BUG_ON(mapped < 0);
 	}
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d9e37f365b5..f67e118116f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -154,19 +154,35 @@ out:
 
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and can not contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. Only reason we need to check against device-tree
- * would be if we allow user-land to specify a memory range through a
- * system call/ioctl etc. instead of doing offline/online through /sysfs.
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
+ * Instead it maintains it in lmb.memory structures. Walk through the
+ * memory regions, find holes and callback for contiguous regions.
 */
 int walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 			int (*func)(unsigned long, unsigned long, void *))
 {
-	return (*func)(start_pfn, nr_pages, arg);
+	struct lmb_property res;
+	unsigned long pfn, len;
+	u64 end;
+	int ret = -1;
+
+	res.base = (u64) start_pfn << PAGE_SHIFT;
+	res.size = (u64) nr_pages << PAGE_SHIFT;
+
+	end = res.base + res.size - 1;
+	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+		len = (unsigned long)(res.size >> PAGE_SHIFT);
+		ret = (*func)(pfn, len, arg);
+		if (ret)
+			break;
+		res.base += (res.size + 1);
+		res.size = (end - res.base + 1);
+	}
+	return ret;
 }
+EXPORT_SYMBOL_GPL(walk_memory_resource);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 906daeda59a..89497fb0428 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,9 +28,9 @@
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
-#define DBG(fmt...)
+#define DBG pr_debug
 #endif
 
 extern void slb_allocate_realmode(unsigned long ea);
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
 		slb_allocate_realmode(ea);
 }
 
+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
 {
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
 #define slb_vsid_shift(ssize)	\
@@ -263,13 +263,19 @@ void slb_initialize(void)
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
 	extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	extern unsigned int *slb_miss_kernel_load_vmemmap;
+	unsigned long vmemmap_llp;
+#endif
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -279,8 +285,14 @@ void slb_initialize(void)
 		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);
 
-		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: io LLP = %04x\n", io_llp);
+		DBG("SLB: linear LLP = %04lx\n", linear_llp);
+		DBG("SLB: io LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+				   SLB_VSID_KERNEL | vmemmap_llp);
+		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
@@ -301,11 +313,16 @@ void slb_initialize(void)
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
 	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);
 
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
 	asm volatile("isync":::"memory");
 }
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 657f6b37e9d..bc44dc4b5c6 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.
	 */
 
-	/* Check if hitting the linear mapping of the vmalloc/ioremap
-	 * kernel space
+	/* Check if hitting the linear mapping or some other kernel space
	*/
	bne	cr7,1f
@@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T
 
-1:	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	/* Check virtual memmap region. To be patched at kernel boot */
+	cmpldi	cr0,r9,0xf
+	bne	1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+	li	r11,0
+	b	6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
	 * will be patched by the kernel at boot
	 */
 BEGIN_FTR_SECTION
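As a quick illustration of the vmemmap page-size policy that the hash_utils_64.c hunk above introduces, here is a minimal standalone C sketch (not kernel code; pick_vmemmap_psize(), supports_16m, supports_64k and phys_mem_bytes are illustrative stand-ins for mmu_psize_defs[] and lmb_phys_mem_size()): prefer 16M pages for the vmemmap when the CPU supports them and at least 1G of RAM is present at boot, otherwise fall back to 64K, then 4K.

```c
#include <stdio.h>

/* Illustrative stand-ins for the kernel's MMU_PAGE_* indices. */
enum mmu_page_size { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M };

/*
 * Mirror of the selection logic added to htab_init_page_sizes():
 * 16M if supported and >= 1G of RAM at boot, else 64K, else 4K.
 * supports_16m/supports_64k stand in for mmu_psize_defs[].shift being
 * non-zero; phys_mem_bytes stands in for lmb_phys_mem_size().
 */
static enum mmu_page_size pick_vmemmap_psize(int supports_16m, int supports_64k,
                                             unsigned long long phys_mem_bytes)
{
	if (supports_16m && phys_mem_bytes >= 0x40000000ULL)
		return MMU_PAGE_16M;
	if (supports_64k)
		return MMU_PAGE_64K;
	return MMU_PAGE_4K;
}

int main(void)
{
	/* A 2G machine with 16M pages available picks 16M (prints 2). */
	printf("%d\n", pick_vmemmap_psize(1, 1, 2ULL << 30));
	/* A 512M machine without 16M pages but with 64K picks 64K (prints 1). */
	printf("%d\n", pick_vmemmap_psize(0, 1, 512ULL << 20));
	return 0;
}
```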