Diffstat (limited to 'arch/ia64/mm')
-rw-r--r-- | arch/ia64/mm/Makefile    |  5
-rw-r--r-- | arch/ia64/mm/contig.c    |  4
-rw-r--r-- | arch/ia64/mm/discontig.c | 50
-rw-r--r-- | arch/ia64/mm/fault.c     | 34
-rw-r--r-- | arch/ia64/mm/init.c      | 15
-rw-r--r-- | arch/ia64/mm/numa.c      | 24
-rw-r--r-- | arch/ia64/mm/tlb.c       | 23
7 files changed, 101 insertions, 54 deletions
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index 7078f67887e..d78d20f0a0f 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_NUMA)	   += numa.o
 obj-$(CONFIG_DISCONTIGMEM) += discontig.o
-ifndef CONFIG_DISCONTIGMEM
-obj-y += contig.o
-endif
+obj-$(CONFIG_SPARSEMEM)	   += discontig.o
+obj-$(CONFIG_FLATMEM)	   += contig.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 91a055f5731..acaaec4e468 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -269,7 +269,7 @@ paging_init (void)
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_node(0, &contig_page_data, zones_size, 0,
+		free_area_init_node(0, NODE_DATA(0), zones_size, 0,
 				    zholes_size);
 	} else {
 		unsigned long map_size;
@@ -282,7 +282,7 @@ paging_init (void)
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
 		NODE_DATA(0)->node_mem_map = vmem_map;
-		free_area_init_node(0, &contig_page_data, zones_size,
+		free_area_init_node(0, NODE_DATA(0), zones_size,
 				    0, zholes_size);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b5c90e54819..a88cdb7232f 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -421,6 +421,37 @@ static void __init memory_less_nodes(void)
 	return;
 }
 
+#ifdef CONFIG_SPARSEMEM
+/**
+ * register_sparse_mem - notify SPARSEMEM that this memory range exists.
+ * @start: physical start of range
+ * @end: physical end of range
+ * @arg: unused
+ *
+ * Simply calls SPARSEMEM to register memory section(s).
+ */
+static int __init register_sparse_mem(unsigned long start, unsigned long end,
+				      void *arg)
+{
+	int nid;
+
+	start = __pa(start) >> PAGE_SHIFT;
+	end = __pa(end) >> PAGE_SHIFT;
+	nid = early_pfn_to_nid(start);
+	memory_present(nid, start, end);
+
+	return 0;
+}
+
+static void __init arch_sparse_init(void)
+{
+	efi_memmap_walk(register_sparse_mem, NULL);
+	sparse_init();
+}
+#else
+#define arch_sparse_init() do {} while (0)
+#endif
+
 /**
  * find_memory - walk the EFI memory map and setup the bootmem allocator
  *
@@ -524,12 +555,18 @@ void show_mem(void)
 	show_free_areas();
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_pgdat(pgdat) {
-		unsigned long present = pgdat->node_present_pages;
+		unsigned long present;
+		unsigned long flags;
 		int shared = 0, cached = 0, reserved = 0;
+
+		printk("Node ID: %d\n", pgdat->node_id);
+		pgdat_resize_lock(pgdat, &flags);
+		present = pgdat->node_present_pages;
 		for(i = 0; i < pgdat->node_spanned_pages; i++) {
-			struct page *page = pgdat_page_nr(pgdat, i);
-			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
+			struct page *page;
+			if (pfn_valid(pgdat->node_start_pfn + i))
+				page = pfn_to_page(pgdat->node_start_pfn + i);
+			else
 				continue;
 			if (PageReserved(page))
 				reserved++;
@@ -538,6 +575,7 @@ void show_mem(void)
 			else if (page_count(page))
 				shared += page_count(page)-1;
 		}
+		pgdat_resize_unlock(pgdat, &flags);
 		total_present += present;
 		total_reserved += reserved;
 		total_cached += cached;
@@ -648,12 +686,16 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
+	arch_sparse_init();
+
 	efi_memmap_walk(filter_rsvd_memory, count_node_pages);
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
 	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
 	vmem_map = (struct page *) vmalloc_end;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map); +#endif for_each_online_node(node) { memset(zones_size, 0, sizeof(zones_size)); @@ -690,7 +732,9 @@ void __init paging_init(void) pfn_offset = mem_data[node].min_pfn; +#ifdef CONFIG_VIRTUAL_MEM_MAP NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; +#endif free_area_init_node(node, NODE_DATA(node), zones_size, pfn_offset, zholes_size); } diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3c32af910d6..af7eb087dca 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -20,32 +20,6 @@ extern void die (char *, struct pt_regs *, long); /* - * This routine is analogous to expand_stack() but instead grows the - * register backing store (which grows towards higher addresses). - * Since the register backing store is access sequentially, we - * disallow growing the RBS by more than a page at a time. Note that - * the VM_GROWSUP flag can be set on any VM area but that's fine - * because the total process size is still limited by RLIMIT_STACK and - * RLIMIT_AS. - */ -static inline long -expand_backing_store (struct vm_area_struct *vma, unsigned long address) -{ - unsigned long grow; - - grow = PAGE_SIZE >> PAGE_SHIFT; - if (address - vma->vm_start > current->signal->rlim[RLIMIT_STACK].rlim_cur - || (((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->signal->rlim[RLIMIT_AS].rlim_cur)) - return -ENOMEM; - vma->vm_end += PAGE_SIZE; - vma->vm_mm->total_vm += grow; - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm += grow; - __vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow); - return 0; -} - -/* * Return TRUE if ADDRESS points at a page in the kernel's mapped segment * (inside region 5, on ia64) and that page is present. */ @@ -185,7 +159,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) || REGION_OFFSET(address) >= RGN_MAP_LIMIT) goto bad_area; - if (expand_backing_store(vma, address)) + /* + * Since the register backing store is accessed sequentially, + * we disallow growing it by more than a page at a time. + */ + if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) + goto bad_area; + if (expand_upwards(vma, address)) goto bad_area; } goto good_area; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1281c609ee9..e3215ba64ff 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -158,7 +158,7 @@ ia64_init_addr_space (void) vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7]; - vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP; + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; down_write(¤t->mm->mmap_sem); if (insert_vm_struct(current->mm, vma)) { up_write(¤t->mm->mmap_sem); @@ -275,26 +275,21 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! 
 
-	spin_lock(&init_mm.page_table_lock);
 	{
 		pud = pud_alloc(&init_mm, pgd, address);
 		if (!pud)
 			goto out;
-
 		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			goto out;
-		pte = pte_alloc_map(&init_mm, pmd, address);
+		pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			goto out;
-		if (!pte_none(*pte)) {
-			pte_unmap(pte);
+		if (!pte_none(*pte))
 			goto out;
-		}
 		set_pte(pte, mk_pte(page, pgprot));
-		pte_unmap(pte);
 	}
-  out:	spin_unlock(&init_mm.page_table_lock);
+  out:
 	/* no need for flush_tlb */
 	return page;
 }
@@ -593,7 +588,7 @@ mem_init (void)
 	platform_dma_init();
 #endif
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	if (!mem_map)
 		BUG();
 	max_mapnr = max_low_pfn;
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 77118bbf3d8..4e5c8b36ad9 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -47,3 +47,27 @@ paddr_to_nid(unsigned long paddr)
 
 	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
 }
+
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
+/*
+ * Because of holes evaluate on section limits.
+ * If the section of memory exists, then return the node where the section
+ * resides.  Otherwise return node 0 as the default.  This is used by
+ * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
+ * the section resides.
+ */
+int early_pfn_to_nid(unsigned long pfn)
+{
+	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+
+	for (i = 0; i < num_node_memblks; i++) {
+		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
+		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
+			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
+		if (section >= ssec && section < esec)
+			return node_memblk[i].nid;
+	}
+
+	return 0;
+}
+#endif
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 464557e4ed8..c79a9b96d02 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -77,19 +77,25 @@ wrap_mmu_context (struct mm_struct *mm)
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu(); /* prevent preemption/migration */
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i) && (i != cpu))
+		for_each_online_cpu(i) {
+			if (i != cpu)
 				per_cpu(ia64_need_tlb_flush, i) = 1;
+		}
 		put_cpu();
 	}
 	local_flush_tlb_all();
 }
 
 void
-ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits)
 {
 	static DEFINE_SPINLOCK(ptcg_lock);
 
+	if (mm != current->active_mm) {
+		flush_tlb_all();
+		return;
+	}
+
 	/* HW requires global serialization of ptc.ga.  */
 	spin_lock(&ptcg_lock);
 	{
@@ -135,15 +141,12 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	unsigned long size = end - start;
 	unsigned long nbits;
 
+#ifndef CONFIG_SMP
 	if (mm != current->active_mm) {
-		/* this does happen, but perhaps it's not worth optimizing for? */
-#ifdef CONFIG_SMP
-		flush_tlb_all();
-#else
 		mm->context = 0;
-#endif
 		return;
 	}
+#endif
 
 	nbits = ia64_fls(size + 0xfff);
 	while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
@@ -153,12 +156,14 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
 	start &= ~((1UL << nbits) - 1);
 
 # ifdef CONFIG_SMP
-	platform_global_tlb_purge(start, end, nbits);
+	platform_global_tlb_purge(mm, start, end, nbits);
 # else
+	preempt_disable();
 	do {
 		ia64_ptcl(start, (nbits<<2));
 		start += (1UL << nbits);
 	} while (start < end);
+	preempt_enable();
 # endif
 
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
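
The one non-obvious calculation in this patch is the section rounding in the new early_pfn_to_nid() in numa.c: a memory block's end address is rounded up to the next section boundary, so a block that ends mid-section still claims that whole section, while any pfn falling in a hole defaults to node 0. The following is a minimal standalone sketch of that lookup, assuming 64-bit longs; the PAGE_SHIFT/PA_SECTION_SHIFT values and the two-node memblk table are made-up illustrative assumptions, not the kernel's (the real constants come from asm/sparsemem.h and the table from the ACPI SRAT).

/*
 * Userspace sketch of the early_pfn_to_nid() lookup added in numa.c.
 * All constants and the node_memblk table below are illustrative
 * assumptions, not the kernel's values.
 */
#include <stdio.h>

#define PAGE_SHIFT        14	/* assumed 16KB pages */
#define PA_SECTION_SHIFT  30	/* assumed 1GB sections */
#define PFN_SECTION_SHIFT (PA_SECTION_SHIFT - PAGE_SHIFT)

struct memblk { unsigned long start_paddr, size; int nid; };

/* Hypothetical layout: node 0 owns [0, 2GB), node 1 owns [4GB, 6GB). */
static const struct memblk node_memblk[] = {
	{ 0x000000000UL, 0x080000000UL, 0 },
	{ 0x100000000UL, 0x080000000UL, 1 },
};
static const int num_node_memblks = 2;

static int early_pfn_to_nid(unsigned long pfn)
{
	unsigned long section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
	int i;

	for (i = 0; i < num_node_memblks; i++) {
		/* start rounds down, end rounds up to a section boundary */
		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
			((1UL << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
		if (section >= ssec && section < esec)
			return node_memblk[i].nid;
	}
	return 0;	/* pfn falls in a hole: default to node 0 */
}

int main(void)
{
	unsigned long pfns[] = {
		0x1000UL,			/* inside node 0 */
		0x0c0000000UL >> PAGE_SHIFT,	/* in the hole   */
		0x140000000UL >> PAGE_SHIFT,	/* inside node 1 */
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("pfn 0x%lx -> node %d\n", pfns[i],
		       early_pfn_to_nid(pfns[i]));
	return 0;
}

Built with a plain cc, the hole pfn reports node 0, matching the patch comment's "return node 0 as the default"; that fallback is what guarantees register_sparse_mem() always hands memory_present() a valid node.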