Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--   arch/ia64/mm/discontig.c |  66
-rw-r--r--   arch/ia64/mm/fault.c     |   3
-rw-r--r--   arch/ia64/mm/init.c      |   8
-rw-r--r--   arch/ia64/mm/ioremap.c   |   6
-rw-r--r--   arch/ia64/mm/tlb.c       |  12
5 files changed, 85 insertions, 10 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ec9eeb89975..b6bcc9fa360 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -519,6 +519,68 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+        unsigned long end_address, hole_next_pfn;
+        unsigned long stop_address;
+
+        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+        end_address = PAGE_ALIGN(end_address);
+
+        stop_address = (unsigned long) &vmem_map[
+                pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+        do {
+                pgd_t *pgd;
+                pud_t *pud;
+                pmd_t *pmd;
+                pte_t *pte;
+
+                pgd = pgd_offset_k(end_address);
+                if (pgd_none(*pgd)) {
+                        end_address += PGDIR_SIZE;
+                        continue;
+                }
+
+                pud = pud_offset(pgd, end_address);
+                if (pud_none(*pud)) {
+                        end_address += PUD_SIZE;
+                        continue;
+                }
+
+                pmd = pmd_offset(pud, end_address);
+                if (pmd_none(*pmd)) {
+                        end_address += PMD_SIZE;
+                        continue;
+                }
+
+                pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+                if (pte_none(*pte)) {
+                        end_address += PAGE_SIZE;
+                        pte++;
+                        if ((end_address < stop_address) &&
+                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+                                goto retry_pte;
+                        continue;
+                }
+                /* Found next valid vmem_map page */
+                break;
+        } while (end_address < stop_address);
+
+        end_address = min(end_address, stop_address);
+        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+        hole_next_pfn = end_address / sizeof(struct page);
+        return hole_next_pfn - pgdat->node_start_pfn;
+}
+#else
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+        return i + 1;
+}
+#endif
+
 /**
  * show_mem - give short summary of memory stats
  *
@@ -547,8 +609,10 @@ void show_mem(void)
                         struct page *page;
                         if (pfn_valid(pgdat->node_start_pfn + i))
                                 page = pfn_to_page(pgdat->node_start_pfn + i);
-                        else
+                        else {
+                                i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
                                 continue;
+                        }
                         if (PageReserved(page))
                                 reserved++;
                         else if (PageSwapCache(page))
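The find_next_valid_pfn_for_pgdat() hunk above lets show_mem() hop over holes in a virtually mapped mem_map: whenever the pgd, pud or pmd entry covering the current vmem_map address is empty, the scan advances by that level's entire span (PGDIR_SIZE, PUD_SIZE or PMD_SIZE) instead of probing one struct page at a time. Below is a minimal userspace sketch of the same skip-by-span idea, with hypothetical names and only two table levels instead of four; it is an illustration, not kernel code.

/*
 * Userspace model of the hole-skipping walk above (hypothetical,
 * illustration only).  A two-level "page table" maps an index space;
 * when a top-level entry is empty we skip a whole L1_SPAN of indices
 * at once, which is what find_next_valid_pfn_for_pgdat() does with
 * empty pgd/pud/pmd entries over the vmem_map.
 */
#include <stdio.h>
#include <stdbool.h>

#define L1_BITS  4                    /* 16 top-level entries        */
#define L0_BITS  4                    /* 16 leaf entries under each  */
#define L1_SPAN  (1u << L0_BITS)      /* indices one L1 entry covers */
#define TOTAL    (1u << (L1_BITS + L0_BITS))

static bool l1_present[1 << L1_BITS]; /* "pgd_none()" analogue */
static bool l0_valid[TOTAL];          /* "pte_none()" analogue */

static unsigned find_next_valid(unsigned i)
{
        while (i < TOTAL) {
                if (!l1_present[i / L1_SPAN]) {
                        /* upper-level hole: skip its full span */
                        i = (i / L1_SPAN + 1) * L1_SPAN;
                        continue;
                }
                if (!l0_valid[i]) {   /* leaf hole: step one entry */
                        i++;
                        continue;
                }
                break;                /* found a valid entry */
        }
        return i;
}

int main(void)
{
        l1_present[3] = true;
        l0_valid[3 * L1_SPAN + 5] = true;
        printf("next valid index from 0: %u\n", find_next_valid(0));
        return 0;
}

With one valid leaf at index 53, the walk needs three whole-span skips plus five single steps rather than 53 individual probes, which is the same saving the kernel hunk buys show_mem() on nodes with large PFN holes.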
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb087dca..d98ec49570b 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
         struct siginfo si;
         unsigned long mask;
 
+        /* mmap_sem is performance critical.... */
+        prefetchw(&mm->mmap_sem);
+
         /*
          * If we're in an interrupt or have no user context, we must not take the fault..
          */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2ef1151cde9..cafa8776a53 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -109,6 +109,7 @@ lazy_mmu_prot_update (pte_t pte)
 {
         unsigned long addr;
         struct page *page;
+        unsigned long order;
 
         if (!pte_exec(pte))
                 return;                         /* not an executable page... */
@@ -119,7 +120,12 @@ lazy_mmu_prot_update (pte_t pte)
         if (test_bit(PG_arch_1, &page->flags))
                 return;                         /* i-cache is already coherent with d-cache */
 
-        flush_icache_range(addr, addr + PAGE_SIZE);
+        if (PageCompound(page)) {
+                order = (unsigned long) (page[1].lru.prev);
+                flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
+        }
+        else
+                flush_icache_range(addr, addr + PAGE_SIZE);
         set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 62328621f99..643ccc6960c 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -21,12 +21,12 @@ __ioremap (unsigned long offset, unsigned long size)
 void __iomem *
 ioremap (unsigned long offset, unsigned long size)
 {
-        if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
-                return __ioremap(offset, size);
-
         if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
                 return phys_to_virt(offset);
 
+        if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+                return __ioremap(offset, size);
+
         /*
          * Someday this should check ACPI resources so we
          * can do the right thing for hot-plugged regions.
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 6a4eec9113e..4dbbca0b5e9 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -156,17 +156,19 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
         nbits = purge.max_bits;
         start &= ~((1UL << nbits) - 1);
 
-# ifdef CONFIG_SMP
-        platform_global_tlb_purge(mm, start, end, nbits);
-# else
         preempt_disable();
+#ifdef CONFIG_SMP
+        if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+                platform_global_tlb_purge(mm, start, end, nbits);
+                preempt_enable();
+                return;
+        }
+#endif
         do {
                 ia64_ptcl(start, (nbits<<2));
                 start += (1UL << nbits);
         } while (start < end);
         preempt_enable();
-# endif
-
         ia64_srlz_i();                  /* srlz.i implies srlz.d */
 }
 EXPORT_SYMBOL(flush_tlb_range);
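The tlb.c hunk above stops SMP kernels from always paying for a machine-wide purge: when the mm being flushed is the current CPU's active_mm and its cpu_vm_mask names exactly one CPU, the cheap CPU-local ia64_ptcl loop suffices, and platform_global_tlb_purge() is used only otherwise. A small userspace model of that decision follows; the struct and helper names other than cpus_weight()/cpu_vm_mask are hypothetical stand-ins, and cpus_weight() is mimicked with a popcount.

/*
 * Userspace model of the flush_tlb_range() decision (illustration
 * only).  "Local purge" is safe only when no other CPU can hold
 * stale TLB entries for this address space.
 */
#include <stdio.h>
#include <stdbool.h>

struct mm {                       /* stand-in for struct mm_struct   */
        unsigned cpu_vm_mask;     /* bit per CPU that has run this mm */
};

/* stand-in for cpus_weight(): population count of the CPU mask */
static int cpus_weight(unsigned mask)
{
        return __builtin_popcount(mask);
}

/* Mirror the new check: flushing our own active mm, live on one CPU. */
static bool can_purge_locally(const struct mm *mm, const struct mm *active_mm)
{
        return mm == active_mm && cpus_weight(mm->cpu_vm_mask) == 1;
}

int main(void)
{
        struct mm a = { .cpu_vm_mask = 0x1 };  /* only CPU 0     */
        struct mm b = { .cpu_vm_mask = 0x5 };  /* CPUs 0 and 2   */

        printf("a: %s purge\n", can_purge_locally(&a, &a) ? "local" : "global");
        printf("b: %s purge\n", can_purge_locally(&b, &b) ? "local" : "global");
        return 0;
}

Note how the kernel version takes preempt_disable() before the test: the result of the active_mm/cpu_vm_mask check only stays valid while the task cannot migrate to another CPU.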