/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
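/*
 * Sketch of how the two variables above are consumed (this mirrors the usual
 * MIPS ZERO_PAGE() helper in <asm/pgtable.h>; "uvaddr" here is just a
 * caller-supplied user virtual address):
 *
 *	zero_page = virt_to_page((void *)(empty_zero_page +
 *					  (uvaddr & zero_page_mask)));
 *
 * On VCE-affected CPUs the order-3 allocation provides eight coloured copies
 * of the zero page; everywhere else zero_page_mask is 0 and a single page is
 * used.
 */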
/*
 * These are almost like kmap_atomic / kunmap_atomic except they take an
 * additional address argument as the hint.
 */
#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

/*
 * Map 'page' at a fixmap slot whose virtual colour matches 'addr', so that
 * accesses through the returned address cannot alias the user mapping.
 */
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id();
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_val(pte) >> 6;
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

/* An EntryHi value that can never match a real mapping (KSEG0 is unmapped) */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/*
 * Undo kmap_coherent(): drop the wired TLB entry again.  The SMTC variant
 * preloaded an ordinary (non-wired) entry, so only the preempt count needs
 * fixing up there.
 */
void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

EXPORT_SYMBOL(copy_to_user_page);
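/*
 * For context, a rough sketch of how generic code drives copy_to_user_page()
 * and its counterpart copy_from_user_page() below: access_process_vm() (used
 * by ptrace, among others) kmaps the target page and then does approximately
 *
 *	if (write) {
 *		copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *		set_page_dirty_lock(page);
 *	} else {
 *		copy_from_user_page(vma, page, addr, buf, maddr + offset, bytes);
 *	}
 *
 * ("maddr", "buf", "offset" and "bytes" being the caller's variables), so the
 * alias handling here is what keeps the target task's view of the page
 * coherent with the kernel's copy.
 */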
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom =
			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

EXPORT_SYMBOL(copy_from_user_page);

#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;

pte_t *kmap_pte;
pgprot_t kmap_prot;

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					if (pte != pte_offset_kernel(pmd, 0))
						BUG();
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static int __init page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}
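/*
 * Note on the zone setup above: free_area_init_nodes() only needs the highest
 * page frame number that may fall into each zone, so ZONE_NORMAL ends at
 * max_low_pfn and, when highmem is both configured and usable on this CPU,
 * ZONE_HIGHMEM extends up to highend_pfn.
 */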
static struct kcore_list kcore_mem, kcore_vmalloc;
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0, 0x80000000 - 4);
#endif
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

unsigned long pgd_current[NR_CPUS];

/*
 * On 64-bit we've got three-level page tables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
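/*
 * Usage sketch for the helper above (illustrative only; "example_pte_table"
 * is a placeholder name, and the real table definitions use the
 * architecture's own PGD/PMD/PTE order macros):
 *
 *	pte_t example_pte_table[PTRS_PER_PTE] __page_aligned(0);
 *
 * i.e. a table of order N is placed on a 2^N-page boundary.
 */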