Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c           | 22
-rw-r--r--  arch/mips/mm/cache.c           |  1
-rw-r--r--  arch/mips/mm/dma-coherent.c    |  4
-rw-r--r--  arch/mips/mm/dma-ip27.c        |  4
-rw-r--r--  arch/mips/mm/dma-ip32.c        |  5
-rw-r--r--  arch/mips/mm/dma-noncoherent.c |  5
-rw-r--r--  arch/mips/mm/fault.c           |  4
-rw-r--r--  arch/mips/mm/highmem.c         | 10
-rw-r--r--  arch/mips/mm/init.c            | 84
-rw-r--r--  arch/mips/mm/ioremap.c         | 96
-rw-r--r--  arch/mips/mm/pg-r4k.c          |  9
-rw-r--r--  arch/mips/mm/pgtable-64.c      |  3
-rw-r--r--  arch/mips/mm/tlbex.c           | 55
13 files changed, 152 insertions(+), 150 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cc895dad71d..df04a315d83 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -323,7 +323,6 @@ static void __init r4k_blast_scache_setup(void)
 static inline void local_r4k_flush_cache_all(void * args)
 {
 	r4k_blast_dcache();
-	r4k_blast_icache();
 }
 
 static void r4k_flush_cache_all(void)
@@ -359,21 +358,19 @@ static void r4k___flush_cache_all(void)
 static inline void local_r4k_flush_cache_range(void * args)
 {
 	struct vm_area_struct *vma = args;
-	int exec;
 
 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
-	exec = vma->vm_flags & VM_EXEC;
-	if (cpu_has_dc_aliases || exec)
-		r4k_blast_dcache();
-	if (exec)
-		r4k_blast_icache();
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
+	if (!cpu_has_dc_aliases)
+		return;
+
 	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
 }
 
@@ -384,18 +381,21 @@ static inline void local_r4k_flush_cache_mm(void * args)
 	if (!cpu_context(smp_processor_id(), mm))
 		return;
 
-	r4k_blast_dcache();
-	r4k_blast_icache();
-
 	/*
 	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
 	 * only flush the primary caches but R10000 and R12000 behave sane ...
+	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
+	 * caches, so we can bail out early.
 	 */
 	if (current_cpu_data.cputype == CPU_R4000SC ||
 	    current_cpu_data.cputype == CPU_R4000MC ||
 	    current_cpu_data.cputype == CPU_R4400SC ||
-	    current_cpu_data.cputype == CPU_R4400MC)
+	    current_cpu_data.cputype == CPU_R4400MC) {
 		r4k_blast_scache();
+		return;
+	}
+
+	r4k_blast_dcache();
 }
 
 static void r4k_flush_cache_mm(struct mm_struct *mm)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index caf807ded51..1f954a238a6 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -32,6 +32,7 @@ void (*local_flush_data_cache_page)(void * addr);
 void (*flush_data_cache_page)(unsigned long addr);
 void (*flush_icache_all)(void);
 
+EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
 
 #ifdef CONFIG_DMA_NONCOHERENT
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
index 7fa5fd16e46..5697c6e250a 100644
--- a/arch/mips/mm/dma-coherent.c
+++ b/arch/mips/mm/dma-coherent.c
@@ -190,14 +190,14 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c
index 8da19fd22ac..f088344db46 100644
--- a/arch/mips/mm/dma-ip27.c
+++ b/arch/mips/mm/dma-ip27.c
@@ -197,14 +197,14 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
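The dma_is_consistent()/dma_cache_sync() hunks above and below all make the same interface change: both calls now receive the struct device they operate on. A minimal driver-side sketch of the post-patch calling convention (the helper and its use of dma_alloc_noncoherent() are illustrative, not part of this patch):

#include <linux/dma-mapping.h>

/* Illustrative helper: allocate a possibly non-coherent buffer and
 * write it back to memory before device access.  Both calls pass
 * the device, matching the signatures introduced in these hunks. */
static void *example_dma_buffer(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	void *cpu = dma_alloc_noncoherent(dev, size, handle, GFP_KERNEL);

	if (cpu && !dma_is_consistent(dev, *handle))
		dma_cache_sync(dev, cpu, size, DMA_BIDIRECTIONAL);
	return cpu;
}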
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index ec54ed0d26f..b42b6f7456e 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -363,14 +363,15 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 2eeffe5c2a3..8cecef0957c 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -299,14 +299,15 @@ int dma_supported(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(dma_supported);
 
-int dma_is_consistent(dma_addr_t dma_addr)
+int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
 	return 1;
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 8423d859077..6f90e7ef66a 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -60,6 +60,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	 */
 	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
 		goto vmalloc_fault;
+#ifdef MODULE_START
+	if (unlikely(address >= MODULE_START && address < MODULE_END))
+		goto vmalloc_fault;
+#endif
 
 	/*
 	 * If we're in an interrupt or have no user
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 99ebf3ccc22..675502ada5a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
+	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
 	local_flush_tlb_one(vaddr);
 #endif
 
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 #ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
-	inc_preempt_count();
+	pagefault_disable();
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
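With the highmem.c conversion, atomic kmaps disable page faults through the pagefault_disable()/pagefault_enable() pair instead of raw preempt-count manipulation. A sketch of the resulting usage pattern from a caller's point of view (the copy helper itself is hypothetical, not part of this patch):

#include <linux/highmem.h>

/* Hypothetical caller: the fault-disable bookkeeping now happens
 * inside __kmap_atomic()/__kunmap_atomic(), so users simply pair
 * the calls around the access. */
static void example_copy_highpage(struct page *dst, struct page *src)
{
	void *s = kmap_atomic(src, KM_USER0);
	void *d = kmap_atomic(dst, KM_USER1);

	copy_page(d, s);

	kunmap_atomic(d, KM_USER1);
	kunmap_atomic(s, KM_USER0);
}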
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2de4d3c367a..30245c09d02 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -90,9 +90,9 @@ unsigned long setup_zero_pages(void)
 	if (!empty_zero_page)
 		panic("Oh boy, that early out of memory?");
 
-	page = virt_to_page(empty_zero_page);
+	page = virt_to_page((void *)empty_zero_page);
 	split_page(page, order);
-	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
+	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
 		SetPageReserved(page);
 		page++;
 	}
@@ -203,6 +203,31 @@ static inline void kunmap_coherent(struct page *page)
 	preempt_check_resched();
 }
 
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+
+	vto = kmap_atomic(to, KM_USER1);
+	if (cpu_has_dc_aliases) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(from);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+	if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		flush_data_cache_page((unsigned long)vto);
+	kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is cleared on other CPU's too before using it */
+	smp_wmb();
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
 void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
@@ -316,7 +341,7 @@ static int __init page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-	unsigned long max_dma, high, low;
+	unsigned long max_dma, low;
 #ifndef CONFIG_FLATMEM
 	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
 	unsigned long i, j, pfn;
@@ -331,7 +356,6 @@ void __init paging_init(void)
 
 	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 	low = max_low_pfn;
-	high = highend_pfn;
 
 #ifdef CONFIG_ISA
 	if (low < max_dma)
@@ -344,13 +368,13 @@ void __init paging_init(void)
 	zones_size[ZONE_DMA] = low;
 #endif
 #ifdef CONFIG_HIGHMEM
-	if (cpu_has_dc_aliases) {
-		printk(KERN_WARNING "This processor doesn't support highmem.");
-		if (high - low)
-			printk(" %ldk highmem ignored", high - low);
-		printk("\n");
-	} else
-		zones_size[ZONE_HIGHMEM] = high - low;
+	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+
+	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+		printk(KERN_WARNING "This processor doesn't support highmem."
+		       " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
+		zones_size[ZONE_HIGHMEM] = 0;
+	}
 #endif
 
 #ifdef CONFIG_FLATMEM
@@ -443,15 +467,18 @@ void __init mem_init(void)
 }
 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
+static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr;
+	unsigned long pfn;
+
+	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
+		struct page *page = pfn_to_page(pfn);
+		void *addr = phys_to_virt(PFN_PHYS(pfn));
 
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)addr, 0xcc, PAGE_SIZE);
-		free_page(addr);
+		ClearPageReserved(page);
+		init_page_count(page);
+		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
+		__free_page(page);
 		totalram_pages++;
 	}
 	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@@ -460,12 +487,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-#ifdef CONFIG_64BIT
-	/* Switch from KSEG0 to XKPHYS addresses */
-	start = (unsigned long)phys_to_virt(CPHYSADDR(start));
-	end = (unsigned long)phys_to_virt(CPHYSADDR(end));
-#endif
-	free_init_pages("initrd memory", start, end);
+	free_init_pages("initrd memory",
+			virt_to_phys((void *)start),
+			virt_to_phys((void *)end));
 }
 #endif
@@ -473,17 +497,13 @@ extern unsigned long prom_free_prom_memory(void);
 
 void free_initmem(void)
 {
-	unsigned long start, end, freed;
+	unsigned long freed;
 
 	freed = prom_free_prom_memory();
 	if (freed)
 		printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
 
-	start = (unsigned long)(&__init_begin);
-	end = (unsigned long)(&__init_end);
-#ifdef CONFIG_64BIT
-	start = PAGE_OFFSET | CPHYSADDR(start);
-	end = PAGE_OFFSET | CPHYSADDR(end);
-#endif
-	free_init_pages("unused kernel memory", start, end);
+	free_init_pages("unused kernel memory",
+			__pa_symbol(&__init_begin),
+			__pa_symbol(&__init_end));
 }
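Note the contract change buried in the init.c hunks: free_init_pages() now iterates over page frames, so its begin/end arguments are physical addresses, and both remaining callers convert with virt_to_phys() or __pa_symbol(). A sketch of what any further caller would look like after this patch (the helper is hypothetical, and since free_init_pages() is now static it would have to live in init.c):

/* Hypothetical additional caller inside init.c: pass physical
 * addresses, mirroring free_initrd_mem() and free_initmem(). */
static void example_free_range(void *begin, void *end)
{
	free_init_pages("example memory",
			virt_to_phys(begin),
			virt_to_phys(end));
}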
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cea7d0ea36e..fc2c96f0a1f 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -6,98 +6,13 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  * (C) Copyright 2001, 2002 Ralf Baechle
  */
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
 
 #include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-#include <asm/tlbflush.h>
-
-static inline void remap_area_pte(pte_t * pte, unsigned long address,
-	phys_t size, phys_t phys_addr, unsigned long flags)
-{
-	phys_t end;
-	unsigned long pfn;
-	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
-	                           | __WRITEABLE | flags);
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, pgprot));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
-	phys_t size, phys_t phys_addr, unsigned long flags)
-{
-	phys_t end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, phys_t phys_addr,
-	phys_t size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pud_t *pud;
-		pmd_t *pmd;
-
-		error = -ENOMEM;
-		pud = pud_alloc(&init_mm, dir, address);
-		if (!pud)
-			break;
-		pmd = pmd_alloc(&init_mm, pud, address);
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return error;
-}
+#include <linux/io.h>
 
 /*
  * Generic mapping function (not visible outside):
@@ -121,6 +36,7 @@ void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
 	unsigned long offset;
 	phys_t last_addr;
 	void * addr;
+	pgprot_t pgprot;
 
 	phys_addr = fixup_bigphys_addr(phys_addr, size);
 
@@ -152,6 +68,9 @@ void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
 			return NULL;
 	}
 
+	pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
+			  | __WRITEABLE | flags);
+
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -166,7 +85,8 @@ void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+			       phys_addr, pgprot)) {
 		vunmap(addr);
 		return NULL;
 	}
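The ioremap.c rewrite drops roughly ninety lines of hand-rolled pgd/pmd/pte walking in favour of the generic ioremap_page_range() helper; __ioremap() now just builds the pgprot and delegates. A condensed sketch of the new mapping step (names mirror the patched __ioremap(); the standalone helper itself is illustrative):

#include <linux/io.h>
#include <linux/vmalloc.h>

/* Sketch of the replacement path: map [phys_addr, phys_addr + size)
 * at a freshly reserved vmalloc-space address. */
static void __iomem *example_map(phys_t phys_addr, phys_t size,
				 pgprot_t pgprot)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

	if (!area)
		return NULL;
	if (ioremap_page_range((unsigned long)area->addr,
			       (unsigned long)area->addr + size,
			       phys_addr, pgprot)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *)area->addr;
}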
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
index d41fc5885e8..dc795be6280 100644
--- a/arch/mips/mm/pg-r4k.c
+++ b/arch/mips/mm/pg-r4k.c
@@ -243,11 +243,10 @@ static void __init __build_store_reg(int reg)
 
 static inline void build_store_reg(int reg)
 {
-	if (cpu_has_prefetch)
-		if (reg)
-			build_dst_pref(pref_offset_copy);
-		else
-			build_dst_pref(pref_offset_clear);
+	int pref_off = cpu_has_prefetch ?
+		(reg ? pref_offset_copy : pref_offset_clear) : 0;
+	if (pref_off)
+		build_dst_pref(pref_off);
 	else if (cpu_has_cache_cdex_s)
 		build_cdex_s();
 	else if (cpu_has_cache_cdex_p)
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 8d600d307d5..c46eb651bf0 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -58,6 +58,9 @@ void __init pagetable_init(void)
 
 	/* Initialize the entire pgd.  */
 	pgd_init((unsigned long)swapper_pg_dir);
+#ifdef MODULE_START
+	pgd_init((unsigned long)module_pg_dir);
+#endif
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
 
 	pgd_base = swapper_pg_dir;
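pgtable-64.c and the fault.c hunk earlier wire up a separate module mapping region together: when MODULE_START is defined, module space gets its own module_pg_dir and faults there are routed down the vmalloc path. A small sketch of the address classification as do_page_fault() now performs it (the helper name is hypothetical):

/* Hypothetical helper mirroring the patched do_page_fault() checks:
 * both the vmalloc window and, when configured, the module window
 * are resolved from kernel page tables rather than user mappings. */
static int example_is_kernel_lazy_mapping(unsigned long address)
{
	if (address >= VMALLOC_START && address <= VMALLOC_END)
		return 1;
#ifdef MODULE_START
	if (address >= MODULE_START && address < MODULE_END)
		return 1;
#endif
	return 0;
}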
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fec318a1c8c..492c518e7ba 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -423,6 +423,9 @@ enum label_id {
 	label_invalid,
 	label_second_part,
 	label_leave,
+#ifdef MODULE_START
+	label_module_alloc,
+#endif
 	label_vmalloc,
 	label_vmalloc_done,
 	label_tlbw_hazard,
@@ -455,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr,
 
 L_LA(_second_part)
 L_LA(_leave)
+#ifdef MODULE_START
+L_LA(_module_alloc)
+#endif
 L_LA(_vmalloc)
 L_LA(_vmalloc_done)
 L_LA(_tlbw_hazard)
@@ -686,6 +692,13 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
 	i_bgezl(p, reg, 0);
 }
 
+static void __init __attribute__((unused))
+il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
+{
+	r_mips_pc16(r, *p, l);
+	i_bgez(p, reg, 0);
+}
+
 /* The only general purpose registers allowed in TLB handlers. */
 #define K0 26
 #define K1 27
@@ -970,7 +983,11 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	i_dmfc0(p, tmp, C0_BADVADDR);
+#ifdef MODULE_START
+	il_bltz(p, r, tmp, label_module_alloc);
+#else
 	il_bltz(p, r, tmp, label_vmalloc);
+#endif
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
@@ -1023,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
 {
 	long swpd = (long)swapper_pg_dir;
 
+#ifdef MODULE_START
+	long modd = (long)module_pg_dir;
+
+	l_module_alloc(l, *p);
+	/*
+	 * Assumption:
+	 *   VMALLOC_START >= 0xc000000000000000UL
+	 *   MODULE_START  >= 0xe000000000000000UL
+	 */
+	i_SLL(p, ptr, bvaddr, 2);
+	il_bgez(p, r, ptr, label_vmalloc);
+
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
+		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
+	} else {
+		/* unlikely configuration */
+		i_nop(p); /* delay slot */
+		i_LA(p, ptr, MODULE_START);
+	}
+	i_dsubu(p, bvaddr, bvaddr, ptr);
+
+	if (in_compat_space_p(modd) && !rel_lo(modd)) {
+		il_b(p, r, label_vmalloc_done);
+		i_lui(p, ptr, rel_hi(modd));
+	} else {
+		i_LA_mostly(p, ptr, modd);
+		il_b(p, r, label_vmalloc_done);
+		i_daddiu(p, ptr, ptr, rel_lo(modd));
+	}
+
+	l_vmalloc(l, *p);
+	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
+	    MODULE_START << 32 == VMALLOC_START)
+		i_dsll32(p, ptr, ptr, 0);	/* typical case */
+	else
+		i_LA(p, ptr, VMALLOC_START);
+#else
 	l_vmalloc(l, *p);
 	i_LA(p, ptr, VMALLOC_START);
+#endif
 	i_dsubu(p, bvaddr, bvaddr, ptr);
 
 	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
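The module/vmalloc dispatch emitted into the 64-bit TLB refill handler leans on the address-layout assumption spelled out in the comment: after i_SLL(p, ptr, bvaddr, 2), a vmalloc address (0xc000...) becomes non-negative while a module address (0xe000... and up) stays negative, so a single bgez separates the two regions. A stand-alone arithmetic check of that trick (plain user-space C, purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed layout from the patch comment:
	 *   VMALLOC_START >= 0xc000000000000000UL  (top bits 110...)
	 *   MODULE_START  >= 0xe000000000000000UL  (top bits 111...)
	 * Shifting left by 2 drops the common "11" prefix, leaving the
	 * third address bit in the sign position. */
	uint64_t vmalloc_addr = 0xc000000000000000ULL;
	uint64_t module_addr  = 0xe000000000000000ULL;

	printf("vmalloc: %s\n", (int64_t)(vmalloc_addr << 2) >= 0 ?
	       "bgez taken (vmalloc path)" : "falls through (module path)");
	printf("module:  %s\n", (int64_t)(module_addr << 2) >= 0 ?
	       "bgez taken (vmalloc path)" : "falls through (module path)");
	return 0;
}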