Diffstat (limited to 'arch/xtensa/mm')
-rw-r--r--   arch/xtensa/mm/cache.c   | 27
-rw-r--r--   arch/xtensa/mm/fault.c   |  1
-rw-r--r--   arch/xtensa/mm/init.c    | 16
-rw-r--r--   arch/xtensa/mm/misc.S    | 51
-rw-r--r--   arch/xtensa/mm/mmu.c     |  2
-rw-r--r--   arch/xtensa/mm/tlb.c     |  9
6 files changed, 76 insertions, 30 deletions
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 85df4655d32..81edeab82d1 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
  * For now, flush the whole cache. FIXME??
  */

-void flush_cache_range(struct vm_area_struct* vma,
+void flush_cache_range(struct vm_area_struct* vma,
 		       unsigned long start, unsigned long end)
 {
 	__flush_invalidate_dcache_all();
@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma,
  */

 void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
-		       unsigned long pfn)
+		       unsigned long pfn)
 {
 	/* Note that we have to use the 'alias' address to avoid multi-hit */

@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)

 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {

-		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		unsigned long paddr = (unsigned long) page_address(page);
 		unsigned long phys = page_to_phys(page);
+		unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);

 		__flush_invalidate_dcache_page(paddr);

-		__flush_invalidate_dcache_page_alias(vaddr, phys);
-		__invalidate_icache_page_alias(vaddr, phys);
+		__flush_invalidate_dcache_page_alias(tmp, phys);
+		__invalidate_icache_page_alias(tmp, phys);

 		clear_bit(PG_arch_1, &page->flags);
 	}
@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)

 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,
 		unsigned long len)
 {
@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	/* Flush and invalidate user page if aliased. */

 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(temp, phys);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(t, phys);
 	}

 	/* Copy data */
@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	 */

 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

 		__flush_invalidate_dcache_range((unsigned long) dst, len);
-		if ((vma->vm_flags & VM_EXEC) != 0) {
-			__invalidate_icache_page_alias(temp, phys);
-		}
+		if ((vma->vm_flags & VM_EXEC) != 0)
+			__invalidate_icache_page_alias(t, phys);

 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
 		__flush_dcache_range((unsigned long)dst,len);
@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	 */

 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(temp, phys);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(t, phys);
 	}

 	memcpy(dst, src, len);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 245b08f7eaf..4b7bc8db170 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 	die("Oops", regs, sig);
 	do_exit(sig);
 }
-
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index db955179da2..7a5156ffebb 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 			sysmem.nr_banks++;
 		}
 		sysmem.bank[i].end = start;
+
+	} else if (end < sysmem.bank[i].end) {
+		sysmem.bank[i].start = end;
+
 	} else {
-		if (end < sysmem.bank[i].end)
-			sysmem.bank[i].start = end;
-		else {
-			/* remove entry */
-			sysmem.nr_banks--;
-			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-			sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
-		}
+		/* remove entry */
+		sysmem.nr_banks--;
+		sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
+		sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
 	}
 	return -1;
 }
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d875..d97ed1ba7b0 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
  */

 ENTRY(clear_page)
+
 	entry	a1, 16

 	movi	a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)

 	retw

+ENDPROC(clear_page)
+
 /*
  * copy_page and copy_user_page are the same for non-cache-aliased configs.
  *
@@ -53,6 +56,7 @@ ENTRY(clear_page)
  */

 ENTRY(copy_page)
+
 	entry	a1, 16

 	__loopi	a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)

 	retw

+ENDPROC(copy_page)
+
 #ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
  */

 ENTRY(clear_user_page)
+
 	entry	a1, 32

 	/* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)

 	retw

+ENDPROC(clear_user_page)
+
 /*
  * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
  *                     a2         a3               a4                    a5
@@ -171,7 +180,7 @@ ENTRY(clear_user_page)
  */

 ENTRY(copy_user_page)
-	entry	a1, 32
+	entry	a1, 32

 	/* Mark page dirty and determine alias for destination.
 	 */
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)

 	retw

+ENDPROC(copy_user_page)
+
 #endif

 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
  */

 ENTRY(__flush_invalidate_dcache_page_alias)
+
 	entry	sp, 16

 	movi	a7, 0			# required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)

 	retw

+ENDPROC(__flush_invalidate_dcache_page_alias)
 #endif

 ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
 #if (ICACHE_WAY_SIZE > PAGE_SIZE)

 ENTRY(__invalidate_icache_page_alias)
+
 	entry	sp, 16

 	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
 	isync
 	retw

+ENDPROC(__invalidate_icache_page_alias)
+
 #endif

 /* End of special treatment in tlb miss exception */

 ENTRY(__tlbtemp_mapping_end)
+
 #endif /* CONFIG_MMU

 /*
@@ -319,6 +336,7 @@ ENTRY(__tlbtemp_mapping_end)
  */

 ENTRY(__invalidate_icache_page)
+
 	entry	sp, 16

 	___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@ ENTRY(__invalidate_icache_page)

 	retw

+ENDPROC(__invalidate_icache_page)
+
 /*
  * void __invalidate_dcache_page(ulong start)
  */

 ENTRY(__invalidate_dcache_page)
+
 	entry	sp, 16

 	___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@ ENTRY(__invalidate_dcache_page)

 	retw

+ENDPROC(__invalidate_dcache_page)
+
 /*
  * void __flush_invalidate_dcache_page(ulong start)
  */

 ENTRY(__flush_invalidate_dcache_page)
+
 	entry	sp, 16

 	___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@ ENTRY(__flush_invalidate_dcache_page)
 	dsync
 	retw

+ENDPROC(__flush_invalidate_dcache_page)
+
 /*
  * void __flush_dcache_page(ulong start)
  */

 ENTRY(__flush_dcache_page)
+
 	entry	sp, 16

 	___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@ ENTRY(__flush_dcache_page)
 	dsync
 	retw

+ENDPROC(__flush_dcache_page)
+
 /*
  * void __invalidate_icache_range(ulong start, ulong size)
  */

 ENTRY(__invalidate_icache_range)
+
 	entry	sp, 16

 	___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@ ENTRY(__invalidate_icache_range)

 	retw

+ENDPROC(__invalidate_icache_range)
+
 /*
  * void __flush_invalidate_dcache_range(ulong start, ulong size)
  */

 ENTRY(__flush_invalidate_dcache_range)
+
 	entry	sp, 16

 	___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@ ENTRY(__flush_invalidate_dcache_range)

 	retw

+ENDPROC(__flush_invalidate_dcache_range)
+
 /*
  * void _flush_dcache_range(ulong start, ulong size)
  */

 ENTRY(__flush_dcache_range)
+
 	entry	sp, 16

 	___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@ ENTRY(__flush_dcache_range)

 	retw

+ENDPROC(__flush_dcache_range)
+
 /*
  * void _invalidate_dcache_range(ulong start, ulong size)
  */

 ENTRY(__invalidate_dcache_range)
+
 	entry	sp, 16

 	___invalidate_dcache_range a2 a3 a4

 	retw

+ENDPROC(__invalidate_dcache_range)
+
 /*
  * void _invalidate_icache_all(void)
  */

 ENTRY(__invalidate_icache_all)
+
 	entry	sp, 16

 	___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@ ENTRY(__invalidate_icache_all)

 	retw

+ENDPROC(__invalidate_icache_all)
+
 /*
  * void _flush_invalidate_dcache_all(void)
  */

 ENTRY(__flush_invalidate_dcache_all)
+
 	entry	sp, 16

 	___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@ ENTRY(__flush_invalidate_dcache_all)

 	retw

+ENDPROC(__flush_invalidate_dcache_all)
+
 /*
  * void _invalidate_dcache_all(void)
  */

 ENTRY(__invalidate_dcache_all)
+
 	entry	sp, 16

 	___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@ ENTRY(__invalidate_dcache_all)

 	retw

+ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index ca81654f3ec..0f77f9d3bb8 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -37,7 +37,7 @@ void __init init_mmu(void)

 	/* Set rasid register to a known value. */

-	set_rasid_register(ASID_USER_FIRST);
+	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

 	/* Set PTEVADDR special register to the start of the page
 	 * table, which is in kernel mappable space (ie. not
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index e2700b21395..5411aa67c68 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -63,7 +63,7 @@ void flush_tlb_all (void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (mm == current->active_mm) {
-		int flags;
+		unsigned long flags;
 		local_save_flags(flags);
 		__get_new_mmu_context(mm);
 		__load_mmu_context(mm);
@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 #endif

 void flush_tlb_range (struct vm_area_struct *vma,
-		      unsigned long start, unsigned long end)
+		      unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long flags;
@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
 		int oldpid = get_rasid_register();
 		set_rasid_register (ASID_INSERT(mm->context));
 		start &= PAGE_MASK;
-		if (vma->vm_flags & VM_EXEC)
+		if (vma->vm_flags & VM_EXEC)
 			while(start < end) {
 				invalidate_itlb_mapping(start);
 				invalidate_dtlb_mapping(start);
@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)

 	local_save_flags(flags);

-	oldpid = get_rasid_register();
+	oldpid = get_rasid_register();

 	if (vma->vm_flags & VM_EXEC)
 		invalidate_itlb_mapping(page);
@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)

 	local_irq_restore(flags);
 }
-
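
Most of the misc.S churn above pairs each ENTRY() with a closing ENDPROC(). As a hedged illustration only, not part of this diff, and with a made-up routine name, a windowed-ABI xtensa function written against the ENTRY/ENDPROC macros from <linux/linkage.h> would look roughly like this:

#include <linux/linkage.h>

/*
 * Illustrative sketch only: ENDPROC() closes what ENTRY() opened, emitting
 * the .type and .size directives so the symbol carries proper function
 * metadata for debuggers and objdump-style tooling.
 */
ENTRY(example_cache_op)		/* hypothetical routine, not in this patch */

	entry	a1, 16		/* windowed-ABI prologue, as in misc.S */
	/* ... cache maintenance body would go here ... */
	retw

ENDPROC(example_cache_op)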