Diffstat (limited to 'arch/arc/mm')
-rw-r--r-- | arch/arc/mm/cache_arc700.c | 155
-rw-r--r-- | arch/arc/mm/fault.c        |   8
-rw-r--r-- | arch/arc/mm/init.c         |   7
-rw-r--r-- | arch/arc/mm/tlb.c          |  91
-rw-r--r-- | arch/arc/mm/tlbex.S        |   4
5 files changed, 154 insertions, 111 deletions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 5a1259cd948..6b58c1de757 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -182,7 +182,7 @@ void arc_cache_init(void)
 #ifdef CONFIG_ARC_HAS_ICACHE
 	/* 1. Confirm some of I-cache params which Linux assumes */
-	if (ic->line_len != ARC_ICACHE_LINE_LEN)
+	if (ic->line_len != L1_CACHE_BYTES)
 		panic("Cache H/W doesn't match kernel Config");
 
 	if (ic->ver != CONFIG_ARC_MMU_VER)
@@ -205,7 +205,7 @@ chk_dc:
 		return;
 
 #ifdef CONFIG_ARC_HAS_DCACHE
-	if (dc->line_len != ARC_DCACHE_LINE_LEN)
+	if (dc->line_len != L1_CACHE_BYTES)
 		panic("Cache H/W doesn't match kernel Config");
 
 	/* check for D-Cache aliasing */
@@ -240,6 +240,67 @@ chk_dc:
 #define OP_INV		0x1
 #define OP_FLUSH	0x2
 #define OP_FLUSH_N_INV	0x3
+#define OP_INV_IC	0x4
+
+/*
+ * Common Helper for Line Operations on {I,D}-Cache
+ */
+static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
+				     unsigned long sz, const int cacheop)
+{
+	unsigned int aux_cmd, aux_tag;
+	int num_lines;
+	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
+
+	if (cacheop == OP_INV_IC) {
+		aux_cmd = ARC_REG_IC_IVIL;
+		aux_tag = ARC_REG_IC_PTAG;
+	} else {
+		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
+		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+		aux_tag = ARC_REG_DC_PTAG;
+	}
+
+	/* Ensure we properly floor/ceil the non-line aligned/sized requests
+	 * and have @paddr - aligned to cache line and integral @num_lines.
+	 * This however can be avoided for page sized since:
+	 *  -@paddr will be cache-line aligned already (being page aligned)
+	 *  -@sz will be integral multiple of line size (being page sized).
+	 */
+	if (!full_page_op) {
+		sz += paddr & ~CACHE_LINE_MASK;
+		paddr &= CACHE_LINE_MASK;
+		vaddr &= CACHE_LINE_MASK;
+	}
+
+	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+	/* MMUv2 and before: paddr contains stuffed vaddr bits */
+	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
+#else
+	/* if V-P const for loop, PTAG can be written once outside loop */
+	if (full_page_op)
+		write_aux_reg(ARC_REG_DC_PTAG, paddr);
+#endif
+
+	while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+		/* MMUv3: cache ops require paddr written separately */
+		if (!full_page_op) {
+			write_aux_reg(aux_tag, paddr);
+			paddr += L1_CACHE_BYTES;
+		}
+
+		write_aux_reg(aux_cmd, vaddr);
+		vaddr += L1_CACHE_BYTES;
+#else
+		write_aux_reg(aux_cmd, paddr);
+		paddr += L1_CACHE_BYTES;
+#endif
+	}
+}
 
 #ifdef CONFIG_ARC_HAS_DCACHE
 
@@ -289,53 +350,6 @@ static inline void __dc_entire_op(const int cacheop)
 	write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
 }
 
-/*
- * Per Line Operation on D-Cache
- * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
- * It's sole purpose is to help gcc generate ZOL
- * (aliasing VIPT dcache flushing needs both vaddr and paddr)
- */
-static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
-				  unsigned long sz, const int aux_reg)
-{
-	int num_lines;
-
-	/* Ensure we properly floor/ceil the non-line aligned/sized requests
-	 * and have @paddr - aligned to cache line and integral @num_lines.
-	 * This however can be avoided for page sized since:
-	 *  -@paddr will be cache-line aligned already (being page aligned)
-	 *  -@sz will be integral multiple of line size (being page sized).
-	 */
-	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		sz += paddr & ~DCACHE_LINE_MASK;
-		paddr &= DCACHE_LINE_MASK;
-		vaddr &= DCACHE_LINE_MASK;
-	}
-
-	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
-
-	while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-		/*
-		 * Just as for I$, in MMU v3, D$ ops also require
-		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
-		 */
-		write_aux_reg(ARC_REG_DC_PTAG, paddr);
-
-		write_aux_reg(aux_reg, vaddr);
-		vaddr += ARC_DCACHE_LINE_LEN;
-#else
-		/* paddr contains stuffed vaddrs bits */
-		write_aux_reg(aux_reg, paddr);
-#endif
-		paddr += ARC_DCACHE_LINE_LEN;
-	}
-}
-
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
 
@@ -346,7 +360,6 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz, const int cacheop)
 {
 	unsigned long flags, tmp = tmp;
-	int aux;
 
 	local_irq_save(flags);
 
@@ -361,12 +374,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
 	}
 
-	if (cacheop & OP_INV)	/* Inv / flush-n-inv use same cmd reg */
-		aux = ARC_REG_DC_IVDL;
-	else
-		aux = ARC_REG_DC_FLDL;
-
-	__dc_line_loop(paddr, vaddr, sz, aux);
+	__cache_line_loop(paddr, vaddr, sz, cacheop);
 
 	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
 		wait_for_flush();
@@ -438,42 +446,9 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	unsigned long flags;
-	int num_lines;
-
-	/*
-	 * Ensure we properly floor/ceil the non-line aligned/sized requests:
-	 * However page sized flushes can be compile time optimised.
-	 *  -@paddr will be cache-line aligned already (being page aligned)
-	 *  -@sz will be integral multiple of line size (being page sized).
-	 */
-	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
-		sz += paddr & ~ICACHE_LINE_MASK;
-		paddr &= ICACHE_LINE_MASK;
-		vaddr &= ICACHE_LINE_MASK;
-	}
-
-	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
-
-#if (CONFIG_ARC_MMU_VER <= 2)
-	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
-	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-#endif
 
 	local_irq_save(flags);
-	while (num_lines-- > 0) {
-#if (CONFIG_ARC_MMU_VER > 2)
-		/* tag comes from phy addr */
-		write_aux_reg(ARC_REG_IC_PTAG, paddr);
-
-		/* index bits come from vaddr */
-		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
-		vaddr += ARC_ICACHE_LINE_LEN;
-#else
-		/* paddr contains stuffed vaddrs bits */
-		write_aux_reg(ARC_REG_IC_IVIL, paddr);
-#endif
-		paddr += ARC_ICACHE_LINE_LEN;
-	}
+	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
 	local_irq_restore(flags);
 }
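The consolidation above folds two near-identical per-line loops (D-cache and I-cache) into the single __cache_line_loop(). The floor/ceil arithmetic it keeps is easy to verify in isolation; the following stand-alone sketch (plain C, with an assumed 64-byte line size and DIV_ROUND_UP spelled out so no kernel headers are needed) shows how a byte range is widened to whole cache lines:

	#include <stdio.h>

	#define L1_CACHE_BYTES	64UL		/* assumed line size */
	#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Widen [paddr, paddr+sz) to whole lines, as __cache_line_loop does */
	static unsigned long lines_for_range(unsigned long paddr, unsigned long sz)
	{
		sz += paddr & ~CACHE_LINE_MASK;	/* add bytes below the line start */
		paddr &= CACHE_LINE_MASK;	/* floor paddr to its line */
		return DIV_ROUND_UP(sz, L1_CACHE_BYTES);
	}

	int main(void)
	{
		/* 10 bytes straddling a line boundary still cost two line ops */
		printf("%lu\n", lines_for_range(0x1003c, 10));	/* prints 2 */
		return 0;
	}

For a full page both the flooring and the per-iteration PTAG writes drop out, which is exactly what the full_page_op fast path exploits.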
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index d63f3de0cd5..9c69552350c 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -17,7 +17,7 @@
 #include <asm/pgalloc.h>
 #include <asm/mmu.h>
 
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
 {
 	/*
 	 * Synchronize this task's top level page-table
@@ -27,7 +27,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
-	pgd = pgd_offset_fast(mm, address);
+	pgd = pgd_offset_fast(current->active_mm, address);
 	pgd_k = pgd_offset_k(address);
 
 	if (!pgd_present(*pgd_k))
@@ -52,7 +52,7 @@ bad_area:
 	return 1;
 }
 
-void do_page_fault(struct pt_regs *regs, unsigned long address)
+void do_page_fault(unsigned long address, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address)
 	 * nothing more.
 	 */
 	if (address >= VMALLOC_START && address <= VMALLOC_END) {
-		ret = handle_vmalloc_fault(mm, address);
+		ret = handle_vmalloc_fault(address);
 		if (unlikely(ret))
			goto bad_area_nosemaphore;
 		else
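Dropping the mm argument from handle_vmalloc_fault() works because a vmalloc fault only needs to copy the kernel master table's slot into whatever page directory the CPU is currently running on, and current->active_mm always names that directory, even for kernel threads whose current->mm is NULL. A minimal model of that sync step (hypothetical single-level table and index shift; the real code walks pgd/pud/pmd via asm/pgtable.h):

	#include <stdio.h>

	struct mm_model { unsigned long pgd[256]; };	/* hypothetical page dir */

	static struct mm_model init_mm_model;		/* kernel master table */

	/* Copy the master table's slot for @addr into the active table */
	static int sync_vmalloc_slot(struct mm_model *active_mm, unsigned long addr)
	{
		unsigned long idx = addr >> 24;		/* hypothetical pgd index */

		if (!init_mm_model.pgd[idx])
			return 1;			/* genuinely unmapped */

		active_mm->pgd[idx] = init_mm_model.pgd[idx];
		return 0;
	}

	int main(void)
	{
		struct mm_model task_mm = { { 0 } };

		init_mm_model.pgd[0x70] = 0xdeadb000;	/* pretend vmalloc mapping */
		printf("%d\n", sync_vmalloc_slot(&task_mm, 0x70000000UL)); /* 0 */
		return 0;
	}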
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 81279ec73a6..55e0a85bea7 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -125,10 +125,3 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
-
-#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
-	pr_err("%s(%llx, %llx)\n", __func__, start, end);
-}
-#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 71cb26df425..e1acf0ce564 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,7 +100,7 @@
 
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
 /*
  * Utility Routine to erase a J-TLB entry
@@ -274,6 +274,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	/* If range @start to @end is more than 32 TLB entries deep,
@@ -297,9 +298,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
 		while (start < end) {
-			tlb_entry_erase(start | hw_pid(vma->vm_mm));
+			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
 			start += PAGE_SIZE;
 		}
 	}
@@ -346,6 +347,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	/* Note that it is critical that interrupts are DISABLED between
@@ -353,14 +355,87 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	 */
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
-		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
+	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
 		utlb_invalidate();
 	}
 
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+			 mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = uaddr
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
 /*
  * Routine to create a TLB entry
 */
@@ -400,7 +475,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
 	local_irq_save(flags);
 
-	tlb_paranoid_check(vma->vm_mm->context.asid, address);
+	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
 
 	address &= PAGE_MASK;
 
@@ -610,9 +685,9 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 			  struct pt_regs *regs)
 {
 	int set, way, n;
-	unsigned int pd0[4], pd1[4];	/* assume max 4 ways */
 	unsigned long flags, is_valid;
 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+	unsigned int pd0[mmu->ways], pd1[mmu->ways];
 
 	local_irq_save(flags);
 
@@ -637,7 +712,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 			continue;
 
 		/* Scan the set for duplicate ways: needs a nested loop */
-		for (way = 0; way < mmu->ways; way++) {
+		for (way = 0; way < mmu->ways - 1; way++) {
 			if (!pd0[way])
 				continue;
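The new SMP wrappers all share one shape: a cross-CPU call delivers exactly one void * to the remote handler, so multi-argument flushes are marshalled through a struct tlb_args on the caller's stack. That is safe because the final argument 1 to on_each_cpu()/on_each_cpu_mask() makes the call wait for all handlers to finish before the stack frame can go away. A user-space model of the marshalling pattern, with a stand-in run_on_each_cpu() in place of the kernel's IPI machinery:

	#include <stdio.h>

	struct tlb_args {
		unsigned long ta_start;
		unsigned long ta_end;
	};

	/* Stand-in for on_each_cpu(): the kernel IPIs every online core */
	static void run_on_each_cpu(void (*fn)(void *), void *arg)
	{
		fn(arg);	/* imagine this running once per CPU */
	}

	static void ipi_flush_range(void *arg)
	{
		struct tlb_args *ta = arg;	/* unmarshal the single void * */

		printf("flush [%#lx..%#lx)\n", ta->ta_start, ta->ta_end);
	}

	int main(void)
	{
		struct tlb_args ta = { .ta_start = 0x1000, .ta_end = 0x3000 };

		run_on_each_cpu(ipi_flush_range, &ta);
		return 0;
	}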
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index cf7d7d9ad69..3fcfdb38d24 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -369,8 +369,8 @@ do_slow_path_pf:
 	EXCEPTION_PROLOGUE
 
 	; ------- setup args for Linux Page fault Handler ---------
-	mov_s   r0, sp
-	lr      r1, [efa]
+	mov_s   r1, sp
+	lr      r0, [efa]
 
 	; We don't want exceptions to be disabled while the fault is handled.
 	; Now that we have saved the context we return from exception hence
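The r0/r1 swap in this slow-path stub mirrors the reordered C prototype in fault.c above: in the ARC calling convention the first argument travels in r0, so the faulting address read from the EFA auxiliary register must now land there, and the pt_regs pointer (sp after EXCEPTION_PROLOGUE) moves to r1. As a reading aid, an annotated prototype (annotation only, not kernel source; the register notes assume that convention):

	void do_page_fault(unsigned long address,	/* r0: lr r0, [efa]  */
			   struct pt_regs *regs);	/* r1: mov_s r1, sp  */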