Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/c-octeon.c      |  16
-rw-r--r--   arch/mips/mm/c-r4k.c         |  21
-rw-r--r--   arch/mips/mm/dma-default.c   | 163
-rw-r--r--   arch/mips/mm/fault.c         |  11
-rw-r--r--   arch/mips/mm/highmem.c       |  51
-rw-r--r--   arch/mips/mm/sc-mips.c       |  38
-rw-r--r--   arch/mips/mm/tlbex.c         |  11
-rw-r--r--   arch/mips/mm/uasm.c          |  20
8 files changed, 175 insertions, 156 deletions
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 0f9c488044d..16c4d256b76 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -181,10 +181,10 @@ static void __cpuinit probe_octeon(void)
 	unsigned int config1;
 	struct cpuinfo_mips *c = &current_cpu_data;
 
+	config1 = read_c0_config1();
 	switch (c->cputype) {
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
-		config1 = read_c0_config1();
 		c->icache.linesz = 2 << ((config1 >> 19) & 7);
 		c->icache.sets = 64 << ((config1 >> 22) & 7);
 		c->icache.ways = 1 + ((config1 >> 16) & 7);
@@ -204,6 +204,20 @@ static void __cpuinit probe_octeon(void)
 		c->options |= MIPS_CPU_PREFETCH;
 		break;
 
+	case CPU_CAVIUM_OCTEON2:
+		c->icache.linesz = 2 << ((config1 >> 19) & 7);
+		c->icache.sets = 8;
+		c->icache.ways = 37;
+		c->icache.flags |= MIPS_CACHE_VTAG;
+		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;
+
+		c->dcache.linesz = 128;
+		c->dcache.ways = 32;
+		c->dcache.sets = 8;
+		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+		c->options |= MIPS_CPU_PREFETCH;
+		break;
+
 	default:
 		panic("Unsupported Cavium Networks CPU type\n");
 		break;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6721ee2b1e8..b4923a75cb4 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -42,14 +42,14 @@
  * o collapses to normal function call on UP kernels
  * o collapses to normal function call on systems with a single shared
  *   primary cache.
+ * o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, wait);
+	smp_call_function(func, info, 1);
 #endif
 	func(info);
 	preempt_enable();
@@ -363,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -410,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -442,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 }
 
 struct flush_cache_page_args {
@@ -534,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -547,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
-		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1);
+		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 }
 
 struct flush_icache_range_args {
@@ -589,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 
 	instruction_hazard();
 }
@@ -710,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 }
 
 static void r4k_flush_icache_all(void)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 469d4019f79..21ea14efb83 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -95,10 +95,9 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	return ret;
 }
-
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -123,7 +122,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
@@ -131,10 +129,9 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-
 EXPORT_SYMBOL(dma_free_noncoherent);
 
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
 	unsigned long addr = (unsigned long) vaddr;
@@ -151,8 +148,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages(addr, get_order(size));
 }
 
-EXPORT_SYMBOL(dma_free_coherent);
-
 static inline void __dma_sync(unsigned long addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -174,21 +169,8 @@ static inline void __dma_sync(unsigned long addr, size_t size,
 	}
 }
 
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-	enum dma_data_direction direction)
-{
-	unsigned long addr = (unsigned long) ptr;
-
-	if (!plat_device_is_coherent(dev))
-		__dma_sync(addr, size, direction);
-
-	return plat_map_dma_mem(dev, ptr, size);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	if (cpu_is_noncoherent_r10000(dev))
 		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
@@ -197,15 +179,11 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		unsigned long addr;
 
@@ -219,33 +197,27 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	return nents;
 }
 
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
+static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
-	BUG_ON(direction == DMA_NONE);
+	unsigned long addr;
 
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
+	addr = (unsigned long) page_address(page) + offset;
 
-		addr = (unsigned long) page_address(page) + offset;
+	if (!plat_device_is_coherent(dev))
 		__dma_sync(addr, size, direction);
-	}
 
-	return plat_map_dma_mem_page(dev, page) + offset;
+	return plat_map_dma_mem(dev, (void *)addr, size);
 }
 
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	enum dma_data_direction direction)
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nhwentries, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
 {
 	unsigned long addr;
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
 		    direction != DMA_TO_DEVICE) {
@@ -257,13 +229,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	}
 }
 
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_cpu(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	if (cpu_is_noncoherent_r10000(dev)) {
 		unsigned long addr;
 
@@ -272,13 +240,9 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
+static void mips_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-
 	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
@@ -288,46 +252,11 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	if (cpu_is_noncoherent_r10000(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr + offset, size, direction);
-	}
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
@@ -336,15 +265,11 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
+static void mips_dma_sync_sg_for_device(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
@@ -353,24 +278,18 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 	}
 }
 
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return plat_dma_mapping_error(dev, dma_addr);
 }
 
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
+int mips_dma_supported(struct device *dev, u64 mask)
 {
 	return plat_dma_supported(dev, mask);
 }
 
-EXPORT_SYMBOL(dma_supported);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction direction)
+			 enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
 
@@ -380,3 +299,31 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 
 EXPORT_SYMBOL(dma_cache_sync);
+
+static struct dma_map_ops mips_default_dma_map_ops = {
+	.alloc_coherent = mips_dma_alloc_coherent,
+	.free_coherent = mips_dma_free_coherent,
+	.map_page = mips_dma_map_page,
+	.unmap_page = mips_dma_unmap_page,
+	.map_sg = mips_dma_map_sg,
+	.unmap_sg = mips_dma_unmap_sg,
+	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
+	.sync_single_for_device = mips_dma_sync_single_for_device,
+	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = mips_dma_sync_sg_for_device,
+	.mapping_error = mips_dma_mapping_error,
+	.dma_supported = mips_dma_supported
+};
+
+struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+EXPORT_SYMBOL(mips_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init mips_dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
+}
+fs_initcall(mips_dma_init);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 783ad0065fd..137ee76a004 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,6 +18,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/kprobes.h>
+#include <linux/perf_event.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
@@ -144,6 +145,7 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -151,10 +153,15 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
+	if (fault & VM_FAULT_MAJOR) {
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				1, 0, regs, address);
 		tsk->maj_flt++;
-	else
+	} else {
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				1, 0, regs, address);
 		tsk->min_flt++;
+	}
 
 	up_read(&mm->mmap_sem);
 	return;
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef1..3634c7ea06a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,48 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 	return (void*) vaddr;
 }
 EXPORT_SYMBOL(__kmap_atomic);
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
+	kmap_atomic_idx_pop();
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +114,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 5ab5fa8c1d8..9cca8de0054 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -57,6 +57,38 @@ static struct bcache_ops mips_sc_ops = {
 	.bc_inv = mips_sc_inv
 };
 
+/*
+ * Check if the L2 cache controller is activated on a particular platform.
+ * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
+ * cores both use c0_config2's bit 12 as "L2 Bypass" bit, that is the
+ * cache being disabled.  However there is no guarantee for this to be
+ * true on all platforms.  In an act of stupidity the spec defined bits
+ * 12..15 as implementation defined so below function will eventually have
+ * to be replaced by a platform specific probe.
+ */
+static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
+{
+	unsigned int config2 = read_c0_config2();
+	unsigned int tmp;
+
+	/* Check the bypass bit (L2B) */
+	switch (c->cputype) {
+	case CPU_34K:
+	case CPU_74K:
+	case CPU_1004K:
+	case CPU_BMIPS5000:
+		if (config2 & (1 << 12))
+			return 0;
+	}
+
+	tmp = (config2 >> 4) & 0x0f;
+	if (0 < tmp && tmp <= 7)
+		c->scache.linesz = 2 << tmp;
+	else
+		return 0;
+	return 1;
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -79,10 +111,8 @@ static inline int __init mips_sc_probe(void)
 		return 0;
 
 	config2 = read_c0_config2();
-	tmp = (config2 >> 4) & 0x0f;
-	if (0 < tmp && tmp <= 7)
-		c->scache.linesz = 2 << tmp;
-	else
+
+	if (!mips_sc_is_activated(c))
 		return 0;
 
 	tmp = (config2 >> 8) & 0x0f;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4510e61883e..93816f3bca6 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -338,13 +338,12 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_4KSC:
 	case CPU_20KC:
 	case CPU_25KF:
-	case CPU_BCM3302:
-	case CPU_BCM4710:
+	case CPU_BMIPS32:
+	case CPU_BMIPS3300:
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+	case CPU_BMIPS5000:
 	case CPU_LOONGSON2:
-	case CPU_BCM6338:
-	case CPU_BCM6345:
-	case CPU_BCM6348:
-	case CPU_BCM6358:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index d2647a4e012..23afdebc8e5 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -405,7 +405,6 @@ I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
 I_u3u1u2(_or)
-I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
 I_u2s3u1(_scd)
@@ -427,6 +426,25 @@ I_u1(_syscall);
 I_u1u2s3(_bbit0);
 I_u1u2s3(_bbit1);
 
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#include <asm/octeon/octeon.h>
+void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
+			    unsigned int c)
+{
+	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+		/*
+		 * As per erratum Core-14449, replace prefetches 0-4,
+		 * 6-24 with 'pref 28'.
+		 */
+		build_insn(buf, insn_pref, c, 28, b);
+	else
+		build_insn(buf, insn_pref, c, a, b);
+}
+UASM_EXPORT_SYMBOL(uasm_i_pref);
+#else
+I_u2s3u1(_pref)
+#endif
+
 /* Handle labels. */
 void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
 {
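Background on the dma-default.c hunks above: the per-function exported DMA entry points are folded into a single struct dma_map_ops table, and mips_dma_map_ops is exported so the generic DMA API can dispatch through it. The sketch below is not part of the patch; it assumes the generic dma_map_ops plumbing of that kernel generation (get_dma_ops(), struct dma_attrs, asm-generic/dma-mapping-common.h) and only illustrates how a driver-side dma_map_single() call ends up in mips_dma_map_page() once the ops table is registered. The helper name is hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical driver-side helper, for illustration only. */
static dma_addr_t example_map_for_device(struct device *dev, void *buf,
					 size_t len)
{
	/* On MIPS, get_dma_ops() is expected to resolve to mips_dma_map_ops. */
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t handle;

	/* dma_map_single(dev, buf, len, DMA_TO_DEVICE) boils down to this call. */
	handle = ops->map_page(dev, virt_to_page(buf), offset_in_page(buf),
			       len, DMA_TO_DEVICE, NULL);

	if (dma_mapping_error(dev, handle))
		dev_err(dev, "DMA mapping failed\n");

	return handle;
}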
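The highmem.c hunks are the MIPS side of the stack-based kmap_atomic() rework: callers no longer name a fixed enum km_type slot, the slot index is pushed and popped per CPU via kmap_atomic_idx_push()/kmap_atomic_idx_pop() inside __kmap_atomic()/__kunmap_atomic(). A rough caller-level sketch follows, assuming the transitional <linux/highmem.h> wrappers of that kernel generation accept the one-argument form; the helper is hypothetical and only shows the before/after calling convention.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy the start of a (possibly highmem) page. */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src;

	/* Old convention: src = kmap_atomic(page, KM_USER0); */
	src = kmap_atomic(page);	/* new: the slot index is pushed per CPU */
	memcpy(dst, src, len);
	kunmap_atomic(src);		/* pops the slot pushed above */
}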