Diffstat (limited to 'arch/arm/mm')
36 files changed, 818 insertions, 276 deletions
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 827e238e5d4..e8d34a80851 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -27,6 +27,9 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o +AFLAGS_abort-ev6.o :=-Wa,-march=armv6k +AFLAGS_abort-ev7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o @@ -39,6 +42,9 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o +AFLAGS_cache-v6.o :=-Wa,-march=armv6 +AFLAGS_cache-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o @@ -58,6 +64,9 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o +AFLAGS_tlb-v6.o :=-Wa,-march=armv6 +AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o @@ -84,6 +93,9 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o +AFLAGS_proc-v6.o :=-Wa,-march=armv6 +AFLAGS_proc-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index a89444a3c01..7148e53e607 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_inv_range) +fa_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry @@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_clean_range) +fa_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq fa_dma_clean_range + bcs fa_dma_inv_range + b fa_dma_flush_range +ENDPROC(fa_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_unmap_area) + mov pc, lr +ENDPROC(fa_dma_unmap_area) + __INITDATA .type fa_cache_fns, #object @@ -215,7 +239,7 @@ ENTRY(fa_cache_fns) .long fa_coherent_kern_range .long fa_coherent_user_range .long fa_flush_kern_dcache_area - .long fa_dma_inv_range - .long fa_dma_clean_range + .long fa_dma_map_area + .long fa_dma_unmap_area .long fa_dma_flush_range .size fa_cache_fns, . - fa_cache_fns diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2a482731ea3..c2ff3c599fe 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ /* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. 
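The *_dma_map_area entry points added above fold the old dma_inv_range/dma_clean_range/dma_flush_range trio behind a single direction-dispatching call, while the per-range routines become file-local labels. The cmp/beq/bcs sequence in fa_dma_map_area relies on the ordering of enum dma_data_direction (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2). A minimal C sketch of the dispatch those four instructions encode; clean_range/inv_range/flush_range are placeholders for the per-CPU routines, not real kernel symbols:

	/* Sketch only: C equivalent of the fa_dma_map_area dispatch. */
	static void dma_map_area_sketch(const void *start, size_t size, int dir)
	{
		const void *end = start + size;		/* add r1, r1, r0 */

		if (dir == DMA_TO_DEVICE)		/* cmp + beq */
			clean_range(start, end);	/* writeback only */
		else if (dir == DMA_FROM_DEVICE)	/* bcs: dir > DMA_TO_DEVICE */
			inv_range(start, end);		/* invalidate only */
		else					/* DMA_BIDIRECTIONAL */
			flush_range(start, end);	/* clean + invalidate */
	}
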
- * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_inv_range) - /* FALLTHROUGH */ - -/* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -108,18 +94,29 @@ ENTRY(v3_dma_inv_range) ENTRY(v3_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache + mov pc, lr + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v3_dma_flush_range /* FALLTHROUGH */ /* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction */ -ENTRY(v3_dma_clean_range) +ENTRY(v3_dma_map_area) mov pc, lr +ENDPROC(v3_dma_unmap_area) +ENDPROC(v3_dma_map_area) __INITDATA @@ -131,7 +128,7 @@ ENTRY(v3_cache_fns) .long v3_coherent_kern_range .long v3_coherent_user_range .long v3_flush_kern_dcache_area - .long v3_dma_inv_range - .long v3_dma_clean_range + .long v3_dma_map_area + .long v3_dma_unmap_area .long v3_dma_flush_range .size v3_cache_fns, . - v3_cache_fns diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 5c7da3e372e..4810f7e3e81 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ /* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_inv_range) - /* FALLTHROUGH */ - -/* * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. @@ -120,18 +106,29 @@ ENTRY(v4_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif + mov pc, lr + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v4_dma_flush_range /* FALLTHROUGH */ /* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction */ -ENTRY(v4_dma_clean_range) +ENTRY(v4_dma_map_area) mov pc, lr +ENDPROC(v4_dma_unmap_area) +ENDPROC(v4_dma_map_area) __INITDATA @@ -143,7 +140,7 @@ ENTRY(v4_cache_fns) .long v4_coherent_kern_range .long v4_coherent_user_range .long v4_flush_kern_dcache_area - .long v4_dma_inv_range - .long v4_dma_clean_range + .long v4_dma_map_area + .long v4_dma_unmap_area .long v4_dma_flush_range .size v4_cache_fns, . 
- v4_cache_fns diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 3dbedf1ec0e..df8368afa10 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_inv_range) +v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_clean_range) +v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range) .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v4wb_dma_clean_range + bcs v4wb_dma_inv_range + b v4wb_dma_flush_range +ENDPROC(v4wb_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_unmap_area) + mov pc, lr +ENDPROC(v4wb_dma_unmap_area) + __INITDATA .type v4wb_cache_fns, #object @@ -226,7 +250,7 @@ ENTRY(v4wb_cache_fns) .long v4wb_coherent_kern_range .long v4wb_coherent_user_range .long v4wb_flush_kern_dcache_area - .long v4wb_dma_inv_range - .long v4wb_dma_clean_range + .long v4wb_dma_map_area + .long v4wb_dma_unmap_area .long v4wb_dma_flush_range .size v4wb_cache_fns, . - v4wb_cache_fns diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index b3b7410270b..45c70312f43 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wt_dma_inv_range) +v4wt_dma_inv_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4wt_dma_clean_range) mov pc, lr /* @@ -172,6 +161,29 @@ ENTRY(v4wt_dma_clean_range) .globl v4wt_dma_flush_range .equ v4wt_dma_flush_range, v4wt_dma_inv_range +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v4wt_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_map_area) + mov pc, lr +ENDPROC(v4wt_dma_unmap_area) +ENDPROC(v4wt_dma_map_area) + __INITDATA .type v4wt_cache_fns, #object @@ -182,7 +194,7 @@ ENTRY(v4wt_cache_fns) .long v4wt_coherent_kern_range .long v4wt_coherent_user_range .long v4wt_flush_kern_dcache_area - .long v4wt_dma_inv_range - .long v4wt_dma_clean_range + .long v4wt_dma_map_area + .long v4wt_dma_unmap_area .long v4wt_dma_flush_range .size v4wt_cache_fns, . 
- v4wt_cache_fns diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 4ba0a24ce6f..9d89c67a1cc 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_inv_range) +v6_dma_inv_range: tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE @@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_clean_range) +v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE @@ -263,6 +263,32 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_map_area) + add r1, r1, r0 + teq r2, #DMA_FROM_DEVICE + beq v6_dma_inv_range + b v6_dma_clean_range +ENDPROC(v6_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v6_dma_inv_range + mov pc, lr +ENDPROC(v6_dma_unmap_area) + __INITDATA .type v6_cache_fns, #object @@ -273,7 +299,7 @@ ENTRY(v6_cache_fns) .long v6_coherent_kern_range .long v6_coherent_user_range .long v6_flush_kern_dcache_area - .long v6_dma_inv_range - .long v6_dma_clean_range + .long v6_dma_map_area + .long v6_dma_unmap_area .long v6_dma_flush_range .size v6_cache_fns, . - v6_cache_fns diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 9073db849fb..bcd64f26587 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_inv_range) +v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 @@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_clean_range) +v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 @@ -271,6 +271,32 @@ ENTRY(v7_dma_flush_range) mov pc, lr ENDPROC(v7_dma_flush_range) +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_map_area) + add r1, r1, r0 + teq r2, #DMA_FROM_DEVICE + beq v7_dma_inv_range + b v7_dma_clean_range +ENDPROC(v7_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v7_dma_inv_range + mov pc, lr +ENDPROC(v7_dma_unmap_area) + __INITDATA .type v7_cache_fns, #object @@ -281,7 +307,7 @@ ENTRY(v7_cache_fns) .long v7_coherent_kern_range .long v7_coherent_user_range .long v7_flush_kern_dcache_area - .long v7_dma_inv_range - .long v7_dma_clean_range + .long v7_dma_map_area + .long v7_dma_unmap_area .long v7_dma_flush_range .size v7_cache_fns, . 
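Unlike the writeback CPUs above, the v6 and v7 versions never fall back to a full flush: a bidirectional mapping is handled as clean at map time plus invalidate at unmap time, splitting the buffer-ownership transfer across the two calls. A hedged C sketch of that split, with inv_range/clean_range again standing in for v6_dma_inv_range/v6_dma_clean_range:

	/* map: teq r2, #DMA_FROM_DEVICE; beq inv; b clean */
	static void v6_map_sketch(const void *start, size_t size, int dir)
	{
		if (dir == DMA_FROM_DEVICE)
			inv_range(start, start + size);   /* device will write */
		else
			clean_range(start, start + size); /* push CPU data to RAM */
	}

	/* unmap: teq r2, #DMA_TO_DEVICE; bne inv; mov pc, lr */
	static void v6_unmap_sketch(const void *start, size_t size, int dir)
	{
		if (dir != DMA_TO_DEVICE)
			inv_range(start, start + size);   /* drop stale lines */
	}
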
- v7_cache_fns diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 5d180cb0bd9..c3154928bcc 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -221,15 +221,14 @@ static int __init xsc3_l2_init(void) if (!cpu_is_xsc3() || !xsc3_l2_present()) return 0; - if (!(get_cr() & CR_L2)) { + if (get_cr() & CR_L2) { pr_info("XScale3 L2 cache enabled.\n"); - adjust_cr(CR_L2, CR_L2); xsc3_l2_inv_all(); - } - outer_cache.inv_range = xsc3_l2_inv_range; - outer_cache.clean_range = xsc3_l2_clean_range; - outer_cache.flush_range = xsc3_l2_flush_range; + outer_cache.inv_range = xsc3_l2_inv_range; + outer_cache.clean_range = xsc3_l2_clean_range; + outer_cache.flush_range = xsc3_l2_flush_range; + } return 0; } diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 70997d5bee2..5eb4fd93893 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -68,12 +68,13 @@ feroceon_copy_user_page(void *kto, const void *kfrom) } void feroceon_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); feroceon_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index de9c06854ad..f72303e1d80 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom) } void v3_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7370a7142b0..598c51ad507 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to) } void v4_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9ab09841422..7c2eb55cd4a 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -48,12 +48,13 @@ v4wb_copy_user_page(void *kto, const void *kfrom) } void v4wb_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); v4wb_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 300efafd664..172e6a55458 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) } void v4wt_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 0fa1319273d..8bca4dea6df 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock); * attack the kernel's existing mapping of these pages. 
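The copypage-* hunks above all grow a struct vm_area_struct argument: with it, implementations that copy through an aliasing kernel mapping can first write back the user-space alias of the source page, which is what the added flush_cache_page() calls do in the feroceon, v4wb, and xsc3 versions. Assembled from those hunks, a sketch of the resulting pattern (copy_page_body is a hypothetical stand-in for the per-CPU copy loop):

	void copy_user_highpage_sketch(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *kto = kmap_atomic(to, KM_USER0);
		void *kfrom = kmap_atomic(from, KM_USER1);

		/* new: flush the user alias so the kernel-mapping copy
		 * below observes the latest data (VIVT caches) */
		flush_cache_page(vma, vaddr, page_to_pfn(from));
		copy_page_body(kto, kfrom);

		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}
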
*/ static void v6_copy_user_highpage_nonaliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; @@ -81,7 +81,7 @@ static void discard_old_kernel_data(void *kto) * Copy the page, taking account of the cache colour. */ static void v6_copy_user_highpage_aliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long kfrom, kto; diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index bc4525f5ab2..747ad4140fc 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -71,12 +71,13 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); xsc3_mc_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 76824d3e966..9920c0ae209 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to) } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb5d36..64daef2173b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,78 +404,44 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -void dma_cache_maint(const void *start, size_t size, int direction) +void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) { - void (*inner_op)(const void *, const void *); - void (*outer_op)(unsigned long, unsigned long); - - BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1)); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; - outer_op = outer_flush_range; - break; - default: - BUG(); - } + unsigned long paddr; + + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_map_area(kaddr, size, dir); - inner_op(start, start + size); - outer_op(__pa(start), __pa(start) + size); + paddr = __pa(kaddr); + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } -EXPORT_SYMBOL(dma_cache_maint); +EXPORT_SYMBOL(___dma_single_cpu_to_dev); -static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, - size_t size, int direction) +void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) { - void *vaddr; - unsigned long paddr; - void (*inner_op)(const void *, const void *); - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; - outer_op = outer_flush_range; - break; - default: - BUG(); - } + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); - if (!PageHighMem(page)) { - vaddr = page_address(page) + offset; - inner_op(vaddr, vaddr + size); - } else { - vaddr = kmap_high_get(page); - if (vaddr) { - vaddr += offset; - inner_op(vaddr, vaddr + size); - kunmap_high(page); - } + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) { + unsigned long paddr = __pa(kaddr); + outer_inv_range(paddr, paddr + size); } - paddr = page_to_phys(page) + offset; - outer_op(paddr, paddr + size); + dmac_unmap_area(kaddr, size, dir); } +EXPORT_SYMBOL(___dma_single_dev_to_cpu); -void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int dir) +static void dma_cache_maint_page(struct page *page, unsigned long offset, + size_t size, enum dma_data_direction dir, + void (*op)(const void *, size_t, int)) { /* * A single sg entry may refer to multiple physically contiguous @@ -486,20 +452,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset, size_t left = size; do { size_t len = left; - if (PageHighMem(page) && len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; + void *vaddr; + + if (PageHighMem(page)) { + if (len + offset > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + } + len = PAGE_SIZE - offset; } - len = PAGE_SIZE - offset; + vaddr = kmap_high_get(page); + if (vaddr) { + vaddr += offset; + op(vaddr, len, dir); + kunmap_high(page); + } + } else { + vaddr = page_address(page) + offset; + op(vaddr, len, dir); } - dma_cache_maint_contiguous(page, offset, len, dir); offset = 0; page++; left -= len; } while (left); } -EXPORT_SYMBOL(dma_cache_maint_page); + +void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + unsigned long paddr; + + dma_cache_maint_page(page, off, size, dir, dmac_map_area); + + paddr = page_to_phys(page) + off; + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? 
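Note the ordering in the helpers above: ___dma_*_cpu_to_dev performs the inner (CPU) maintenance via dmac_map_area first and the outer cache second, while ___dma_*_dev_to_cpu reverses that, invalidating the outer cache before dmac_unmap_area. From a driver's point of view the streaming API is unchanged; a sketch of the contract these helpers implement, assuming dev, buf, and len exist in the caller:

	/* CPU hands the buffer to the device ... */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... device DMAs into buf; the CPU must not touch it here ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	/* ... unmap invalidated whatever the CPU pulled back into its
	 * caches meanwhile, so reads of buf now see the device's data */
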
*/ +} +EXPORT_SYMBOL(___dma_page_cpu_to_dev); + +void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + unsigned long paddr = page_to_phys(page) + off; + + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) + outer_inv_range(paddr, paddr + size); + + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); +} +EXPORT_SYMBOL(___dma_page_dev_to_cpu); /** * dma_map_sg - map a set of SG buffers for streaming mode DMA @@ -573,8 +581,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) { - dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir); + if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, + sg_dma_len(s), dir)) + continue; + + __dma_page_dev_to_cpu(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_cpu); @@ -597,9 +609,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sg_dma_len(s), dir)) continue; - if (!arch_is_coherent()) - dma_cache_maint_page(sg_page(s), s->offset, - s->length, dir); + __dma_page_cpu_to_dev(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_device); diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 56ee15321b0..c9b97e9836a 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; * Therefore those configurations which might call adjust_pte (those * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ -static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn, pte_t *ptep) { - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte, entry; + pte_t entry = *ptep; int ret; - pgd = pgd_offset(vma->vm_mm, address); - if (pgd_none(*pgd)) - goto no_pgd; - if (pgd_bad(*pgd)) - goto bad_pgd; - - pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) - goto no_pmd; - if (pmd_bad(*pmd)) - goto bad_pmd; - - pte = pte_offset_map(pmd, address); - entry = *pte; - /* * If this page is present, it's actually being shared. */ @@ -68,33 +52,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) * fault (ie, is old), we can safely ignore any issues. */ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { - unsigned long pfn = pte_pfn(entry); flush_cache_page(vma, address, pfn); outer_flush_range((pfn << PAGE_SHIFT), (pfn << PAGE_SHIFT) + PAGE_SIZE); pte_val(entry) &= ~L_PTE_MT_MASK; pte_val(entry) |= shared_pte_mask; - set_pte_at(vma->vm_mm, address, pte, entry); + set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_page(vma, address); } - pte_unmap(pte); + return ret; +} + +static int adjust_pte(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn) +{ + spinlock_t *ptl; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int ret; + + pgd = pgd_offset(vma->vm_mm, address); + if (pgd_none_or_clear_bad(pgd)) + return 0; + + pmd = pmd_offset(pgd, address); + if (pmd_none_or_clear_bad(pmd)) + return 0; -bad_pgd: - pgd_ERROR(*pgd); - pgd_clear(pgd); -no_pgd: - return 0; - -bad_pmd: - pmd_ERROR(*pmd); - pmd_clear(pmd); -no_pmd: - return 0; + /* + * This is called while another page table is mapped, so we + * must use the nested version. This also means we need to + * open-code the spin-locking. 
+ */ + ptl = pte_lockptr(vma->vm_mm, pmd); + pte = pte_offset_map_nested(pmd, address); + spin_lock(ptl); + + ret = do_adjust_pte(vma, address, pfn, pte); + + spin_unlock(ptl); + pte_unmap_nested(pte); + + return ret; } static void -make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) +make_coherent(struct address_space *mapping, struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; @@ -122,11 +128,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - aliases += adjust_pte(mpnt, mpnt->vm_start + offset); + aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); } flush_dcache_mmap_unlock(mapping); if (aliases) - adjust_pte(vma, addr); + do_adjust_pte(vma, addr, pfn, ptep); else flush_cache_page(vma, addr, pfn); } @@ -144,9 +150,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne * * Note that the pte lock will be held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) { - unsigned long pfn = pte_pfn(pte); + unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; @@ -168,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) #endif if (mapping) { if (cache_is_vivt()) - make_coherent(mapping, vma, addr, pfn); + make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6f3a4b7a3b8..e34f095e209 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -13,6 +13,7 @@ #include <asm/cacheflush.h> #include <asm/cachetype.h> +#include <asm/smp_plat.h> #include <asm/system.h> #include <asm/tlbflush.h> @@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) __flush_icache_all(); } +#else +#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#endif +#ifdef CONFIG_SMP +static void flush_ptrace_access_other(void *args) +{ + __flush_icache_all(); +} +#endif + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) + unsigned long uaddr, void *kaddr, unsigned long len) { if (cache_is_vivt()) { - vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + unsigned long addr = (unsigned long)kaddr; + __cpuc_coherent_kern_range(addr, addr + len); + } return; } @@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing cache */ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && - vma->vm_flags & VM_EXEC) { + if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; - /* only flushing the kernel mapping on non-aliasing VIPT */ __cpuc_coherent_kern_range(addr, addr + len); +#ifdef CONFIG_SMP + if (cache_ops_need_broadcast()) + smp_call_function(flush_ptrace_access_other, + NULL, 1); +#endif } } -#else -#define flush_pfn_alias(pfn,vaddr) do { } while (0) + +/* + * Copy user data from/to a page which is mapped into a different + * processes 
address space. Really, we want to allow our "user + * space" model to handle this. + * + * Note that this code needs to run on the current CPU. + */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ +#ifdef CONFIG_SMP + preempt_disable(); #endif + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +#ifdef CONFIG_SMP + preempt_enable(); +#endif +} void __flush_dcache_page(struct address_space *mapping, struct page *page) { diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 52c40d15567..a04ffbbbe25 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -616,7 +616,7 @@ void __init mem_init(void) "%dK data, %dK init, %luK highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); + totalhigh_pages << (PAGE_SHIFT-10)); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 8012e24282b..72507c630ce 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020_dma_inv_range) +arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020_dma_clean_range) +arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020_dma_clean_range + bcs arm1020_dma_inv_range + b arm1020_dma_flush_range +ENDPROC(arm1020_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020_dma_unmap_area) + ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_cache_all .long arm1020_flush_user_cache_all @@ -337,8 +361,8 @@ ENTRY(arm1020_cache_fns) .long arm1020_coherent_kern_range .long arm1020_coherent_user_range .long arm1020_flush_kern_dcache_area - .long arm1020_dma_inv_range - .long arm1020_dma_clean_range + .long arm1020_dma_map_area + .long arm1020_dma_unmap_area .long arm1020_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 41fe25d234f..d2782980560 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020e_dma_inv_range) +arm1020e_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020e_dma_clean_range) +arm1020e_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ 
+ENTRY(arm1020e_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020e_dma_clean_range + bcs arm1020e_dma_inv_range + b arm1020e_dma_flush_range +ENDPROC(arm1020e_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020e_dma_unmap_area) + ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_cache_all .long arm1020e_flush_user_cache_all @@ -323,8 +347,8 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_coherent_kern_range .long arm1020e_coherent_user_range .long arm1020e_flush_kern_dcache_area - .long arm1020e_dma_inv_range - .long arm1020e_dma_clean_range + .long arm1020e_dma_map_area + .long arm1020e_dma_unmap_area .long arm1020e_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 20a5b1b31a7..ce13e4a827d 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1022_dma_inv_range) +arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1022_dma_clean_range) +arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1022_dma_clean_range + bcs arm1022_dma_inv_range + b arm1022_dma_flush_range +ENDPROC(arm1022_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_unmap_area) + mov pc, lr +ENDPROC(arm1022_dma_unmap_area) + ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_cache_all .long arm1022_flush_user_cache_all @@ -312,8 +336,8 @@ ENTRY(arm1022_cache_fns) .long arm1022_coherent_kern_range .long arm1022_coherent_user_range .long arm1022_flush_kern_dcache_area - .long arm1022_dma_inv_range - .long arm1022_dma_clean_range + .long arm1022_dma_map_area + .long arm1022_dma_unmap_area .long arm1022_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 96aedb10fcc..636672a29c6 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1026_dma_inv_range) +arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1026_dma_clean_range) +arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1026_dma_clean_range + bcs arm1026_dma_inv_range + b arm1026_dma_flush_range +ENDPROC(arm1026_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - 
start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_unmap_area) + mov pc, lr +ENDPROC(arm1026_dma_unmap_area) + ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_cache_all .long arm1026_flush_user_cache_all @@ -306,8 +330,8 @@ ENTRY(arm1026_cache_fns) .long arm1026_coherent_kern_range .long arm1026_coherent_user_range .long arm1026_flush_kern_dcache_area - .long arm1026_dma_inv_range - .long arm1026_dma_clean_range + .long arm1026_dma_map_area + .long arm1026_dma_unmap_area .long arm1026_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 471669e2d7c..8be81992645 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm920_dma_inv_range) +arm920_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm920_dma_clean_range) +arm920_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm920_dma_clean_range + bcs arm920_dma_inv_range + b arm920_dma_flush_range +ENDPROC(arm920_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_unmap_area) + mov pc, lr +ENDPROC(arm920_dma_unmap_area) + ENTRY(arm920_cache_fns) .long arm920_flush_kern_cache_all .long arm920_flush_user_cache_all @@ -295,8 +319,8 @@ ENTRY(arm920_cache_fns) .long arm920_coherent_kern_range .long arm920_coherent_user_range .long arm920_flush_kern_dcache_area - .long arm920_dma_inv_range - .long arm920_dma_clean_range + .long arm920_dma_map_area + .long arm920_dma_unmap_area .long arm920_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index ee111b00fa4..c0ff8e4b107 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm922_dma_inv_range) +arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm922_dma_clean_range) +arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm922_dma_clean_range + bcs arm922_dma_inv_range + b arm922_dma_flush_range +ENDPROC(arm922_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_unmap_area) + mov pc, lr +ENDPROC(arm922_dma_unmap_area) + 
ENTRY(arm922_cache_fns) .long arm922_flush_kern_cache_all .long arm922_flush_user_cache_all @@ -297,8 +321,8 @@ ENTRY(arm922_cache_fns) .long arm922_coherent_kern_range .long arm922_coherent_user_range .long arm922_flush_kern_dcache_area - .long arm922_dma_inv_range - .long arm922_dma_clean_range + .long arm922_dma_map_area + .long arm922_dma_unmap_area .long arm922_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 8deb5bde58e..3c6cffe400f 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm925_dma_inv_range) +arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm925_dma_clean_range) +arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm925_dma_clean_range + bcs arm925_dma_inv_range + b arm925_dma_flush_range +ENDPROC(arm925_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_unmap_area) + mov pc, lr +ENDPROC(arm925_dma_unmap_area) + ENTRY(arm925_cache_fns) .long arm925_flush_kern_cache_all .long arm925_flush_user_cache_all @@ -348,8 +372,8 @@ ENTRY(arm925_cache_fns) .long arm925_coherent_kern_range .long arm925_coherent_user_range .long arm925_flush_kern_dcache_area - .long arm925_dma_inv_range - .long arm925_dma_clean_range + .long arm925_dma_map_area + .long arm925_dma_unmap_area .long arm925_dma_flush_range ENTRY(cpu_arm925_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 64db6e275a4..75b707c9cce 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm926_dma_inv_range) +arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm926_dma_clean_range) +arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm926_dma_clean_range + bcs arm926_dma_inv_range + b arm926_dma_flush_range +ENDPROC(arm926_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_unmap_area) + mov pc, lr +ENDPROC(arm926_dma_unmap_area) + ENTRY(arm926_cache_fns) .long arm926_flush_kern_cache_all .long arm926_flush_user_cache_all @@ -311,8 +335,8 @@ ENTRY(arm926_cache_fns) 
.long arm926_coherent_kern_range .long arm926_coherent_user_range .long arm926_flush_kern_dcache_area - .long arm926_dma_inv_range - .long arm926_dma_clean_range + .long arm926_dma_map_area + .long arm926_dma_unmap_area .long arm926_dma_flush_range ENTRY(cpu_arm926_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8196b9f401f..1af1657819e 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_inv_range) +arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_clean_range) +arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm940_dma_clean_range + bcs arm940_dma_inv_range + b arm940_dma_flush_range +ENDPROC(arm940_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_unmap_area) + mov pc, lr +ENDPROC(arm940_dma_unmap_area) + ENTRY(arm940_cache_fns) .long arm940_flush_kern_cache_all .long arm940_flush_user_cache_all @@ -240,8 +264,8 @@ ENTRY(arm940_cache_fns) .long arm940_coherent_kern_range .long arm940_coherent_user_range .long arm940_flush_kern_dcache_area - .long arm940_dma_inv_range - .long arm940_dma_clean_range + .long arm940_dma_map_area + .long arm940_dma_unmap_area .long arm940_dma_flush_range __INIT diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 9a951239c86..1664b6aaff7 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area) * - end - virtual end address * (same as arm926) */ -ENTRY(arm946_dma_inv_range) +arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range) * * (same as arm926) */ -ENTRY(arm946_dma_clean_range) +arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm946_dma_clean_range + bcs arm946_dma_inv_range + b arm946_dma_flush_range +ENDPROC(arm946_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_unmap_area) + mov pc, lr +ENDPROC(arm946_dma_unmap_area) + ENTRY(arm946_cache_fns) .long arm946_flush_kern_cache_all .long arm946_flush_user_cache_all @@ -282,8 +306,8 @@ ENTRY(arm946_cache_fns) .long arm946_coherent_kern_range .long 
arm946_coherent_user_range .long arm946_flush_kern_dcache_area - .long arm946_dma_inv_range - .long arm946_dma_clean_range + .long arm946_dma_map_area + .long arm946_dma_unmap_area .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index dbc39383e66..53e63234384 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_inv_range) +feroceon_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_inv_range) +feroceon_range_dma_inv_range: mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_clean_range) +feroceon_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_clean_range) +feroceon_range_dma_clean_range: mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive @@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_dma_clean_range + bcs feroceon_dma_inv_range + b feroceon_dma_flush_range +ENDPROC(feroceon_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_range_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_range_dma_clean_range + bcs feroceon_range_dma_inv_range + b feroceon_range_dma_flush_range +ENDPROC(feroceon_range_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_unmap_area) + mov pc, lr +ENDPROC(feroceon_dma_unmap_area) + ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_cache_all .long feroceon_flush_user_cache_all @@ -374,8 +412,8 @@ ENTRY(feroceon_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_flush_kern_dcache_area - .long feroceon_dma_inv_range - .long feroceon_dma_clean_range + .long feroceon_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_dma_flush_range ENTRY(feroceon_range_cache_fns) @@ -385,8 +423,8 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_range_flush_kern_dcache_area - .long feroceon_range_dma_inv_range - .long feroceon_range_dma_clean_range + .long feroceon_range_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_range_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9674d36cc97..caa31154e7d 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(mohawk_dma_inv_range) +mohawk_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, 
#CACHE_DLINESIZE - 1 @@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) * * (same as v4wb) */ -ENTRY(mohawk_dma_clean_range) +mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq mohawk_dma_clean_range + bcs mohawk_dma_inv_range + b mohawk_dma_flush_range +ENDPROC(mohawk_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_unmap_area) + mov pc, lr +ENDPROC(mohawk_dma_unmap_area) + ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_cache_all .long mohawk_flush_user_cache_all @@ -275,8 +299,8 @@ ENTRY(mohawk_cache_fns) .long mohawk_coherent_kern_range .long mohawk_coherent_user_range .long mohawk_flush_kern_dcache_area - .long mohawk_dma_inv_range - .long mohawk_dma_clean_range + .long mohawk_dma_map_area + .long mohawk_dma_unmap_area .long mohawk_dma_flush_range ENTRY(cpu_mohawk_dcache_clean_area) diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 96456f54879..e5797f1c1db 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_inv_range) +xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_clean_range) +xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE @@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xsc3_dma_clean_range + bcs xsc3_dma_inv_range + b xsc3_dma_flush_range +ENDPROC(xsc3_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_unmap_area) + mov pc, lr +ENDPROC(xsc3_dma_unmap_area) + ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_cache_all .long xsc3_flush_user_cache_all @@ -311,8 +335,8 @@ ENTRY(xsc3_cache_fns) .long xsc3_coherent_kern_range .long xsc3_coherent_user_range .long xsc3_flush_kern_dcache_area - .long xsc3_dma_inv_range - .long xsc3_dma_clean_range + .long xsc3_dma_map_area + .long xsc3_dma_unmap_area .long xsc3_dma_flush_range ENTRY(cpu_xsc3_dcache_clean_area) @@ -407,6 +431,13 @@ __xsc3_setup: adr r5, xsc3_crval ldmia r5, {r5, r6} + +#ifdef CONFIG_CACHE_XSC3L2 + mrc p15, 1, r0, c0, c0, 1 @ get L2 present information + ands r0, r0, #0xf8 + orrne r6, r6, #(1 << 26) @ enable L2 if present +#endif + mrc p15, 0, r0, c1, c0, 0 @ get control register bic r0, r0, r5 @ ..V. ..R. .... ..A. orr r0, r0, r6 @ ..VI Z..S .... 
.C.M (mmu) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 93df47265f2..63037e2162f 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_inv_range) +xscale_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_clean_range) +xscale_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + bcs xscale_dma_inv_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_a0_map_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_a0_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_unmap_area) + mov pc, lr +ENDPROC(xscale_dma_unmap_area) + ENTRY(xscale_cache_fns) .long xscale_flush_kern_cache_all .long xscale_flush_user_cache_all @@ -370,8 +407,8 @@ ENTRY(xscale_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area - .long xscale_dma_inv_range - .long xscale_dma_clean_range + .long xscale_dma_map_area + .long xscale_dma_unmap_area .long xscale_dma_flush_range /* @@ -394,8 +431,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area - .long xscale_dma_flush_range - .long xscale_dma_clean_range + .long xscale_dma_a0_map_area + .long xscale_dma_unmap_area .long xscale_dma_flush_range ENTRY(cpu_xscale_dcache_clean_area) diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index a26a605b73b..0cb1848bd87 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -40,7 +40,6 @@ ENTRY(v7wbi_flush_user_tlb_range) asid r3, r3 @ mask ASID orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT - vma_vm_flags r2, r2 @ get vma->vm_flags 1: #ifdef CONFIG_SMP mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
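Every cache_fns table in this patch swaps its dma_inv_range/dma_clean_range slots for dma_map_area/dma_unmap_area, so each table must stay in step with struct cpu_cache_fns in arch/arm/include/asm/cacheflush.h, which is not part of this diff. A sketch of the affected tail of that structure, hedged accordingly (field order assumed to match the .long tables above):

	struct cpu_cache_fns {
		/* ... flush/coherent entries elided ... */
		void (*flush_kern_dcache_area)(void *addr, size_t size);
		/* replaced slots: were dma_inv_range/dma_clean_range */
		void (*dma_map_area)(const void *start, size_t size, int dir);
		void (*dma_unmap_area)(const void *start, size_t size, int dir);
		void (*dma_flush_range)(const void *start, const void *end);
	};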