Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/copypage-v4mc.c |  2
-rw-r--r--  arch/arm/mm/dma-mapping.c   | 28
-rw-r--r--  arch/arm/mm/ioremap.c       | 11
-rw-r--r--  arch/arm/mm/proc-syms.c     |  1
4 files changed, 18 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index bdb5fd983b1..1601698b980 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -68,7 +68,7 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 67960017dc8..310e479309e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct arm_vm_region {
 	struct list_head vm_list;
 	unsigned long vm_start;
 	unsigned long vm_end;
@@ -79,20 +79,20 @@ struct vm_region {
 	int vm_active;
 };
 
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
 	.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start = CONSISTENT_BASE,
 	.vm_end = CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct arm_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct arm_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = arm_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
 	unsigned long flags, user_size, kern_size;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	if (c) {
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 	int idx;
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 18373f73f2f..9f88dd3be60 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -138,7 +138,7 @@ void __check_kvm_seq(struct mm_struct *mm)
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
-	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
 
 	flush_cache_vunmap(addr, end);
@@ -337,10 +337,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
-#endif
-	unsigned int section_mapping = 0;
 
-#ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
@@ -352,11 +349,8 @@ void __iounmap(volatile void __iomem *io_addr)
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
 						    tmp->size);
-				kfree(tmp);
-				section_mapping = 1;
 			}
 			break;
 		}
@@ -364,7 +358,6 @@ void __iounmap(volatile void __iomem *io_addr)
 	write_unlock(&vmlist_lock);
 #endif
 
-	if (!section_mapping)
-		vunmap(addr);
+	vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 4ad3bf291ad..195e48edd8c 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+EXPORT_SYMBOL(dmac_inv_range);	/* because of flush_ioremap_region() */
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
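For context, a minimal sketch (not part of the commit) of how a driver might use the coherent DMA API whose backing allocator the dma-mapping.c hunks rename from vm_region to arm_vm_region. The helper names and buffer size are illustrative only; dma_alloc_coherent() and dma_free_coherent() are the entry points that reach __dma_alloc() and the region lookup shown above.

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/gfp.h>

/*
 * Hypothetical helpers: allocate and free one page of coherent memory.
 * On ARM, dma_alloc_coherent() ends up in __dma_alloc(), which carves the
 * CPU-side virtual range out of the arm_vm_region consistent mapping list.
 */
static void *example_coherent_alloc(struct device *dev, dma_addr_t *bus_addr)
{
	/* GFP_KERNEL: assumes process context that may sleep */
	return dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);
}

static void example_coherent_free(struct device *dev, void *cpu_addr,
				  dma_addr_t bus_addr)
{
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
}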
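Similarly, a hypothetical ioremap()/iounmap() pair for a made-up register window, showing the path that ends in __iounmap() above; for ARM section mappings the teardown goes through unmap_area_sections() rather than the generic vmalloc bookkeeping. Names and addresses here are illustrative only.

#include <linux/io.h>

/* Map a device register window; may return NULL on failure. */
static void __iomem *example_map_regs(unsigned long phys, unsigned long len)
{
	return ioremap(phys, len);
}

/* Undo the mapping; iounmap() reaches __iounmap() on ARM. */
static void example_unmap_regs(void __iomem *regs)
{
	if (regs)
		iounmap(regs);
}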