| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2014-07-07 10:17:56 +0200 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2014-07-07 10:17:56 +0200 |
| commit | f1615bbe9be4def59c3b3eaddb60722efeed16c2 (patch) | |
| tree | ca3020e65447576fc1826e819651e6ba072030b5 /arch/arm/mm/dma-mapping.c | |
| parent | cfb3c0ab0903abb6ea5215b37eebd9c2a1f057eb (diff) | |
| parent | cd3de83f147601356395b57a8673e9c5ff1e59d1 (diff) | |
Merge tag 'v3.16-rc4' into drm-intel-next-queued
Due to Dave's vacation drm-next hasn't opened yet for 3.17 so I
couldn't move my drm-intel-next queue forward yet like I usually do.
Just pull in the latest upstream -rc to unblock patch merging - I
don't want to needlessly rebase my current patch pile really and void
all the testing we've done already.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 28 |
1 file changed, 15 insertions(+), 13 deletions(-)
```diff
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b00be1f971..4c88935654c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -390,7 +390,7 @@ static int __init atomic_pool_init(void)
 	if (!pages)
 		goto no_pages;
 
-	if (IS_ENABLED(CONFIG_DMA_CMA))
+	if (dev_get_cma_area(NULL))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
 					      atomic_pool_init);
 	else
@@ -701,7 +701,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!IS_ENABLED(CONFIG_DMA_CMA))
+	else if (!dev_get_cma_area(dev))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else
 		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -790,7 +790,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
-	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
+	} else if (!dev_get_cma_area(dev)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
@@ -885,7 +885,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	unsigned long paddr;
+	phys_addr_t paddr;
 
 	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
 
@@ -901,14 +901,15 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	unsigned long paddr = page_to_phys(page) + off;
+	phys_addr_t paddr = page_to_phys(page) + off;
 
 	/* FIXME: non-speculating: not required */
-	/* don't bother invalidating if DMA to device */
-	if (dir != DMA_TO_DEVICE)
+	/* in any case, don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE) {
 		outer_inv_range(paddr, paddr + size);
 
-	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+	}
 
 	/*
 	 * Mark the D-cache clean for these pages to avoid extra flushing.
```
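Two behavioural changes in the hunks above are worth calling out. First, the compile-time IS_ENABLED(CONFIG_DMA_CMA) checks become runtime dev_get_cma_area() lookups, so the CMA allocation path is taken only when the device (or, for a NULL device, the global default area) actually has a CMA region, not merely because CMA support was compiled in. Second, in __dma_page_dev_to_cpu() the inner-cache maintenance (dmac_unmap_area) moves inside the dir != DMA_TO_DEVICE branch, so no cache maintenance at all is done for buffers that were only DMA'd *to* the device; paddr is also widened from unsigned long to phys_addr_t so the outer-cache range stays correct when physical addresses exceed 32 bits (e.g. under LPAE). Below is a minimal userspace sketch of that control-flow change; the two helpers are stand-ins for the kernel's cache routines, not the real implementations:

```c
/*
 * Minimal userspace sketch (not the kernel code) of the branch change
 * in __dma_page_dev_to_cpu: after the patch, dmac_unmap_area runs in
 * the same branch as outer_inv_range, so *neither* runs for
 * DMA_TO_DEVICE. outer_inv_range/dmac_unmap_area here are stubs.
 */
#include <stdio.h>

enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

static void outer_inv_range(void) { puts("  outer_inv_range"); }
static void dmac_unmap_area(void) { puts("  dmac_unmap_area"); }

static void dev_to_cpu(enum dma_data_direction dir)
{
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range();
		dmac_unmap_area();	/* previously ran unconditionally */
	}
}

int main(void)
{
	puts("DMA_TO_DEVICE (no maintenance expected):");
	dev_to_cpu(DMA_TO_DEVICE);
	puts("DMA_FROM_DEVICE (both calls expected):");
	dev_to_cpu(DMA_FROM_DEVICE);
	return 0;
}
```

The remaining hunks, below, rework the IOMMU IOVA allocator.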
```diff
@@ -1074,6 +1075,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int order = get_order(size);
 	unsigned int align = 0;
 	unsigned int count, start;
+	size_t mapping_size = mapping->bits << PAGE_SHIFT;
 	unsigned long flags;
 	dma_addr_t iova;
 	int i;
@@ -1119,7 +1121,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	iova = mapping->base + (mapping->size * i);
+	iova = mapping->base + (mapping_size * i);
 	iova += start << PAGE_SHIFT;
 
 	return iova;
@@ -1129,6 +1131,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
 	unsigned int start, count;
+	size_t mapping_size = mapping->bits << PAGE_SHIFT;
 	unsigned long flags;
 	dma_addr_t bitmap_base;
 	u32 bitmap_index;
@@ -1136,14 +1139,14 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	if (!size)
 		return;
 
-	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
 	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
 
-	bitmap_base = mapping->base + mapping->size * bitmap_index;
+	bitmap_base = mapping->base + mapping_size * bitmap_index;
 
 	start = (addr - bitmap_base) >> PAGE_SHIFT;
 
-	if (addr + size > bitmap_base + mapping->size) {
+	if (addr + size > bitmap_base + mapping_size) {
 		/*
 		 * The address range to be freed reaches into the iova
 		 * range of the next bitmap. This should not happen as
@@ -1964,7 +1967,6 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 	mapping->extensions = extensions;
 	mapping->base = base;
 	mapping->bits = BITS_PER_BYTE * bitmap_size;
-	mapping->size = mapping->bits << PAGE_SHIFT;
 
 	spin_lock_init(&mapping->lock);
```
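These allocator hunks drop the cached size field from struct dma_iommu_mapping (the assignment removed in the last hunk) and instead derive the per-bitmap span locally in both __alloc_iova() and __free_iova(): each bitmap extension tracks mapping->bits pages, i.e. mapping->bits << PAGE_SHIFT bytes of IOVA space, and extension i covers the range starting at mapping->base + i * mapping_size. A standalone sketch of that arithmetic follows; all concrete values (PAGE_SHIFT, bitmap_size, base, i, start) are illustrative assumptions, not values from the commit:

```c
/*
 * Standalone sketch of the IOVA arithmetic in __alloc_iova() and
 * __free_iova() after the patch. Every concrete value below is an
 * assumption chosen for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE	8
#define PAGE_SHIFT	12	/* assume 4 KiB pages */

int main(void)
{
	size_t bitmap_size  = 1024;				/* bytes per bitmap (assumed) */
	size_t bits         = BITS_PER_BYTE * bitmap_size;	/* pages tracked per bitmap: 8192 */
	size_t mapping_size = bits << PAGE_SHIFT;		/* IOVA bytes per bitmap: 32 MiB */

	uint64_t base      = 0x80000000ULL;	/* assumed IOVA window base */
	unsigned int i     = 2;			/* bitmap (extension) index */
	unsigned int start = 5;			/* first free page in that bitmap */

	/* Same formula as __alloc_iova() after the patch. */
	uint64_t iova = base + (uint64_t)mapping_size * i
			     + ((uint64_t)start << PAGE_SHIFT);

	/* And the inverse, as in __free_iova(). */
	uint32_t bitmap_index = (uint32_t)((iova - base) / mapping_size);
	uint64_t bitmap_base  = base + (uint64_t)mapping_size * bitmap_index;

	printf("mapping_size = %zu MiB\n", mapping_size >> 20);
	printf("iova         = %#llx (bitmap %u, page %llu)\n",
	       (unsigned long long)iova, bitmap_index,
	       (unsigned long long)((iova - bitmap_base) >> PAGE_SHIFT));
	return 0;
}
```

Round-tripping an address through the two formulas recovers the bitmap index and page offset, which is exactly the invariant __free_iova() relies on when it checks whether a freed range spills into the next bitmap.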