author     Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-16 19:38:58 +0100
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-07-30 12:25:46 +0200
commit     955c757e090f41188764a0e488ba7036f7dbb215 (patch)
tree       ad5f20e5caa6331ce889261f3f0a11b3c8deed12
parent     d5724f172fd1406c6962c4d2e27228b8e9e83642 (diff)
ARM: dma-mapping: add support for DMA_ATTR_NO_KERNEL_MAPPING attribute
This patch adds support for the DMA_ATTR_NO_KERNEL_MAPPING attribute for
IOMMU allocations, which lets drivers save precious kernel virtual address
space for large buffers that are intended to be accessed only from userspace.

This patch is heavily based on initial work kindly provided by
Abhinav Kochhar <abhinav.k@samsung.com>.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
-rw-r--r--  arch/arm/mm/dma-mapping.c  | 18
1 file changed, 13 insertions(+), 5 deletions(-)
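For context, this is roughly how a driver would consume the new attribute. The following is a minimal sketch only, assuming the dma_attrs interface of this kernel generation (DEFINE_DMA_ATTRS()/dma_set_attr()/dma_alloc_attrs()); the function and variable names (example_alloc(), buf_cookie, buf_dma) are hypothetical. With the attribute set, the returned pointer is merely an opaque cookie to hand back to dma_mmap_attrs()/dma_free_attrs(), not a kernel virtual address that may be dereferenced.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical driver state: a large buffer only userspace will touch. */
void *buf_cookie;        /* opaque cookie, NOT a kernel VA */
dma_addr_t buf_dma;      /* bus/IOMMU address for the device */

static int example_alloc(struct device *dev, size_t size)
{
        DEFINE_DMA_ATTRS(attrs);

        /* Ask the backend to skip the kernel (vmalloc area) mapping. */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

        buf_cookie = dma_alloc_attrs(dev, size, &buf_dma, GFP_KERNEL, &attrs);
        if (!buf_cookie)
                return -ENOMEM;

        return 0;
}

The pages and the IOMMU mapping still exist; what is saved is the vmalloc space that __iommu_alloc_remap() would otherwise consume for a buffer the kernel itself never reads or writes.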
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cbdeaf4a339..8e505b9df5c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1074,10 +1074,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
         return 0;
 }
 
-static struct page **__iommu_get_pages(void *cpu_addr)
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
         struct vm_struct *area;
 
+        if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+                return cpu_addr;
+
         area = find_vm_area(cpu_addr);
         if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
                 return area->pages;
@@ -1102,6 +1105,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
         if (*handle == DMA_ERROR_CODE)
                 goto err_buffer;
 
+        if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+                return pages;
+
         addr = __iommu_alloc_remap(pages, size, gfp, prot,
                         __builtin_return_address(0));
         if (!addr)
@@ -1122,7 +1128,7 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
         unsigned long uaddr = vma->vm_start;
         unsigned long usize = vma->vm_end - vma->vm_start;
-        struct page **pages = __iommu_get_pages(cpu_addr);
+        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
 
         vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
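Because __iommu_get_pages() can now resolve that cookie through attrs, the buffer can still be handed to userspace even though it has no kernel mapping. Below is a sketch of a matching mmap file operation; example_dev and buf_size are hypothetical, and it assumes the generic dma_mmap_attrs() helper available alongside this series. The attributes passed here must match those used at allocation time.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

extern struct device *example_dev;      /* hypothetical, set at probe time */
extern void *buf_cookie;                /* cookie from example_alloc() above */
extern dma_addr_t buf_dma;
extern size_t buf_size;

/* Hypothetical mmap handler: map the userspace-only buffer into a process. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

        return dma_mmap_attrs(example_dev, vma, buf_cookie, buf_dma,
                              buf_size, &attrs);
}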
@@ -1149,7 +1155,7 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                           dma_addr_t handle, struct dma_attrs *attrs)
 {
-        struct page **pages = __iommu_get_pages(cpu_addr);
+        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
         size = PAGE_ALIGN(size);
 
         if (!pages) {
@@ -1157,8 +1163,10 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                 return;
         }
 
-        unmap_kernel_range((unsigned long)cpu_addr, size);
-        vunmap(cpu_addr);
+        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+                unmap_kernel_range((unsigned long)cpu_addr, size);
+                vunmap(cpu_addr);
+        }
 
         __iommu_remove_mapping(dev, handle, size);
         __iommu_free_buffer(dev, pages, size);
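On teardown the same attribute has to be passed back so that arm_iommu_free_attrs() treats cpu_addr as the pages cookie and skips unmap_kernel_range()/vunmap(). A sketch reusing the hypothetical variables from the snippets above:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

extern void *buf_cookie;        /* cookie from example_alloc() above */
extern dma_addr_t buf_dma;

/* Hypothetical teardown matching example_alloc(). */
static void example_free(struct device *dev, size_t size)
{
        DEFINE_DMA_ATTRS(attrs);

        /*
         * Must match the allocation; without the attribute the cookie
         * would be misinterpreted as a kernel virtual address.
         */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

        dma_free_attrs(dev, size, buf_cookie, buf_dma, &attrs);
        buf_cookie = NULL;
}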