author | Hiroshi Doyu <hdoyu@nvidia.com> | 2012-08-28 08:13:03 +0300
committer | Marek Szyprowski <m.szyprowski@samsung.com> | 2012-08-28 21:01:06 +0200
commit | 665bad7bb911d392000fa69bc6b599c0df992504 (patch)
tree | 12b5ad80e748a79466041a7ac254924e7531f4ce
parent | 21d0a75951ccf71f671eb24b61a8ad2b497be4b4 (diff)
ARM: dma-mapping: Introduce __atomic_get_pages() for __iommu_get_pages()
Support atomic allocation in __iommu_get_pages().
Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
[moved __atomic_get_pages() under #ifdef CONFIG_ARM_DMA_USE_IOMMU
to avoid unused function warning for no-IOMMU case]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 12
1 file changed, 12 insertions, 0 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 882eacc6ebc..54b158df5af 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1136,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 	return 0;
 }
 
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
 	struct vm_struct *area;
 
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
 	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
 		return cpu_addr;
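The added helper resolves an address from the atomic pool back to its entry in the pool's pages[] array by plain pointer arithmetic: the byte offset of the address within the pool's virtual range, shifted right by PAGE_SHIFT, is the page index. The sketch below illustrates that lookup in ordinary userspace C; the demo_* names, the fixed four-page pool, and the 4 KiB page size are hypothetical stand-ins for the kernel's atomic_pool and struct page machinery, not kernel API.

/*
 * Minimal userspace sketch of the lookup __atomic_get_pages() performs:
 * given an address inside a pre-allocated pool, return the matching entry
 * of the pool's pages[] array by computing the page offset of the address.
 * All demo_* names are made up for this illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_POOL_PAGES 4

struct demo_page { int id; };           /* stand-in for struct page */

struct demo_pool {
	void *vaddr;                    /* start of the pool's virtual range */
	size_t size;                    /* pool size in bytes */
	struct demo_page **pages;       /* one entry per page in the pool */
};

/* Rough analogue of __in_atomic_pool(): does [start, start + size) lie in the pool? */
static int demo_in_pool(struct demo_pool *pool, void *start, size_t size)
{
	char *begin = pool->vaddr;
	char *p = start;

	return p >= begin && p + size <= begin + pool->size;
}

/* Rough analogue of __atomic_get_pages(): index pages[] by the page offset. */
static struct demo_page **demo_get_pages(struct demo_pool *pool, void *addr)
{
	size_t offs = (size_t)((char *)addr - (char *)pool->vaddr) >> DEMO_PAGE_SHIFT;

	return pool->pages + offs;
}

int main(void)
{
	static struct demo_page backing[DEMO_POOL_PAGES];
	static struct demo_page *pages[DEMO_POOL_PAGES];
	struct demo_pool pool;
	void *addr;
	size_t i;

	/* Build a fake four-page pool whose "pages" are just numbered structs. */
	pool.vaddr = malloc(DEMO_POOL_PAGES * DEMO_PAGE_SIZE);
	if (!pool.vaddr)
		return 1;
	pool.size = DEMO_POOL_PAGES * DEMO_PAGE_SIZE;
	pool.pages = pages;
	for (i = 0; i < DEMO_POOL_PAGES; i++) {
		backing[i].id = (int)i;
		pages[i] = &backing[i];
	}

	/* An address two pages into the pool resolves to pages[2]. */
	addr = (char *)pool.vaddr + 2 * DEMO_PAGE_SIZE;
	if (demo_in_pool(&pool, addr, DEMO_PAGE_SIZE))
		printf("addr maps to pool page %d\n",
		       (*demo_get_pages(&pool, addr))->id);

	free(pool.vaddr);
	return 0;
}

In the patch itself the __in_atomic_pool() check runs before the vm_struct-based path, so addresses handed out from the atomic pool are resolved through the pool's own pages array instead of the lookup used for regular, non-atomic allocations.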