author     Hiroshi Doyu <hdoyu@nvidia.com>                2012-08-28 08:13:01 +0300
committer  Marek Szyprowski <m.szyprowski@samsung.com>    2012-08-28 21:01:05 +0200
commit     6b3fe47264262fa082897ebe8ae01041eae65e14 (patch)
tree       a3802f904ad0f9b0b9e4565173ed24c95915987c /arch/arm/mm
parent     cb01b633eeb77ae7128cab0a3b5d3de56da6e913 (diff)
ARM: dma-mapping: atomic_pool with struct page **pages
struct page **pages is necessary to align with the non-atomic path in __iommu_get_pages(). atomic_pool now holds the initialized **pages array instead of just a single *page.

Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c  |  17
1 file changed, 14 insertions(+), 3 deletions(-)
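
For orientation, a minimal sketch of the idea behind the change (not part of this patch; struct example_pool and example_pool_page() are illustrative names): once the pool records a struct page * per PAGE_SIZE chunk, a lookup simply indexes pages[], which is exactly the representation the non-atomic __iommu_get_pages() path already works with, and it no longer assumes the pool is backed by one physically contiguous run of pages.

#include <linux/mm_types.h>

/* Illustrative pool shape: one pages[] entry per PAGE_SIZE chunk. */
struct example_pool {
        struct page **pages;
        unsigned long nr_pages;
};

/*
 * Return the struct page backing the pageno-th chunk of the pool.
 * Indexing the array works whether or not the chunks are physically
 * contiguous; pointer arithmetic on a single struct page * would not.
 */
static struct page *example_pool_page(struct example_pool *pool,
                                      unsigned long pageno)
{
        if (pageno >= pool->nr_pages)
                return NULL;
        return pool->pages[pageno];
}
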
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index acced933210..9a21284a6ac 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -275,7 +275,7 @@ struct dma_pool {
         unsigned long *bitmap;
         unsigned long nr_pages;
         void *vaddr;
-        struct page *page;
+        struct page **pages;
 };
 
 static struct dma_pool atomic_pool = {
@@ -314,6 +314,7 @@ static int __init atomic_pool_init(void)
         unsigned long nr_pages = pool->size >> PAGE_SHIFT;
         unsigned long *bitmap;
         struct page *page;
+        struct page **pages;
         void *ptr;
         int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
@@ -321,21 +322,31 @@ static int __init atomic_pool_init(void)
         if (!bitmap)
                 goto no_bitmap;
 
+        pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+        if (!pages)
+                goto no_pages;
+
         if (IS_ENABLED(CONFIG_CMA))
                 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
         else
                 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
                                            &page, NULL);
         if (ptr) {
+                int i;
+
+                for (i = 0; i < nr_pages; i++)
+                        pages[i] = page + i;
+
                 spin_lock_init(&pool->lock);
                 pool->vaddr = ptr;
-                pool->page = page;
+                pool->pages = pages;
                 pool->bitmap = bitmap;
                 pool->nr_pages = nr_pages;
                 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
                        (unsigned)pool->size / 1024);
                 return 0;
         }
+no_pages:
         kfree(bitmap);
 no_bitmap:
         pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -460,7 +471,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
         if (pageno < pool->nr_pages) {
                 bitmap_set(pool->bitmap, pageno, count);
                 ptr = pool->vaddr + PAGE_SIZE * pageno;
-                *ret_page = pool->page + pageno;
+                *ret_page = pool->pages[pageno];
         } else {
                 pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
                             "Please increase it with coherent_pool= kernel parameter!\n",