From fcf3a6ef4a588c9f06ad7b01c83534ab81985a3f Mon Sep 17 00:00:00 2001 From: Ohad Ben-Cohen Date: Mon, 15 Aug 2011 23:21:41 +0300 Subject: omap: iommu/iovmm: move to dedicated iommu folder Move OMAP's iommu drivers to the dedicated iommu drivers folder. While OMAP's iovmm (virtual memory manager) driver does not strictly belong to the iommu drivers folder, move it there as well, because it's by no means OMAP-specific (in concept. technically it is still coupled with OMAP's iommu). Eventually, iovmm will be completely replaced with the generic, iommu-based, dma-mapping API. Signed-off-by: Ohad Ben-Cohen Acked-by: Laurent Pinchart Acked-by: Hiroshi DOYU Acked-by: Tony Lindgren Signed-off-by: Joerg Roedel --- drivers/iommu/omap-iovmm.c | 923 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 923 insertions(+) create mode 100644 drivers/iommu/omap-iovmm.c (limited to 'drivers/iommu/omap-iovmm.c') diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c new file mode 100644 index 00000000000..809ca124196 --- /dev/null +++ b/drivers/iommu/omap-iovmm.c @@ -0,0 +1,923 @@ +/* + * omap iommu: simple virtual address space management + * + * Copyright (C) 2008-2009 Nokia Corporation + * + * Written by Hiroshi DOYU + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +/* + * A device driver needs to create address mappings between: + * + * - iommu/device address + * - physical address + * - mpu virtual address + * + * There are 4 possible patterns for them: + * + * |iova/ mapping iommu_ page + * | da pa va (d)-(p)-(v) function type + * --------------------------------------------------------------------------- + * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s + * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s + * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s + * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n* + * + * + * 'iova': device iommu virtual address + * 'da': alias of 'iova' + * 'pa': physical address + * 'va': mpu virtual address + * + * 'c': contiguous memory area + * 'd': discontiguous memory area + * 'a': anonymous memory allocation + * '()': optional feature + * + * 'n': a normal page(4KB) size is used. + * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. + * + * '*': not yet, but feasible. + */ + +static struct kmem_cache *iovm_area_cachep; + +/* return total bytes of sg buffers */ +static size_t sgtable_len(const struct sg_table *sgt) +{ + unsigned int i, total = 0; + struct scatterlist *sg; + + if (!sgt) + return 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes; + + bytes = sg->length; + + if (!iopgsz_ok(bytes)) { + pr_err("%s: sg[%d] not iommu pagesize(%x)\n", + __func__, i, bytes); + return 0; + } + + total += bytes; + } + + return total; +} +#define sgtable_ok(x) (!!sgtable_len(x)) + +static unsigned max_alignment(u32 addr) +{ + int i; + unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, }; + for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++) + ; + return (i < ARRAY_SIZE(pagesize)) ? 
pagesize[i] : 0; +} + +/* + * calculate the optimal number sg elements from total bytes based on + * iommu superpages + */ +static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa) +{ + unsigned nr_entries = 0, ent_sz; + + if (!IS_ALIGNED(bytes, PAGE_SIZE)) { + pr_err("%s: wrong size %08x\n", __func__, bytes); + return 0; + } + + while (bytes) { + ent_sz = max_alignment(da | pa); + ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes)); + nr_entries++; + da += ent_sz; + pa += ent_sz; + bytes -= ent_sz; + } + + return nr_entries; +} + +/* allocate and initialize sg_table header(a kind of 'superblock') */ +static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags, + u32 da, u32 pa) +{ + unsigned int nr_entries; + int err; + struct sg_table *sgt; + + if (!bytes) + return ERR_PTR(-EINVAL); + + if (!IS_ALIGNED(bytes, PAGE_SIZE)) + return ERR_PTR(-EINVAL); + + if (flags & IOVMF_LINEAR) { + nr_entries = sgtable_nents(bytes, da, pa); + if (!nr_entries) + return ERR_PTR(-EINVAL); + } else + nr_entries = bytes / PAGE_SIZE; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL); + if (err) { + kfree(sgt); + return ERR_PTR(err); + } + + pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries); + + return sgt; +} + +/* free sg_table header(a kind of superblock) */ +static void sgtable_free(struct sg_table *sgt) +{ + if (!sgt) + return; + + sg_free_table(sgt); + kfree(sgt); + + pr_debug("%s: sgt:%p\n", __func__, sgt); +} + +/* map 'sglist' to a contiguous mpu virtual area and return 'va' */ +static void *vmap_sg(const struct sg_table *sgt) +{ + u32 va; + size_t total; + unsigned int i; + struct scatterlist *sg; + struct vm_struct *new; + const struct mem_type *mtype; + + mtype = get_mem_type(MT_DEVICE); + if (!mtype) + return ERR_PTR(-EINVAL); + + total = sgtable_len(sgt); + if (!total) + return ERR_PTR(-EINVAL); + + new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END); + if (!new) + return ERR_PTR(-ENOMEM); + va = (u32)new->addr; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes; + u32 pa; + int err; + + pa = sg_phys(sg); + bytes = sg->length; + + BUG_ON(bytes != PAGE_SIZE); + + err = ioremap_page(va, pa, mtype); + if (err) + goto err_out; + + va += bytes; + } + + flush_cache_vmap((unsigned long)new->addr, + (unsigned long)(new->addr + total)); + return new->addr; + +err_out: + WARN_ON(1); /* FIXME: cleanup some mpu mappings */ + vunmap(new->addr); + return ERR_PTR(-EAGAIN); +} + +static inline void vunmap_sg(const void *va) +{ + vunmap(va); +} + +static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) +{ + struct iovm_struct *tmp; + + list_for_each_entry(tmp, &obj->mmap, list) { + if ((da >= tmp->da_start) && (da < tmp->da_end)) { + size_t len; + + len = tmp->da_end - tmp->da_start; + + dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", + __func__, tmp->da_start, da, tmp->da_end, len, + tmp->flags); + + return tmp; + } + } + + return NULL; +} + +/** + * find_iovm_area - find iovma which includes @da + * @da: iommu device virtual address + * + * Find the existing iovma starting at @da + */ +struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) +{ + struct iovm_struct *area; + + mutex_lock(&obj->mmap_lock); + area = __find_iovm_area(obj, da); + mutex_unlock(&obj->mmap_lock); + + return area; +} +EXPORT_SYMBOL_GPL(find_iovm_area); + +/* + * This finds the hole(area) which fits the requested address and len + * in iovmas mmap, and returns the new 
allocated iovma. + */ +static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, + size_t bytes, u32 flags) +{ + struct iovm_struct *new, *tmp; + u32 start, prev_end, alignment; + + if (!obj || !bytes) + return ERR_PTR(-EINVAL); + + start = da; + alignment = PAGE_SIZE; + + if (~flags & IOVMF_DA_FIXED) { + /* Don't map address 0 */ + start = obj->da_start ? obj->da_start : alignment; + + if (flags & IOVMF_LINEAR) + alignment = iopgsz_max(bytes); + start = roundup(start, alignment); + } else if (start < obj->da_start || start > obj->da_end || + obj->da_end - start < bytes) { + return ERR_PTR(-EINVAL); + } + + tmp = NULL; + if (list_empty(&obj->mmap)) + goto found; + + prev_end = 0; + list_for_each_entry(tmp, &obj->mmap, list) { + + if (prev_end > start) + break; + + if (tmp->da_start > start && (tmp->da_start - start) >= bytes) + goto found; + + if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED) + start = roundup(tmp->da_end + 1, alignment); + + prev_end = tmp->da_end; + } + + if ((start >= prev_end) && (obj->da_end - start >= bytes)) + goto found; + + dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n", + __func__, da, bytes, flags); + + return ERR_PTR(-EINVAL); + +found: + new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL); + if (!new) + return ERR_PTR(-ENOMEM); + + new->iommu = obj; + new->da_start = start; + new->da_end = start + bytes; + new->flags = flags; + + /* + * keep ascending order of iovmas + */ + if (tmp) + list_add_tail(&new->list, &tmp->list); + else + list_add(&new->list, &obj->mmap); + + dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n", + __func__, new->da_start, start, new->da_end, bytes, flags); + + return new; +} + +static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) +{ + size_t bytes; + + BUG_ON(!obj || !area); + + bytes = area->da_end - area->da_start; + + dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n", + __func__, area->da_start, area->da_end, bytes, area->flags); + + list_del(&area->list); + kmem_cache_free(iovm_area_cachep, area); +} + +/** + * da_to_va - convert (d) to (v) + * @obj: objective iommu + * @da: iommu device virtual address + * @va: mpu virtual address + * + * Returns mpu virtual addr which corresponds to a given device virtual addr + */ +void *da_to_va(struct iommu *obj, u32 da) +{ + void *va = NULL; + struct iovm_struct *area; + + mutex_lock(&obj->mmap_lock); + + area = __find_iovm_area(obj, da); + if (!area) { + dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); + goto out; + } + va = area->va; +out: + mutex_unlock(&obj->mmap_lock); + + return va; +} +EXPORT_SYMBOL_GPL(da_to_va); + +static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) +{ + unsigned int i; + struct scatterlist *sg; + void *va = _va; + void *va_end; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + struct page *pg; + const size_t bytes = PAGE_SIZE; + + /* + * iommu 'superpage' isn't supported with 'iommu_vmalloc()' + */ + pg = vmalloc_to_page(va); + BUG_ON(!pg); + sg_set_page(sg, pg, bytes, 0); + + va += bytes; + } + + va_end = _va + PAGE_SIZE * i; +} + +static inline void sgtable_drain_vmalloc(struct sg_table *sgt) +{ + /* + * Actually this is not necessary at all, just exists for + * consistency of the code readability. 
+ */ + BUG_ON(!sgt); +} + +static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, + size_t len) +{ + unsigned int i; + struct scatterlist *sg; + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + unsigned bytes; + + bytes = max_alignment(da | pa); + bytes = min_t(unsigned, bytes, iopgsz_max(len)); + + BUG_ON(!iopgsz_ok(bytes)); + + sg_set_buf(sg, phys_to_virt(pa), bytes); + /* + * 'pa' is cotinuous(linear). + */ + pa += bytes; + da += bytes; + len -= bytes; + } + BUG_ON(len); +} + +static inline void sgtable_drain_kmalloc(struct sg_table *sgt) +{ + /* + * Actually this is not necessary at all, just exists for + * consistency of the code readability + */ + BUG_ON(!sgt); +} + +/* create 'da' <-> 'pa' mapping from 'sgt' */ +static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, + const struct sg_table *sgt, u32 flags) +{ + int err; + unsigned int i, j; + struct scatterlist *sg; + u32 da = new->da_start; + int order; + + if (!domain || !sgt) + return -EINVAL; + + BUG_ON(!sgtable_ok(sgt)); + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + u32 pa; + size_t bytes; + + pa = sg_phys(sg); + bytes = sg->length; + + flags &= ~IOVMF_PGSZ_MASK; + + if (bytes_to_iopgsz(bytes) < 0) + goto err_out; + + order = get_order(bytes); + + pr_debug("%s: [%d] %08x %08x(%x)\n", __func__, + i, da, pa, bytes); + + err = iommu_map(domain, da, pa, order, flags); + if (err) + goto err_out; + + da += bytes; + } + return 0; + +err_out: + da = new->da_start; + + for_each_sg(sgt->sgl, sg, i, j) { + size_t bytes; + + bytes = sg->length; + order = get_order(bytes); + + /* ignore failures.. we're already handling one */ + iommu_unmap(domain, da, order); + + da += bytes; + } + return err; +} + +/* release 'da' <-> 'pa' mapping */ +static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj, + struct iovm_struct *area) +{ + u32 start; + size_t total = area->da_end - area->da_start; + const struct sg_table *sgt = area->sgt; + struct scatterlist *sg; + int i, err; + + BUG_ON(!sgtable_ok(sgt)); + BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE)); + + start = area->da_start; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + size_t bytes; + int order; + + bytes = sg->length; + order = get_order(bytes); + + err = iommu_unmap(domain, start, order); + if (err) + break; + + dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n", + __func__, start, bytes, area->flags); + + BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); + + total -= bytes; + start += bytes; + } + BUG_ON(total); +} + +/* template function for all unmapping */ +static struct sg_table *unmap_vm_area(struct iommu_domain *domain, + struct iommu *obj, const u32 da, + void (*fn)(const void *), u32 flags) +{ + struct sg_table *sgt = NULL; + struct iovm_struct *area; + + if (!IS_ALIGNED(da, PAGE_SIZE)) { + dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da); + return NULL; + } + + mutex_lock(&obj->mmap_lock); + + area = __find_iovm_area(obj, da); + if (!area) { + dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da); + goto out; + } + + if ((area->flags & flags) != flags) { + dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__, + area->flags); + goto out; + } + sgt = (struct sg_table *)area->sgt; + + unmap_iovm_area(domain, obj, area); + + fn(area->va); + + dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__, + area->da_start, da, area->da_end, + area->da_end - area->da_start, area->flags); + + free_iovm_area(obj, area); +out: + mutex_unlock(&obj->mmap_lock); + + return sgt; +} + +static u32 map_iommu_region(struct iommu_domain 
*domain, struct iommu *obj, + u32 da, const struct sg_table *sgt, void *va, + size_t bytes, u32 flags) +{ + int err = -ENOMEM; + struct iovm_struct *new; + + mutex_lock(&obj->mmap_lock); + + new = alloc_iovm_area(obj, da, bytes, flags); + if (IS_ERR(new)) { + err = PTR_ERR(new); + goto err_alloc_iovma; + } + new->va = va; + new->sgt = sgt; + + if (map_iovm_area(domain, new, sgt, new->flags)) + goto err_map; + + mutex_unlock(&obj->mmap_lock); + + dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n", + __func__, new->da_start, bytes, new->flags, va); + + return new->da_start; + +err_map: + free_iovm_area(obj, new); +err_alloc_iovma: + mutex_unlock(&obj->mmap_lock); + return err; +} + +static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj, + u32 da, const struct sg_table *sgt, + void *va, size_t bytes, u32 flags) +{ + return map_iommu_region(domain, obj, da, sgt, va, bytes, flags); +} + +/** + * iommu_vmap - (d)-(p)-(v) address mapper + * @obj: objective iommu + * @sgt: address of scatter gather table + * @flags: iovma and page property + * + * Creates 1-n-1 mapping with given @sgt and returns @da. + * All @sgt element must be io page size aligned. + */ +u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da, + const struct sg_table *sgt, u32 flags) +{ + size_t bytes; + void *va = NULL; + + if (!obj || !obj->dev || !sgt) + return -EINVAL; + + bytes = sgtable_len(sgt); + if (!bytes) + return -EINVAL; + bytes = PAGE_ALIGN(bytes); + + if (flags & IOVMF_MMIO) { + va = vmap_sg(sgt); + if (IS_ERR(va)) + return PTR_ERR(va); + } + + flags |= IOVMF_DISCONT; + flags |= IOVMF_MMIO; + + da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); + if (IS_ERR_VALUE(da)) + vunmap_sg(va); + + return da; +} +EXPORT_SYMBOL_GPL(iommu_vmap); + +/** + * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' + * @obj: objective iommu + * @da: iommu device virtual address + * + * Free the iommu virtually contiguous memory area starting at + * @da, which was returned by 'iommu_vmap()'. + */ +struct sg_table * +iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da) +{ + struct sg_table *sgt; + /* + * 'sgt' is allocated before 'iommu_vmalloc()' is called. + * Just returns 'sgt' to the caller to free + */ + sgt = unmap_vm_area(domain, obj, da, vunmap_sg, + IOVMF_DISCONT | IOVMF_MMIO); + if (!sgt) + dev_dbg(obj->dev, "%s: No sgt\n", __func__); + return sgt; +} +EXPORT_SYMBOL_GPL(iommu_vunmap); + +/** + * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper + * @obj: objective iommu + * @da: contiguous iommu virtual memory + * @bytes: allocation size + * @flags: iovma and page property + * + * Allocate @bytes linearly and creates 1-n-1 mapping and returns + * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. 
+ */ +u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da, + size_t bytes, u32 flags) +{ + void *va; + struct sg_table *sgt; + + if (!obj || !obj->dev || !bytes) + return -EINVAL; + + bytes = PAGE_ALIGN(bytes); + + va = vmalloc(bytes); + if (!va) + return -ENOMEM; + + flags |= IOVMF_DISCONT; + flags |= IOVMF_ALLOC; + + sgt = sgtable_alloc(bytes, flags, da, 0); + if (IS_ERR(sgt)) { + da = PTR_ERR(sgt); + goto err_sgt_alloc; + } + sgtable_fill_vmalloc(sgt, va); + + da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags); + if (IS_ERR_VALUE(da)) + goto err_iommu_vmap; + + return da; + +err_iommu_vmap: + sgtable_drain_vmalloc(sgt); + sgtable_free(sgt); +err_sgt_alloc: + vfree(va); + return da; +} +EXPORT_SYMBOL_GPL(iommu_vmalloc); + +/** + * iommu_vfree - release memory allocated by 'iommu_vmalloc()' + * @obj: objective iommu + * @da: iommu device virtual address + * + * Frees the iommu virtually continuous memory area starting at + * @da, as obtained from 'iommu_vmalloc()'. + */ +void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da) +{ + struct sg_table *sgt; + + sgt = unmap_vm_area(domain, obj, da, vfree, + IOVMF_DISCONT | IOVMF_ALLOC); + if (!sgt) + dev_dbg(obj->dev, "%s: No sgt\n", __func__); + sgtable_free(sgt); +} +EXPORT_SYMBOL_GPL(iommu_vfree); + +static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj, + u32 da, u32 pa, void *va, size_t bytes, u32 flags) +{ + struct sg_table *sgt; + + sgt = sgtable_alloc(bytes, flags, da, pa); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + sgtable_fill_kmalloc(sgt, pa, da, bytes); + + da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags); + if (IS_ERR_VALUE(da)) { + sgtable_drain_kmalloc(sgt); + sgtable_free(sgt); + } + + return da; +} + +/** + * iommu_kmap - (d)-(p)-(v) address mapper + * @obj: objective iommu + * @da: contiguous iommu virtual memory + * @pa: contiguous physical memory + * @flags: iovma and page property + * + * Creates 1-1-1 mapping and returns @da again, which can be + * adjusted if 'IOVMF_DA_FIXED' is not set. + */ +u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa, + size_t bytes, u32 flags) +{ + void *va; + + if (!obj || !obj->dev || !bytes) + return -EINVAL; + + bytes = PAGE_ALIGN(bytes); + + va = ioremap(pa, bytes); + if (!va) + return -ENOMEM; + + flags |= IOVMF_LINEAR; + flags |= IOVMF_MMIO; + + da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags); + if (IS_ERR_VALUE(da)) + iounmap(va); + + return da; +} +EXPORT_SYMBOL_GPL(iommu_kmap); + +/** + * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' + * @obj: objective iommu + * @da: iommu device virtual address + * + * Frees the iommu virtually contiguous memory area starting at + * @da, which was passed to and was returned by'iommu_kmap()'. + */ +void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da) +{ + struct sg_table *sgt; + typedef void (*func_t)(const void *); + + sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap, + IOVMF_LINEAR | IOVMF_MMIO); + if (!sgt) + dev_dbg(obj->dev, "%s: No sgt\n", __func__); + sgtable_free(sgt); +} +EXPORT_SYMBOL_GPL(iommu_kunmap); + +/** + * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper + * @obj: objective iommu + * @da: contiguous iommu virtual memory + * @bytes: bytes for allocation + * @flags: iovma and page property + * + * Allocate @bytes linearly and creates 1-1-1 mapping and returns + * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. 
+ */ +u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da, + size_t bytes, u32 flags) +{ + void *va; + u32 pa; + + if (!obj || !obj->dev || !bytes) + return -EINVAL; + + bytes = PAGE_ALIGN(bytes); + + va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); + if (!va) + return -ENOMEM; + pa = virt_to_phys(va); + + flags |= IOVMF_LINEAR; + flags |= IOVMF_ALLOC; + + da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags); + if (IS_ERR_VALUE(da)) + kfree(va); + + return da; +} +EXPORT_SYMBOL_GPL(iommu_kmalloc); + +/** + * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' + * @obj: objective iommu + * @da: iommu device virtual address + * + * Frees the iommu virtually contiguous memory area starting at + * @da, which was passed to and was returned by'iommu_kmalloc()'. + */ +void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da) +{ + struct sg_table *sgt; + + sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); + if (!sgt) + dev_dbg(obj->dev, "%s: No sgt\n", __func__); + sgtable_free(sgt); +} +EXPORT_SYMBOL_GPL(iommu_kfree); + + +static int __init iovmm_init(void) +{ + const unsigned long flags = SLAB_HWCACHE_ALIGN; + struct kmem_cache *p; + + p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0, + flags, NULL); + if (!p) + return -ENOMEM; + iovm_area_cachep = p; + + return 0; +} +module_init(iovmm_init); + +static void __exit iovmm_exit(void) +{ + kmem_cache_destroy(iovm_area_cachep); +} +module_exit(iovmm_exit); + +MODULE_DESCRIPTION("omap iommu: simple virtual address space management"); +MODULE_AUTHOR("Hiroshi DOYU "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-70-g09d2 From 5a6a5b1bcca3247e9161ccada488965c94012c48 Mon Sep 17 00:00:00 2001 From: Ohad Ben-Cohen Date: Tue, 16 Aug 2011 15:31:16 +0300 Subject: omap: iovmm: remove unused functionality Remove unused functionality from OMAP's iovmm module. The intention is to eventually completely replace iovmm with the generic DMA-API, so new code that'd need this iovmm functionality will have to extend the DMA-API instead. 
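For reference, the removed kernel-space helpers were used roughly as in the sketch below — an illustrative fragment written against the iommu_kmalloc()/iommu_kfree() signatures in this patch, not code taken from any in-tree driver; the wrapper names (example_alloc/example_free) and the SZ_64K size are made up for the example:

	#include <linux/err.h>
	#include <asm/sizes.h>
	#include <plat/iommu.h>
	#include <plat/iovmm.h>

	/*
	 * Allocate a physically contiguous, DMA-able buffer and create a
	 * 1-1-1 (da-pa-va) mapping for it. Passing da = 0 without
	 * IOVMF_DA_FIXED lets iovmm pick the device address itself.
	 */
	static u32 example_alloc(struct iommu_domain *domain, struct iommu *obj)
	{
		u32 da = iommu_kmalloc(domain, obj, 0, SZ_64K, 0);

		if (IS_ERR_VALUE(da))
			return 0;	/* allocation or mapping failed */

		return da;	/* device virtual address of the buffer */
	}

	/* Tear down the mapping and kfree() the underlying buffer. */
	static void example_free(struct iommu_domain *domain,
				 struct iommu *obj, u32 da)
	{
		iommu_kfree(domain, obj, da);
	}

A driver that needed the mapping at a specific device address would instead pass that address together with IOVMF_DA_FIXED; without the flag, the returned da may differ from the one requested.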
Signed-off-by: Ohad Ben-Cohen Acked-by: Hiroshi DOYU Acked-by: Tony Lindgren Signed-off-by: Joerg Roedel --- arch/arm/plat-omap/include/plat/iovmm.h | 8 -- drivers/iommu/omap-iovmm.c | 201 -------------------------------- 2 files changed, 209 deletions(-) (limited to 'drivers/iommu/omap-iovmm.c') diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h index e2f0b38a026..fc9aa6fe590 100644 --- a/arch/arm/plat-omap/include/plat/iovmm.h +++ b/arch/arm/plat-omap/include/plat/iovmm.h @@ -81,14 +81,6 @@ extern u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da, size_t bytes, u32 flags); extern void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da); -extern u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, - u32 pa, size_t bytes, u32 flags); -extern void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, - u32 da); -extern u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, - u32 da, size_t bytes, u32 flags); -extern void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da); - extern void *da_to_va(struct iommu *obj, u32 da); #endif /* __IOMMU_MMAP_H */ diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c index 809ca124196..996bec0b4a2 100644 --- a/drivers/iommu/omap-iovmm.c +++ b/drivers/iommu/omap-iovmm.c @@ -25,40 +25,6 @@ #include -/* - * A device driver needs to create address mappings between: - * - * - iommu/device address - * - physical address - * - mpu virtual address - * - * There are 4 possible patterns for them: - * - * |iova/ mapping iommu_ page - * | da pa va (d)-(p)-(v) function type - * --------------------------------------------------------------------------- - * 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s - * 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s - * 3 | c d c 1 - n - 1 _vmap() / _vunmap() s - * 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n* - * - * - * 'iova': device iommu virtual address - * 'da': alias of 'iova' - * 'pa': physical address - * 'va': mpu virtual address - * - * 'c': contiguous memory area - * 'd': discontiguous memory area - * 'a': anonymous memory allocation - * '()': optional feature - * - * 'n': a normal page(4KB) size is used. - * 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used. - * - * '*': not yet, but feasible. - */ - static struct kmem_cache *iovm_area_cachep; /* return total bytes of sg buffers */ @@ -419,40 +385,6 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt) BUG_ON(!sgt); } -static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, - size_t len) -{ - unsigned int i; - struct scatterlist *sg; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - unsigned bytes; - - bytes = max_alignment(da | pa); - bytes = min_t(unsigned, bytes, iopgsz_max(len)); - - BUG_ON(!iopgsz_ok(bytes)); - - sg_set_buf(sg, phys_to_virt(pa), bytes); - /* - * 'pa' is cotinuous(linear). 
- */ - pa += bytes; - da += bytes; - len -= bytes; - } - BUG_ON(len); -} - -static inline void sgtable_drain_kmalloc(struct sg_table *sgt) -{ - /* - * Actually this is not necessary at all, just exists for - * consistency of the code readability - */ - BUG_ON(!sgt); -} - /* create 'da' <-> 'pa' mapping from 'sgt' */ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new, const struct sg_table *sgt, u32 flags) @@ -764,139 +696,6 @@ void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da) } EXPORT_SYMBOL_GPL(iommu_vfree); -static u32 __iommu_kmap(struct iommu_domain *domain, struct iommu *obj, - u32 da, u32 pa, void *va, size_t bytes, u32 flags) -{ - struct sg_table *sgt; - - sgt = sgtable_alloc(bytes, flags, da, pa); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - sgtable_fill_kmalloc(sgt, pa, da, bytes); - - da = map_iommu_region(domain, obj, da, sgt, va, bytes, flags); - if (IS_ERR_VALUE(da)) { - sgtable_drain_kmalloc(sgt); - sgtable_free(sgt); - } - - return da; -} - -/** - * iommu_kmap - (d)-(p)-(v) address mapper - * @obj: objective iommu - * @da: contiguous iommu virtual memory - * @pa: contiguous physical memory - * @flags: iovma and page property - * - * Creates 1-1-1 mapping and returns @da again, which can be - * adjusted if 'IOVMF_DA_FIXED' is not set. - */ -u32 iommu_kmap(struct iommu_domain *domain, struct iommu *obj, u32 da, u32 pa, - size_t bytes, u32 flags) -{ - void *va; - - if (!obj || !obj->dev || !bytes) - return -EINVAL; - - bytes = PAGE_ALIGN(bytes); - - va = ioremap(pa, bytes); - if (!va) - return -ENOMEM; - - flags |= IOVMF_LINEAR; - flags |= IOVMF_MMIO; - - da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags); - if (IS_ERR_VALUE(da)) - iounmap(va); - - return da; -} -EXPORT_SYMBOL_GPL(iommu_kmap); - -/** - * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Frees the iommu virtually contiguous memory area starting at - * @da, which was passed to and was returned by'iommu_kmap()'. - */ -void iommu_kunmap(struct iommu_domain *domain, struct iommu *obj, u32 da) -{ - struct sg_table *sgt; - typedef void (*func_t)(const void *); - - sgt = unmap_vm_area(domain, obj, da, (func_t)iounmap, - IOVMF_LINEAR | IOVMF_MMIO); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - sgtable_free(sgt); -} -EXPORT_SYMBOL_GPL(iommu_kunmap); - -/** - * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper - * @obj: objective iommu - * @da: contiguous iommu virtual memory - * @bytes: bytes for allocation - * @flags: iovma and page property - * - * Allocate @bytes linearly and creates 1-1-1 mapping and returns - * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. 
- */ -u32 iommu_kmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da, - size_t bytes, u32 flags) -{ - void *va; - u32 pa; - - if (!obj || !obj->dev || !bytes) - return -EINVAL; - - bytes = PAGE_ALIGN(bytes); - - va = kmalloc(bytes, GFP_KERNEL | GFP_DMA); - if (!va) - return -ENOMEM; - pa = virt_to_phys(va); - - flags |= IOVMF_LINEAR; - flags |= IOVMF_ALLOC; - - da = __iommu_kmap(domain, obj, da, pa, va, bytes, flags); - if (IS_ERR_VALUE(da)) - kfree(va); - - return da; -} -EXPORT_SYMBOL_GPL(iommu_kmalloc); - -/** - * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()' - * @obj: objective iommu - * @da: iommu device virtual address - * - * Frees the iommu virtually contiguous memory area starting at - * @da, which was passed to and was returned by'iommu_kmalloc()'. - */ -void iommu_kfree(struct iommu_domain *domain, struct iommu *obj, u32 da) -{ - struct sg_table *sgt; - - sgt = unmap_vm_area(domain, obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC); - if (!sgt) - dev_dbg(obj->dev, "%s: No sgt\n", __func__); - sgtable_free(sgt); -} -EXPORT_SYMBOL_GPL(iommu_kfree); - - static int __init iovmm_init(void) { const unsigned long flags = SLAB_HWCACHE_ALIGN; -- cgit v1.2.3-70-g09d2 From 6c32df437c7c5b1fc29d3ca29b0ff44f8dfafc56 Mon Sep 17 00:00:00 2001 From: Ohad Ben-Cohen Date: Wed, 17 Aug 2011 22:57:56 +0300 Subject: omap: iommu: omapify 'struct iommu' and exposed API Prepend 'omap_' to OMAP's 'struct iommu' and exposed API, to prevent namespace pollution and generally to improve readability of the code that still uses the driver directly. Update the users as needed as well. Signed-off-by: Ohad Ben-Cohen Acked-by: Laurent Pinchart Acked-by: Hiroshi DOYU Acked-by: Tony Lindgren Signed-off-by: Joerg Roedel --- arch/arm/mach-omap2/iommu2.c | 31 +++--- arch/arm/plat-omap/include/plat/iommu.h | 55 ++++++----- arch/arm/plat-omap/include/plat/iommu2.h | 4 +- arch/arm/plat-omap/include/plat/iopgtable.h | 2 +- arch/arm/plat-omap/include/plat/iovmm.h | 19 ++-- drivers/iommu/omap-iommu-debug.c | 34 +++---- drivers/iommu/omap-iommu.c | 146 ++++++++++++++-------------- drivers/iommu/omap-iovmm.c | 62 ++++++------ drivers/media/video/omap3isp/isp.c | 6 +- drivers/media/video/omap3isp/isp.h | 2 +- drivers/media/video/omap3isp/ispccdc.c | 23 ++--- drivers/media/video/omap3isp/ispstat.c | 9 +- drivers/media/video/omap3isp/ispvideo.c | 4 +- 13 files changed, 208 insertions(+), 189 deletions(-) (limited to 'drivers/iommu/omap-iovmm.c') diff --git a/arch/arm/mach-omap2/iommu2.c b/arch/arm/mach-omap2/iommu2.c index f286012783c..eefc37912ef 100644 --- a/arch/arm/mach-omap2/iommu2.c +++ b/arch/arm/mach-omap2/iommu2.c @@ -66,7 +66,7 @@ ((pgsz) == MMU_CAM_PGSZ_4K) ? 
0xfffff000 : 0) -static void __iommu_set_twl(struct iommu *obj, bool on) +static void __iommu_set_twl(struct omap_iommu *obj, bool on) { u32 l = iommu_read_reg(obj, MMU_CNTL); @@ -85,7 +85,7 @@ static void __iommu_set_twl(struct iommu *obj, bool on) } -static int omap2_iommu_enable(struct iommu *obj) +static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; unsigned long timeout; @@ -127,7 +127,7 @@ static int omap2_iommu_enable(struct iommu *obj) return 0; } -static void omap2_iommu_disable(struct iommu *obj) +static void omap2_iommu_disable(struct omap_iommu *obj) { u32 l = iommu_read_reg(obj, MMU_CNTL); @@ -138,12 +138,12 @@ static void omap2_iommu_disable(struct iommu *obj) dev_dbg(obj->dev, "%s is shutting down\n", obj->name); } -static void omap2_iommu_set_twl(struct iommu *obj, bool on) +static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on) { __iommu_set_twl(obj, false); } -static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra) +static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra) { u32 stat, da; u32 errs = 0; @@ -173,13 +173,13 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra) return errs; } -static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr) +static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) { cr->cam = iommu_read_reg(obj, MMU_READ_CAM); cr->ram = iommu_read_reg(obj, MMU_READ_RAM); } -static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr) +static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) { iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM); iommu_write_reg(obj, cr->ram, MMU_RAM); @@ -193,7 +193,8 @@ static u32 omap2_cr_to_virt(struct cr_regs *cr) return cr->cam & mask; } -static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e) +static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj, + struct iotlb_entry *e) { struct cr_regs *cr; @@ -230,7 +231,8 @@ static u32 omap2_get_pte_attr(struct iotlb_entry *e) return attr; } -static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf) +static ssize_t +omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf) { char *p = buf; @@ -254,7 +256,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf) goto out; \ } while (0) -static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len) +static ssize_t +omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len) { char *p = buf; @@ -280,7 +283,7 @@ out: return p - buf; } -static void omap2_iommu_save_ctx(struct iommu *obj) +static void omap2_iommu_save_ctx(struct omap_iommu *obj) { int i; u32 *p = obj->ctx; @@ -293,7 +296,7 @@ static void omap2_iommu_save_ctx(struct iommu *obj) BUG_ON(p[0] != IOMMU_ARCH_VERSION); } -static void omap2_iommu_restore_ctx(struct iommu *obj) +static void omap2_iommu_restore_ctx(struct omap_iommu *obj) { int i; u32 *p = obj->ctx; @@ -343,13 +346,13 @@ static const struct iommu_functions omap2_iommu_ops = { static int __init omap2_iommu_init(void) { - return install_iommu_arch(&omap2_iommu_ops); + return omap_install_iommu_arch(&omap2_iommu_ops); } module_init(omap2_iommu_init); static void __exit omap2_iommu_exit(void) { - uninstall_iommu_arch(&omap2_iommu_ops); + omap_uninstall_iommu_arch(&omap2_iommu_ops); } module_exit(omap2_iommu_exit); diff --git a/arch/arm/plat-omap/include/plat/iommu.h b/arch/arm/plat-omap/include/plat/iommu.h index 9ae1e279864..7f1df0e18d5 100644 --- a/arch/arm/plat-omap/include/plat/iommu.h +++ 
b/arch/arm/plat-omap/include/plat/iommu.h @@ -25,7 +25,7 @@ struct iotlb_entry { }; }; -struct iommu { +struct omap_iommu { const char *name; struct module *owner; struct clk *clk; @@ -48,7 +48,7 @@ struct iommu { struct list_head mmap; struct mutex mmap_lock; /* protect mmap */ - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv); + int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs, void *priv); void *ctx; /* iommu context: registres saved area */ u32 da_start; @@ -81,25 +81,27 @@ struct iotlb_lock { struct iommu_functions { unsigned long version; - int (*enable)(struct iommu *obj); - void (*disable)(struct iommu *obj); - void (*set_twl)(struct iommu *obj, bool on); - u32 (*fault_isr)(struct iommu *obj, u32 *ra); + int (*enable)(struct omap_iommu *obj); + void (*disable)(struct omap_iommu *obj); + void (*set_twl)(struct omap_iommu *obj, bool on); + u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra); - void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr); - void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr); + void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr); + void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr); - struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e); + struct cr_regs *(*alloc_cr)(struct omap_iommu *obj, + struct iotlb_entry *e); int (*cr_valid)(struct cr_regs *cr); u32 (*cr_to_virt)(struct cr_regs *cr); void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e); - ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf); + ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr, + char *buf); u32 (*get_pte_attr)(struct iotlb_entry *e); - void (*save_ctx)(struct iommu *obj); - void (*restore_ctx)(struct iommu *obj); - ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len); + void (*save_ctx)(struct omap_iommu *obj); + void (*restore_ctx)(struct omap_iommu *obj); + ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len); }; struct iommu_platform_data { @@ -150,28 +152,31 @@ struct iommu_platform_data { /* * global functions */ -extern u32 iommu_arch_version(void); +extern u32 omap_iommu_arch_version(void); -extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); +extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e); -extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e); +extern int +omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e); -extern int iommu_set_isr(const char *name, - int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, +extern int omap_iommu_set_isr(const char *name, + int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs, void *priv), void *isr_priv); -extern void iommu_save_ctx(struct iommu *obj); -extern void iommu_restore_ctx(struct iommu *obj); +extern void omap_iommu_save_ctx(struct omap_iommu *obj); +extern void omap_iommu_restore_ctx(struct omap_iommu *obj); -extern int install_iommu_arch(const struct iommu_functions *ops); -extern void uninstall_iommu_arch(const struct iommu_functions *ops); +extern int omap_install_iommu_arch(const struct iommu_functions *ops); +extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops); -extern int foreach_iommu_device(void *data, +extern int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *)); -extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len); -extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len); +extern ssize_t 
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); +extern size_t +omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); struct device *omap_find_iommu_device(const char *name); #endif /* __MACH_IOMMU_H */ diff --git a/arch/arm/plat-omap/include/plat/iommu2.h b/arch/arm/plat-omap/include/plat/iommu2.h index 10ad05f410e..d4116b595e4 100644 --- a/arch/arm/plat-omap/include/plat/iommu2.h +++ b/arch/arm/plat-omap/include/plat/iommu2.h @@ -83,12 +83,12 @@ /* * register accessors */ -static inline u32 iommu_read_reg(struct iommu *obj, size_t offs) +static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs) { return __raw_readl(obj->regbase + offs); } -static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs) +static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) { __raw_writel(val, obj->regbase + offs); } diff --git a/arch/arm/plat-omap/include/plat/iopgtable.h b/arch/arm/plat-omap/include/plat/iopgtable.h index 33c7aa986f5..66a813977d5 100644 --- a/arch/arm/plat-omap/include/plat/iopgtable.h +++ b/arch/arm/plat-omap/include/plat/iopgtable.h @@ -115,6 +115,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, } #define to_iommu(dev) \ - (struct iommu *)platform_get_drvdata(to_platform_device(dev)) + (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)) #endif /* __PLAT_OMAP_IOMMU_H */ diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h index fc9aa6fe590..6af1a91c0f3 100644 --- a/arch/arm/plat-omap/include/plat/iovmm.h +++ b/arch/arm/plat-omap/include/plat/iovmm.h @@ -16,7 +16,7 @@ #include struct iovm_struct { - struct iommu *iommu; /* iommu object which this belongs to */ + struct omap_iommu *iommu; /* iommu object which this belongs to */ u32 da_start; /* area definition */ u32 da_end; u32 flags; /* IOVMF_: see below */ @@ -72,15 +72,18 @@ struct iovm_struct { #define IOVMF_DA_FIXED (1 << (4 + IOVMF_SW_SHIFT)) -extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da); -extern u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da, +extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da); +extern u32 +omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, const struct sg_table *sgt, u32 flags); -extern struct sg_table *iommu_vunmap(struct iommu_domain *domain, - struct iommu *obj, u32 da); -extern u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, +extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain, + struct omap_iommu *obj, u32 da); +extern u32 +omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da, size_t bytes, u32 flags); -extern void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, +extern void +omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj, const u32 da); -extern void *da_to_va(struct iommu *obj, u32 da); +extern void *omap_da_to_va(struct omap_iommu *obj, u32 da); #endif /* __IOMMU_MMAP_H */ diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 0f8c8dd5501..9c192e79f80 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -32,7 +32,7 @@ static struct dentry *iommu_debug_root; static ssize_t debug_read_ver(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - u32 ver = iommu_arch_version(); + u32 ver = omap_iommu_arch_version(); char buf[MAXCOLUMN], *p = buf; p += 
sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf); @@ -43,7 +43,7 @@ static ssize_t debug_read_ver(struct file *file, char __user *userbuf, static ssize_t debug_read_regs(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes; @@ -54,7 +54,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); - bytes = iommu_dump_ctx(obj, p, count); + bytes = omap_iommu_dump_ctx(obj, p, count); bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); mutex_unlock(&iommu_debug_lock); @@ -66,7 +66,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char *p, *buf; ssize_t bytes, rest; @@ -80,7 +80,7 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, p += sprintf(p, "%8s %8s\n", "cam:", "ram:"); p += sprintf(p, "-----------------------------------------\n"); rest = count - (p - buf); - p += dump_tlb_entries(obj, p, rest); + p += omap_dump_tlb_entries(obj, p, rest); bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); @@ -96,7 +96,7 @@ static ssize_t debug_write_pagetable(struct file *file, struct iotlb_entry e; struct cr_regs cr; int err; - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char buf[MAXCOLUMN], *p = buf; count = min(count, sizeof(buf)); @@ -113,8 +113,8 @@ static ssize_t debug_write_pagetable(struct file *file, return -EINVAL; } - iotlb_cr_to_e(&cr, &e); - err = iopgtable_store_entry(obj, &e); + omap_iotlb_cr_to_e(&cr, &e); + err = omap_iopgtable_store_entry(obj, &e); if (err) dev_err(obj->dev, "%s: fail to store cr\n", __func__); @@ -136,7 +136,7 @@ static ssize_t debug_write_pagetable(struct file *file, __err; \ }) -static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len) +static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len) { int i; u32 *iopgd; @@ -183,7 +183,7 @@ out: static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char *p, *buf; size_t bytes; @@ -211,7 +211,7 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char *p, *buf; struct iovm_struct *tmp; int uninitialized_var(i); @@ -253,7 +253,7 @@ static ssize_t debug_read_mmap(struct file *file, char __user *userbuf, static ssize_t debug_read_mem(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { - struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; char *p, *buf; struct iovm_struct *area; ssize_t bytes; @@ -267,7 +267,7 @@ static ssize_t debug_read_mem(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); - area = find_iovm_area(obj, (u32)ppos); + area = omap_find_iovm_area(obj, (u32)ppos); if (IS_ERR(area)) { bytes = -EINVAL; goto err_out; @@ -286,7 +286,7 @@ err_out: static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { - 
struct iommu *obj = file->private_data; + struct omap_iommu *obj = file->private_data; struct iovm_struct *area; char *p, *buf; @@ -304,7 +304,7 @@ static ssize_t debug_write_mem(struct file *file, const char __user *userbuf, goto err_out; } - area = find_iovm_area(obj, (u32)ppos); + area = omap_find_iovm_area(obj, (u32)ppos); if (IS_ERR(area)) { count = -EINVAL; goto err_out; @@ -360,7 +360,7 @@ DEBUG_FOPS(mem); static int iommu_debug_register(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); - struct iommu *obj = platform_get_drvdata(pdev); + struct omap_iommu *obj = platform_get_drvdata(pdev); struct dentry *d, *parent; if (!obj || !obj->dev) @@ -396,7 +396,7 @@ static int __init iommu_debug_init(void) return -ENOMEM; iommu_debug_root = d; - err = foreach_iommu_device(d, iommu_debug_register); + err = omap_foreach_iommu_device(d, iommu_debug_register); if (err) goto err_out; return 0; diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index d0f28e73be6..dad45ab8cce 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -42,7 +42,7 @@ */ struct omap_iommu_domain { u32 *pgtable; - struct iommu *iommu_dev; + struct omap_iommu *iommu_dev; spinlock_t lock; }; @@ -53,13 +53,13 @@ static struct platform_driver omap_iommu_driver; static struct kmem_cache *iopte_cachep; /** - * install_iommu_arch - Install archtecure specific iommu functions + * omap_install_iommu_arch - Install archtecure specific iommu functions * @ops: a pointer to architecture specific iommu functions * * There are several kind of iommu algorithm(tlb, pagetable) among * omap series. This interface installs such an iommu algorighm. **/ -int install_iommu_arch(const struct iommu_functions *ops) +int omap_install_iommu_arch(const struct iommu_functions *ops) { if (arch_iommu) return -EBUSY; @@ -67,53 +67,53 @@ int install_iommu_arch(const struct iommu_functions *ops) arch_iommu = ops; return 0; } -EXPORT_SYMBOL_GPL(install_iommu_arch); +EXPORT_SYMBOL_GPL(omap_install_iommu_arch); /** - * uninstall_iommu_arch - Uninstall archtecure specific iommu functions + * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions * @ops: a pointer to architecture specific iommu functions * * This interface uninstalls the iommu algorighm installed previously. 
**/ -void uninstall_iommu_arch(const struct iommu_functions *ops) +void omap_uninstall_iommu_arch(const struct iommu_functions *ops) { if (arch_iommu != ops) pr_err("%s: not your arch\n", __func__); arch_iommu = NULL; } -EXPORT_SYMBOL_GPL(uninstall_iommu_arch); +EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch); /** - * iommu_save_ctx - Save registers for pm off-mode support + * omap_iommu_save_ctx - Save registers for pm off-mode support * @obj: target iommu **/ -void iommu_save_ctx(struct iommu *obj) +void omap_iommu_save_ctx(struct omap_iommu *obj) { arch_iommu->save_ctx(obj); } -EXPORT_SYMBOL_GPL(iommu_save_ctx); +EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); /** - * iommu_restore_ctx - Restore registers for pm off-mode support + * omap_iommu_restore_ctx - Restore registers for pm off-mode support * @obj: target iommu **/ -void iommu_restore_ctx(struct iommu *obj) +void omap_iommu_restore_ctx(struct omap_iommu *obj) { arch_iommu->restore_ctx(obj); } -EXPORT_SYMBOL_GPL(iommu_restore_ctx); +EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); /** - * iommu_arch_version - Return running iommu arch version + * omap_iommu_arch_version - Return running iommu arch version **/ -u32 iommu_arch_version(void) +u32 omap_iommu_arch_version(void) { return arch_iommu->version; } -EXPORT_SYMBOL_GPL(iommu_arch_version); +EXPORT_SYMBOL_GPL(omap_iommu_arch_version); -static int iommu_enable(struct iommu *obj) +static int iommu_enable(struct omap_iommu *obj) { int err; @@ -131,7 +131,7 @@ static int iommu_enable(struct iommu *obj) return err; } -static void iommu_disable(struct iommu *obj) +static void iommu_disable(struct omap_iommu *obj) { if (!obj) return; @@ -146,13 +146,13 @@ static void iommu_disable(struct iommu *obj) /* * TLB operations */ -void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) +void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) { BUG_ON(!cr || !e); arch_iommu->cr_to_e(cr, e); } -EXPORT_SYMBOL_GPL(iotlb_cr_to_e); +EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e); static inline int iotlb_cr_valid(struct cr_regs *cr) { @@ -162,7 +162,7 @@ static inline int iotlb_cr_valid(struct cr_regs *cr) return arch_iommu->cr_valid(cr); } -static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, +static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj, struct iotlb_entry *e) { if (!e) @@ -181,12 +181,12 @@ static u32 get_iopte_attr(struct iotlb_entry *e) return arch_iommu->get_pte_attr(e); } -static u32 iommu_report_fault(struct iommu *obj, u32 *da) +static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) { return arch_iommu->fault_isr(obj, da); } -static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) +static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) { u32 val; @@ -197,7 +197,7 @@ static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) } -static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) +static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l) { u32 val; @@ -207,12 +207,12 @@ static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) iommu_write_reg(obj, val, MMU_LOCK); } -static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) +static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) { arch_iommu->tlb_read_cr(obj, cr); } -static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) +static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) { arch_iommu->tlb_load_cr(obj, cr); @@ -226,7 +226,7 @@ static void iotlb_load_cr(struct iommu *obj, struct 
cr_regs *cr)
 * @cr: contents of cam and ram register
 * @buf: output buffer
 **/
-static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
+static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
 char *buf)
 {
 BUG_ON(!cr || !buf);
@@ -235,7 +235,7 @@ static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
 }

 /* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
+static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
 struct cr_regs cr;
 struct iotlb_lock l;
@@ -254,7 +254,7 @@ static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
 * @e: an iommu tlb entry info
 **/
 #ifdef PREFETCH_IOTLB
-static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
+static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 int err = 0;
 struct iotlb_lock l;
@@ -313,14 +313,14 @@ out:

 #else /* !PREFETCH_IOTLB */

-static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
+static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 return 0;
 }

 #endif /* !PREFETCH_IOTLB */

-static int prefetch_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
+static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 return load_iotlb_entry(obj, e);
 }
@@ -332,7 +332,7 @@ static int prefetch_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
-static void flush_iotlb_page(struct iommu *obj, u32 da)
+static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
 {
 int i;
 struct cr_regs cr;
@@ -366,7 +366,7 @@ static void flush_iotlb_page(struct iommu *obj, u32 da)
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
-static void flush_iotlb_all(struct iommu *obj)
+static void flush_iotlb_all(struct omap_iommu *obj)
 {
 struct iotlb_lock l;
@@ -383,7 +383,7 @@ static void flush_iotlb_all(struct iommu *obj)

 #if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

-ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
+ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
 {
 if (!obj || !buf)
 return -EINVAL;
@@ -396,9 +396,10 @@ ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)

 return bytes;
 }
-EXPORT_SYMBOL_GPL(iommu_dump_ctx);
+EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

-static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 {
 int i;
 struct iotlb_lock saved;
@@ -421,11 +422,11 @@ static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
 }

 /**
- * dump_tlb_entries - dump cr arrays to given buffer
+ * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj: target iommu
 * @buf: output buffer
 **/
-size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
+size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
 {
 int i, num;
 struct cr_regs *cr;
@@ -445,14 +446,14 @@ size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)

 return p - buf;
 }
-EXPORT_SYMBOL_GPL(dump_tlb_entries);
+EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

-int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
+int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
 {
 return driver_for_each_device(&omap_iommu_driver.driver, NULL, data, fn);
 }
-EXPORT_SYMBOL_GPL(foreach_iommu_device);
+EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

 #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

@@ -485,7 +486,7 @@ static void iopte_free(u32 *iopte)
 kmem_cache_free(iopte_cachep, iopte);
 }

-static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
+static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
 {
 u32 *iopte;

@@ -523,7 +524,7 @@ pte_ready:
 return iopte;
 }

-static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
+static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
 u32 *iopgd = iopgd_offset(obj, da);

@@ -538,7 +539,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
 return 0;
 }

-static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
+static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
 u32 *iopgd = iopgd_offset(obj, da);
 int i;

@@ -555,7 +556,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
 return 0;
 }

-static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
+static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
 u32 *iopgd = iopgd_offset(obj, da);
 u32 *iopte = iopte_alloc(obj, iopgd, da);

@@ -572,7 +573,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
 return 0;
 }

-static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
+static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
 {
 u32 *iopgd = iopgd_offset(obj, da);
 u32 *iopte = iopte_alloc(obj, iopgd, da);

@@ -593,9 +594,10 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
 return 0;
 }

-static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
+static int
+iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
 {
- int (*fn)(struct iommu *, u32, u32, u32);
+ int (*fn)(struct omap_iommu *, u32, u32, u32);
 u32 prot;
 int err;

@@ -631,11 +633,11 @@ static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
 }

 /**
- * iopgtable_store_entry - Make an iommu pte entry
+ * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
-int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
+int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
 {
 int err;

@@ -645,7 +647,7 @@ int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
 prefetch_iotlb_entry(obj, e);
 return err;
 }
-EXPORT_SYMBOL_GPL(iopgtable_store_entry);
+EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

 /**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -670,7 +672,7 @@ out:
 *ppte = iopte;
 }

-static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
+static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
 {
 size_t bytes;
 u32 *iopgd = iopgd_offset(obj, da);
@@ -723,7 +725,7 @@ out:
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
-static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
+static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
 {
 size_t bytes;

@@ -737,7 +739,7 @@ static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
 return bytes;
 }

-static void iopgtable_clear_entry_all(struct iommu *obj)
+static void iopgtable_clear_entry_all(struct omap_iommu *obj)
 {
 int i;

@@ -772,7 +774,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 {
 u32 da, errs;
 u32 *iopgd, *iopte;
- struct iommu *obj = data;
+ struct omap_iommu *obj = data;

 if (!obj->refcount)
 return IRQ_NONE;

@@ -808,7 +810,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)

 static int device_match_by_alias(struct device *dev, void *data)
 {
- struct iommu *obj = to_iommu(dev);
+ struct omap_iommu *obj = to_iommu(dev);
 const char *name = data;

 pr_debug("%s: %s %s\n", __func__, obj->name, name);

@@ -842,10 +844,10 @@ EXPORT_SYMBOL_GPL(omap_find_iommu_device);
 * @dev: target omap iommu device
 * @iopgd: page table
 **/
-static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
 {
 int err = -ENOMEM;
- struct iommu *obj = to_iommu(dev);
+ struct omap_iommu *obj = to_iommu(dev);

 spin_lock(&obj->iommu_lock);
@@ -883,7 +885,7 @@ err_enable:
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
-static void omap_iommu_detach(struct iommu *obj)
+static void omap_iommu_detach(struct omap_iommu *obj)
 {
 if (!obj || IS_ERR(obj))
 return;
@@ -902,13 +904,13 @@ static void omap_iommu_detach(struct iommu *obj)
 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
 }

-int iommu_set_isr(const char *name,
- int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+int omap_iommu_set_isr(const char *name,
+ int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
 void *priv),
 void *isr_priv)
 {
 struct device *dev;
- struct iommu *obj;
+ struct omap_iommu *obj;

 dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
 device_match_by_alias);
@@ -927,7 +929,7 @@ int iommu_set_isr(const char *name,

 return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_set_isr);
+EXPORT_SYMBOL_GPL(omap_iommu_set_isr);

 /*
 * OMAP Device MMU(IOMMU) detection
@@ -936,7 +938,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
 {
 int err = -ENODEV;
 int irq;
- struct iommu *obj;
+ struct omap_iommu *obj;
 struct resource *res;
 struct iommu_platform_data *pdata = pdev->dev.platform_data;

@@ -1011,7 +1013,7 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
 {
 int irq;
 struct resource *res;
- struct iommu *obj = platform_get_drvdata(pdev);
+ struct omap_iommu *obj = platform_get_drvdata(pdev);

 platform_set_drvdata(pdev, NULL);

@@ -1046,7 +1048,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 phys_addr_t pa, int order, int prot)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
- struct iommu *oiommu = omap_domain->iommu_dev;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
 struct device *dev = oiommu->dev;
 size_t bytes = PAGE_SIZE << order;
 struct iotlb_entry e;

@@ -1066,9 +1068,9 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,

 iotlb_init_entry(&e, da, pa, flags);

- ret = iopgtable_store_entry(oiommu, &e);
+ ret = omap_iopgtable_store_entry(oiommu, &e);
 if (ret) {
- dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
+ dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
 return ret;
 }

@@ -1079,7 +1081,7 @@ static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 int order)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
- struct iommu *oiommu = omap_domain->iommu_dev;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
 struct device *dev = oiommu->dev;
 size_t bytes = PAGE_SIZE << order;
 size_t ret;

@@ -1099,7 +1101,7 @@ static int omap_iommu_attach_dev(struct iommu_domain *domain,
 struct device *dev)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
- struct iommu *oiommu;
+ struct omap_iommu *oiommu;
 int ret = 0;

 spin_lock(&omap_domain->lock);
@@ -1130,7 +1132,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
 struct device *dev)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
- struct iommu *oiommu = to_iommu(dev);
+ struct omap_iommu *oiommu = to_iommu(dev);

 spin_lock(&omap_domain->lock);
@@ -1200,7 +1202,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
 unsigned long da)
 {
 struct omap_iommu_domain *omap_domain = domain->priv;
- struct iommu *oiommu = omap_domain->iommu_dev;
+ struct omap_iommu *oiommu = omap_domain->iommu_dev;
 struct device *dev = oiommu->dev;
 u32 *pgd, *pte;
 phys_addr_t ret = 0;
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 996bec0b4a2..5e7f97dc76e 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -191,7 +191,8 @@ static inline void vunmap_sg(const void *va)
 vunmap(va);
 }

-static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
+static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
+ const u32 da)
 {
 struct iovm_struct *tmp;

@@ -213,12 +214,12 @@ static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
 }

 /**
- * find_iovm_area - find iovma which includes @da
+ * omap_find_iovm_area - find iovma which includes @da
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
-struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
 {
 struct iovm_struct *area;

@@ -228,13 +229,13 @@ struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)

 return area;
 }
-EXPORT_SYMBOL_GPL(find_iovm_area);
+EXPORT_SYMBOL_GPL(omap_find_iovm_area);

 /*
 * This finds the hole(area) which fits the requested address and len
 * in iovmas mmap, and returns the new allocated iovma.
 */
-static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
+static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
 size_t bytes, u32 flags)
 {
 struct iovm_struct *new, *tmp;
@@ -309,7 +310,7 @@ found:

 return new;
 }

-static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
 {
 size_t bytes;
@@ -325,14 +326,14 @@ static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
 }

 /**
- * da_to_va - convert (d) to (v)
+ * omap_da_to_va - convert (d) to (v)
 * @obj: objective iommu
 * @da: iommu device virtual address
 * @va: mpu virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
-void *da_to_va(struct iommu *obj, u32 da)
+void *omap_da_to_va(struct omap_iommu *obj, u32 da)
 {
 void *va = NULL;
 struct iovm_struct *area;
@@ -350,7 +351,7 @@ out:

 return va;
 }
-EXPORT_SYMBOL_GPL(da_to_va);
+EXPORT_SYMBOL_GPL(omap_da_to_va);

 static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
 {
@@ -364,7 +365,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
 const size_t bytes = PAGE_SIZE;

 /*
- * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
+ * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
 */
 pg = vmalloc_to_page(va);
 BUG_ON(!pg);
@@ -443,7 +444,7 @@ err_out:
 }

 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,
+static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 struct iovm_struct *area)
 {
 u32 start;
@@ -480,7 +481,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct iommu *obj,

 /* template function for all unmapping */
 static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
- struct iommu *obj, const u32 da,
+ struct omap_iommu *obj, const u32 da,
 void (*fn)(const void *), u32 flags)
 {
 struct sg_table *sgt = NULL;
@@ -521,7 +522,7 @@ out:

 return sgt;
 }

-static u32 map_iommu_region(struct iommu_domain *domain, struct iommu *obj,
+static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
 u32 da, const struct sg_table *sgt, void *va,
 size_t bytes, u32 flags)
 {
@@ -555,7 +556,8 @@ err_alloc_iovma:

 return err;
 }

-static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
+static inline u32
+__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
 u32 da, const struct sg_table *sgt, void *va,
 size_t bytes, u32 flags)
 {
@@ -563,7 +565,7 @@ static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
 }

 /**
- * iommu_vmap - (d)-(p)-(v) address mapper
+ * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @obj: objective iommu
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
@@ -571,7 +573,7 @@ static inline u32 __iommu_vmap(struct iommu_domain *domain, struct iommu *obj,
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt element must be io page size aligned.
 */
-u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 const struct sg_table *sgt, u32 flags)
 {
 size_t bytes;
@@ -600,22 +602,22 @@ u32 iommu_vmap(struct iommu_domain *domain, struct iommu *obj, u32 da,

 return da;
 }
-EXPORT_SYMBOL_GPL(iommu_vmap);
+EXPORT_SYMBOL_GPL(omap_iommu_vmap);

 /**
- * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
+ * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
- * @da, which was returned by 'iommu_vmap()'.
+ * @da, which was returned by 'omap_iommu_vmap()'.
 */
 struct sg_table *
-iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
 {
 struct sg_table *sgt;
 /*
- * 'sgt' is allocated before 'iommu_vmalloc()' is called.
+ * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
 * Just returns 'sgt' to the caller to free
 */
 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
@@ -624,10 +626,10 @@ iommu_vunmap(struct iommu_domain *domain, struct iommu *obj, u32 da)
 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 return sgt;
 }
-EXPORT_SYMBOL_GPL(iommu_vunmap);
+EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

 /**
- * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
+ * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj: objective iommu
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
@@ -636,7 +638,8 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
 * Allocate @bytes linearly and creates 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
-u32 iommu_vmalloc(struct iommu_domain *domain, struct iommu *obj, u32 da,
+u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 size_t bytes, u32 flags)
 {
 void *va;
@@ -674,17 +677,18 @@ err_sgt_alloc:
 vfree(va);

 return da;
 }
-EXPORT_SYMBOL_GPL(iommu_vmalloc);
+EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

 /**
- * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
+ * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @obj: objective iommu
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually continuous memory area starting at
- * @da, as obtained from 'iommu_vmalloc()'.
+ * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
-void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
+void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+ const u32 da)
 {
 struct sg_table *sgt;
@@ -694,7 +698,7 @@ void iommu_vfree(struct iommu_domain *domain, struct iommu *obj, const u32 da)
 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
 sgtable_free(sgt);
 }
-EXPORT_SYMBOL_GPL(iommu_vfree);
+EXPORT_SYMBOL_GPL(omap_iommu_vfree);

 static int __init iovmm_init(void)
 {
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 0db45ac7489..a4baa6165c2 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -85,7 +85,7 @@
 * any omap-specific iommu API
 */
 #define to_iommu(dev) \
- (struct iommu *)platform_get_drvdata(to_platform_device(dev))
+ (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))

 static unsigned int autoidle;
 module_param(autoidle, int, 0444);
@@ -1115,7 +1115,7 @@ static void isp_save_ctx(struct isp_device *isp)
 {
 isp_save_context(isp, isp_reg_list);
 if (isp->iommu)
- iommu_save_ctx(isp->iommu);
+ omap_iommu_save_ctx(isp->iommu);
 }

 /*
@@ -1129,7 +1129,7 @@ static void isp_restore_ctx(struct isp_device *isp)
 {
 isp_restore_context(isp, isp_reg_list);
 if (isp->iommu)
- iommu_restore_ctx(isp->iommu);
+ omap_iommu_restore_ctx(isp->iommu);
 omap3isp_ccdc_restore_context(isp);
 omap3isp_preview_restore_context(isp);
 }
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index c9ec7a2e53e..81fdd85deb6 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -295,7 +295,7 @@ struct isp_device {
 unsigned int sbl_resources;
 unsigned int subclk_resources;

- struct iommu *iommu;
+ struct omap_iommu *iommu;
 struct iommu_domain *domain;
 struct device *iommu_dev;
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index de254741373..9891dde2af7 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -365,7 +365,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
 dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
 req->iovm->sgt->nents, DMA_TO_DEVICE);
 if (req->table)
- iommu_vfree(isp->domain, isp->iommu, req->table);
+ omap_iommu_vfree(isp->domain, isp->iommu, req->table);
 kfree(req);
 }

@@ -437,7 +437,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,

 req->enable = 1;

- req->table = iommu_vmalloc(isp->domain, isp->iommu, 0,
+ req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
 req->config.size, IOMMU_FLAG);
 if (IS_ERR_VALUE(req->table)) {
 req->table = 0;
@@ -445,7 +445,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 goto done;
 }

- req->iovm = find_iovm_area(isp->iommu, req->table);
+ req->iovm = omap_find_iovm_area(isp->iommu, req->table);
 if (req->iovm == NULL) {
 ret = -ENOMEM;
 goto done;
@@ -461,7 +461,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
 req->iovm->sgt->nents, DMA_TO_DEVICE);

- table = da_to_va(isp->iommu, req->table);
+ table = omap_da_to_va(isp->iommu, req->table);
 if (copy_from_user(table, config->lsc, req->config.size)) {
 ret = -EFAULT;
 goto done;
@@ -730,18 +730,19 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,

 /*
 * table_new must be 64-bytes aligned, but it's
- * already done by iommu_vmalloc().
+ * already done by omap_iommu_vmalloc().
 */
 size = ccdc->fpc.fpnum * 4;
- table_new = iommu_vmalloc(isp->domain, isp->iommu, 0,
- size, IOMMU_FLAG);
+ table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+ 0, size, IOMMU_FLAG);
 if (IS_ERR_VALUE(table_new))
 return -ENOMEM;

- if (copy_from_user(da_to_va(isp->iommu, table_new),
+ if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
 (__force void __user *)
 ccdc->fpc.fpcaddr, size)) {
- iommu_vfree(isp->domain, isp->iommu, table_new);
+ omap_iommu_vfree(isp->domain, isp->iommu,
+ table_new);
 return -EFAULT;
 }

@@ -751,7 +752,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
 ccdc_configure_fpc(ccdc);

 if (table_old != 0)
- iommu_vfree(isp->domain, isp->iommu, table_old);
+ omap_iommu_vfree(isp->domain, isp->iommu, table_old);
 }

 return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -2286,5 +2287,5 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
 ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);

 if (ccdc->fpc.fpcaddr != 0)
- iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+ omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
 }
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index 98af736b9a9..73290555226 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,8 @@ static void isp_stat_bufs_free(struct ispstat *stat)
 dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
 buf->iovm->sgt->nents,
 DMA_FROM_DEVICE);
- iommu_vfree(isp->domain, isp->iommu, buf->iommu_addr);
+ omap_iommu_vfree(isp->domain, isp->iommu,
+ buf->iommu_addr);
 } else {
 if (!buf->virt_addr)
 continue;
@@ -399,7 +400,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
 struct iovm_struct *iovm;

 WARN_ON(buf->dma_addr);
- buf->iommu_addr = iommu_vmalloc(isp->domain, isp->iommu, 0,
+ buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
 size, IOMMU_FLAG);
 if (IS_ERR((void *)buf->iommu_addr)) {
 dev_err(stat->isp->dev,
@@ -409,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
 return -ENOMEM;
 }

- iovm = find_iovm_area(isp->iommu, buf->iommu_addr);
+ iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
 if (!iovm ||
 !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
 DMA_FROM_DEVICE)) {
@@ -418,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
 }

 buf->iovm = iovm;
- buf->virt_addr = da_to_va(stat->isp->iommu,
+ buf->virt_addr = omap_da_to_va(stat->isp->iommu,
 (u32)buf->iommu_addr);
 buf->empty = 1;
 dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 023b5028a27..912ac071b10 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -446,7 +446,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
 sgt->nents = sglen;
 sgt->orig_nents = sglen;

- da = iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
+ da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
 if (IS_ERR_VALUE(da))
 kfree(sgt);

@@ -462,7 +462,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
 {
 struct sg_table *sgt;

- sgt = iommu_vunmap(isp->domain, isp->iommu, (u32)da);
+ sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
 kfree(sgt);
 }
-- cgit v1.2.3-70-g09d2
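
[Editor's note: the series above only renames symbols (struct iommu becomes struct omap_iommu, and the exported helpers gain an omap_ prefix); it changes no behaviour. For readers with out-of-tree users, the mechanical conversion looks like the sketch below. This is an illustrative, hypothetical call site, not code from any of these patches; example_use_renamed_api() is invented here, and IOMMU_FLAG stands in for omap3isp's IOVMF flag bundle. It assumes a domain and an omap_iommu handle set up the way omap3isp sets up isp->domain and isp->iommu.]

    /* Hypothetical client code showing the omap_-prefixed API. */
    static int example_use_renamed_api(struct iommu_domain *domain,
                                       struct omap_iommu *obj, size_t size)
    {
            u32 da;
            void *va;

            /* was: iommu_vmalloc(domain, obj, 0, size, IOMMU_FLAG) */
            da = omap_iommu_vmalloc(domain, obj, 0, size, IOMMU_FLAG);
            if (IS_ERR_VALUE(da))
                    return -ENOMEM;

            /* was: da_to_va(obj, da) */
            va = omap_da_to_va(obj, da);
            if (va)
                    memset(va, 0, size);

            /* was: iommu_vfree(domain, obj, da) */
            omap_iommu_vfree(domain, obj, da);
            return va ? 0 : -ENOMEM;
    }
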
From 329d8d3b474923087f6988737ff12137b58e55cc Mon Sep 17 00:00:00 2001
From: Laurent Pinchart
Date: Fri, 2 Sep 2011 13:32:30 -0400
Subject: iommu/omap-iovmm: support non page-aligned buffers in iommu_vmap

omap_iovmm requires page-aligned buffers, and that sometimes causes
omap3isp failures (i.e. whenever the buffer passed from userspace is
not page-aligned).

Remove this limitation by rounding the address of the first page entry
down, and adding the offset back to the device address.

Signed-off-by: Laurent Pinchart
Acked-by: Hiroshi DOYU
[ohad@wizery.com: rebased, but tested only with aligned buffers]
[ohad@wizery.com: slightly edited the commit log]
Signed-off-by: Ohad Ben-Cohen
Signed-off-by: Joerg Roedel
---
 drivers/iommu/omap-iovmm.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

(limited to 'drivers/iommu/omap-iovmm.c')

diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 5e7f97dc76e..39bdb92aa96 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -27,6 +27,15 @@

 static struct kmem_cache *iovm_area_cachep;

+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+ if (!sgt || !sgt->nents)
+ return 0;
+
+ return sgt->sgl->offset;
+}
+
 /* return total bytes of sg buffers */
 static size_t sgtable_len(const struct sg_table *sgt)
 {
@@ -39,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 size_t bytes;

- bytes = sg->length;
+ bytes = sg->length + sg->offset;

 if (!iopgsz_ok(bytes)) {
- pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
- __func__, i, bytes);
+ pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+ __func__, i, bytes, sg->offset);
+ return 0;
+ }
+
+ if (i && sg->offset) {
+ pr_err("%s: sg[%d] offset not allowed in internal "
+ "entries\n", __func__, i);
 return 0;
 }

@@ -164,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
 u32 pa;
 int err;

- pa = sg_phys(sg);
- bytes = sg->length;
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;

 BUG_ON(bytes != PAGE_SIZE);

@@ -405,8 +420,8 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 u32 pa;
 size_t bytes;

- pa = sg_phys(sg);
- bytes = sg->length;
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;

 flags &= ~IOVMF_PGSZ_MASK;

@@ -432,7 +447,7 @@ err_out:
 for_each_sg(sgt->sgl, sg, i, j) {
 size_t bytes;

- bytes = sg->length;
+ bytes = sg->length + sg->offset;
 order = get_order(bytes);

 /* ignore failures.. we're already handling one */
@@ -461,7 +476,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 size_t bytes;
 int order;

- bytes = sg->length;
+ bytes = sg->length + sg->offset;
 order = get_order(bytes);

 err = iommu_unmap(domain, start, order);
@@ -600,7 +615,7 @@ u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 if (IS_ERR_VALUE(da))
 vunmap_sg(va);

- return da;
+ return da + sgtable_offset(sgt);
 }
 EXPORT_SYMBOL_GPL(omap_iommu_vmap);

@@ -620,6 +635,7 @@ omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
 * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
 * Just returns 'sgt' to the caller to free
 */
+ da &= PAGE_MASK;
 sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
 IOVMF_DISCONT | IOVMF_MMIO);
 if (!sgt)
-- cgit v1.2.3-70-g09d2
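
[Editor's note: the arithmetic at the heart of this patch is easy to miss among the hunks. Each scatterlist entry is widened to whole pages before mapping (the physical address is rounded down by sg->offset and the length grown by the same amount), and the sub-page offset is re-applied only to the device address handed back to the caller; omap_iommu_vunmap() masks it off again with da &= PAGE_MASK. A condensed, illustrative sketch of the per-entry mapping step follows; example_map_entry() is a hypothetical helper, not code from the patch, and iommu_map() took a page order at this point in the IOMMU API.]

    /* Illustrative sketch of the offset handling introduced above;
     * the real code lives in vmap_sg()/map_iovm_area()/omap_iommu_vmap(). */
    static int example_map_entry(struct iommu_domain *domain, u32 da,
                                 struct scatterlist *sg, int prot)
    {
            /* widen [sg->offset, sg->offset + sg->length) to whole pages */
            phys_addr_t pa = sg_phys(sg) - sg->offset;
            size_t bytes = sg->length + sg->offset;

            /* map at the page-aligned pa; the caller later returns
             * da + sgtable_offset(sgt), so users see the real start */
            return iommu_map(domain, da, pa, get_order(bytes), prot);
    }
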
From 5e1b612cb16f446996398bd23b6cd59ea0206938 Mon Sep 17 00:00:00 2001
From: Ohad Ben-Cohen
Date: Fri, 2 Sep 2011 13:32:33 -0400
Subject: iommu/omap: ->unmap() should return order of unmapped page

Users of the IOMMU API (KVM specifically) assume that iommu_unmap()
returns the order of the unmapped page.

Fix omap_iommu_unmap() to do so, and adapt omap-iovmm accordingly.

Signed-off-by: Ohad Ben-Cohen
Signed-off-by: Joerg Roedel
---
 drivers/iommu/omap-iommu.c | 13 ++++---------
 drivers/iommu/omap-iovmm.c | 2 +-
 2 files changed, 5 insertions(+), 10 deletions(-)

(limited to 'drivers/iommu/omap-iovmm.c')

diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 4311bc32cfa..bd5f6064c74 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1081,18 +1081,13 @@ static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 struct omap_iommu_domain *omap_domain = domain->priv;
 struct omap_iommu *oiommu = omap_domain->iommu_dev;
 struct device *dev = oiommu->dev;
- size_t bytes = PAGE_SIZE << order;
- size_t ret;
+ size_t unmap_size;

- dev_dbg(dev, "unmapping da 0x%lx size 0x%x\n", da, bytes);
+ dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);

- ret = iopgtable_clear_entry(oiommu, da);
- if (ret != bytes) {
- dev_err(dev, "entry @ 0x%lx was %d; not %d\n", da, ret, bytes);
- return -EINVAL;
- }
+ unmap_size = iopgtable_clear_entry(oiommu, da);

- return 0;
+ return unmap_size ? get_order(unmap_size) : -EINVAL;
 }

 static int
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 39bdb92aa96..e8fdb8830f6 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -480,7 +480,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
 order = get_order(bytes);

 err = iommu_unmap(domain, start, order);
- if (err)
+ if (err < 0)
 break;

 dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
-- cgit v1.2.3-70-g09d2
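
[Editor's note: the new contract is that ->unmap() returns the order of the page it actually unmapped, or a negative errno; a return of 0 now means "a 4 KiB page", not "success". A caller walking a range must therefore advance by the returned order and treat only negative values as failure, exactly why the omap-iovmm hunk changes "if (err)" to "if (err < 0)". An illustrative sketch follows; example_unmap_range() is hypothetical, and iommu_unmap() both took and returned page orders at this point in the IOMMU API.]

    /* Hypothetical caller honouring the order-returning convention. */
    static size_t example_unmap_range(struct iommu_domain *domain,
                                      u32 da, size_t total)
    {
            size_t unmapped = 0;

            while (unmapped < total) {
                    int order = iommu_unmap(domain, da + unmapped,
                                            get_order(total - unmapped));
                    if (order < 0)
                            break;  /* error; not "nothing unmapped" */

                    unmapped += PAGE_SIZE << order;
            }

            return unmapped;
    }
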
From 08f2e6312c67fed80df9342e06ad36daf11eb80b Mon Sep 17 00:00:00 2001
From: Ming Lei
Date: Tue, 8 Nov 2011 18:29:15 +0800
Subject: iommu: omap: Fix compile failure

Fix a compile failure in drivers/iommu/omap-iommu-debug.c caused by a
missing module.h include.

Signed-off-by: Ming Lei
Signed-off-by: Joerg Roedel
---
 drivers/iommu/omap-iommu-debug.c | 1 +
 drivers/iommu/omap-iovmm.c | 1 +
 2 files changed, 2 insertions(+)

(limited to 'drivers/iommu/omap-iovmm.c')

diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9c192e79f80..288da5c1499 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,6 +10,7 @@
 * published by the Free Software Foundation.
 */

+#include <linux/module.h>
 #include 
 #include 
 #include 
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index e8fdb8830f6..46be456fcc0 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -10,6 +10,7 @@
 * published by the Free Software Foundation.
 */

+#include <linux/module.h>
 #include 
 #include 
 #include 
-- cgit v1.2.3-70-g09d2