author		Laurent Pinchart <laurent.pinchart@ideasonboard.com>	2011-09-02 13:32:30 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2011-09-05 15:14:37 +0200
commit		329d8d3b474923087f6988737ff12137b58e55cc (patch)
tree		9db30cae05fb15b6dfede6976aae1dfbd9dfd80d /drivers/iommu/omap-iovmm.c
parent		024ae884a657f8ddeeff6b472c1fe538f277980e (diff)
iommu/omap-iovmm: support non page-aligned buffers in iommu_vmap
omap_iovmm requires page-aligned buffers, and that sometimes causes
omap3isp failures (i.e. whenever the buffer passed from userspace is not
page-aligned).

Remove this limitation by rounding the address of the first page entry
down, and adding the offset back to the device address.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
[ohad@wizery.com: rebased, but tested only with aligned buffers]
[ohad@wizery.com: slightly edited the commit log]
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
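For reference, a minimal standalone sketch of the round-down-and-offset idea
described above; this is not the driver code, and map_page() is a hypothetical
helper (stubbed out here) that maps one page at the given device address:

/*
 * Sketch: map a buffer whose physical address 'pa' is not page-aligned by
 * rounding the mapping start down to a page boundary and handing the
 * in-page offset back to the caller as part of the device address.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

/* hypothetical page-mapping helper, stubbed out for the sketch */
static int map_page(uint32_t da, uint32_t pa)
{
	(void)da;
	(void)pa;
	return 0;	/* pretend the mapping succeeded */
}

static uint32_t map_unaligned(uint32_t da, uint32_t pa, size_t len)
{
	uint32_t offset = pa & ~SKETCH_PAGE_MASK;  /* offset into first page */
	uint32_t pa_start = pa & SKETCH_PAGE_MASK; /* round down to a page */
	size_t total = offset + len;               /* cover the extra bytes */
	size_t pos;

	for (pos = 0; pos < total; pos += SKETCH_PAGE_SIZE)
		if (map_page(da + pos, pa_start + pos))
			return 0;	/* caller treats 0 as failure */

	/* add the offset back so the returned da points at the data */
	return da + offset;
}

For example, with pa = 0x10000040 the mapping would start at 0x10000000 and
the returned device address would carry the 0x40 offset, mirroring how
omap_iommu_vmap() in the patch below returns da + sgtable_offset(sgt).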
Diffstat (limited to 'drivers/iommu/omap-iovmm.c')
-rw-r--r--	drivers/iommu/omap-iovmm.c	36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 5e7f97dc76e..39bdb92aa96 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -27,6 +27,15 @@
static struct kmem_cache *iovm_area_cachep;
+/* return the offset of the first scatterlist entry in a sg table */
+static unsigned int sgtable_offset(const struct sg_table *sgt)
+{
+ if (!sgt || !sgt->nents)
+ return 0;
+
+ return sgt->sgl->offset;
+}
+
/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
@@ -39,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes;
- bytes = sg->length;
+ bytes = sg->length + sg->offset;
if (!iopgsz_ok(bytes)) {
- pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
- __func__, i, bytes);
+ pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+ __func__, i, bytes, sg->offset);
+ return 0;
+ }
+
+ if (i && sg->offset) {
+ pr_err("%s: sg[%d] offset not allowed in internal "
+ "entries\n", __func__, i);
return 0;
}
@@ -164,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
u32 pa;
int err;
- pa = sg_phys(sg);
- bytes = sg->length;
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;
BUG_ON(bytes != PAGE_SIZE);
@@ -405,8 +420,8 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
u32 pa;
size_t bytes;
- pa = sg_phys(sg);
- bytes = sg->length;
+ pa = sg_phys(sg) - sg->offset;
+ bytes = sg->length + sg->offset;
flags &= ~IOVMF_PGSZ_MASK;
@@ -432,7 +447,7 @@ err_out:
for_each_sg(sgt->sgl, sg, i, j) {
size_t bytes;
- bytes = sg->length;
+ bytes = sg->length + sg->offset;
order = get_order(bytes);
/* ignore failures.. we're already handling one */
@@ -461,7 +476,7 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
size_t bytes;
int order;
- bytes = sg->length;
+ bytes = sg->length + sg->offset;
order = get_order(bytes);
err = iommu_unmap(domain, start, order);
@@ -600,7 +615,7 @@ u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
if (IS_ERR_VALUE(da))
vunmap_sg(va);
- return da;
+ return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
@@ -620,6 +635,7 @@ omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
* 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
* Just returns 'sgt' to the caller to free
*/
+ da &= PAGE_MASK;
sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
IOVMF_DISCONT | IOVMF_MMIO);
if (!sgt)