Diffstat (limited to 'arch/arm/plat-omap/iovmm.c')
-rw-r--r-- | arch/arm/plat-omap/iovmm.c | 102
1 file changed, 55 insertions(+), 47 deletions(-)
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 8ce0de247c7..51ef43e8def 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -87,35 +87,43 @@ static size_t sgtable_len(const struct sg_table *sgt)
 }
 #define sgtable_ok(x)        (!!sgtable_len(x))
 
+static unsigned max_alignment(u32 addr)
+{
+        int i;
+        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+                ;
+        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
 /*
  * calculate the optimal number sg elements from total bytes based on
  * iommu superpages
  */
-static unsigned int sgtable_nents(size_t bytes)
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
 {
-        int i;
-        unsigned int nr_entries;
-        const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+        unsigned nr_entries = 0, ent_sz;
 
         if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                 pr_err("%s: wrong size %08x\n", __func__, bytes);
                 return 0;
         }
 
-        nr_entries = 0;
-        for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
-                if (bytes >= pagesize[i]) {
-                        nr_entries += (bytes / pagesize[i]);
-                        bytes %= pagesize[i];
-                }
+        while (bytes) {
+                ent_sz = max_alignment(da | pa);
+                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+                nr_entries++;
+                da += ent_sz;
+                pa += ent_sz;
+                bytes -= ent_sz;
         }
-        BUG_ON(bytes);
 
         return nr_entries;
 }
 
 /* allocate and initialize sg_table header(a kind of 'superblock') */
-static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+                                                        u32 da, u32 pa)
 {
         unsigned int nr_entries;
         int err;
@@ -127,9 +135,8 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
         if (!IS_ALIGNED(bytes, PAGE_SIZE))
                 return ERR_PTR(-EINVAL);
 
-        /* FIXME: IOVMF_DA_FIXED should support 'superpages' */
-        if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
-                nr_entries = sgtable_nents(bytes);
+        if (flags & IOVMF_LINEAR) {
+                nr_entries = sgtable_nents(bytes, da, pa);
                 if (!nr_entries)
                         return ERR_PTR(-EINVAL);
         } else
@@ -264,22 +271,24 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
                                            size_t bytes, u32 flags)
 {
         struct iovm_struct *new, *tmp;
-        u32 start, prev_end, alignement;
+        u32 start, prev_end, alignment;
 
         if (!obj || !bytes)
                 return ERR_PTR(-EINVAL);
 
         start = da;
-        alignement = PAGE_SIZE;
+        alignment = PAGE_SIZE;
+
+        if (~flags & IOVMF_DA_FIXED) {
+                /* Don't map address 0 */
+                start = obj->da_start ? obj->da_start : alignment;
 
-        if (flags & IOVMF_DA_ANON) {
-                /*
-                 * Reserve the first page for NULL
-                 */
-                start = PAGE_SIZE;
                 if (flags & IOVMF_LINEAR)
-                        alignement = iopgsz_max(bytes);
-                start = roundup(start, alignement);
+                        alignment = iopgsz_max(bytes);
+                start = roundup(start, alignment);
+        } else if (start < obj->da_start || start > obj->da_end ||
+                                        obj->da_end - start < bytes) {
+                return ERR_PTR(-EINVAL);
         }
 
         tmp = NULL;
@@ -289,19 +298,19 @@ static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
 
         prev_end = 0;
         list_for_each_entry(tmp, &obj->mmap, list) {
-                if (prev_end >= start)
+                if (prev_end > start)
                         break;
 
-                if (start + bytes < tmp->da_start)
+                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                         goto found;
 
-                if (flags & IOVMF_DA_ANON)
-                        start = roundup(tmp->da_end + 1, alignement);
+                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
+                        start = roundup(tmp->da_end + 1, alignment);
 
                 prev_end = tmp->da_end;
         }
 
-        if ((start > prev_end) && (ULONG_MAX - start >= bytes))
+        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                 goto found;
 
         dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
@@ -409,7 +418,8 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
         BUG_ON(!sgt);
 }
 
-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+                                                        size_t len)
 {
         unsigned int i;
         struct scatterlist *sg;
@@ -418,9 +428,10 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
         va = phys_to_virt(pa);
 
         for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-                size_t bytes;
+                unsigned bytes;
 
-                bytes = iopgsz_max(len);
+                bytes = max_alignment(da | pa);
+                bytes = min_t(unsigned, bytes, iopgsz_max(len));
 
                 BUG_ON(!iopgsz_ok(bytes));
 
@@ -429,6 +440,7 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
                  * 'pa' is cotinuous(linear).
                  */
                 pa += bytes;
+                da += bytes;
                 len -= bytes;
         }
         BUG_ON(len);
@@ -639,7 +651,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
         flags &= IOVMF_HW_MASK;
         flags |= IOVMF_DISCONT;
         flags |= IOVMF_MMIO;
-        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
 
         da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
         if (IS_ERR_VALUE(da))
@@ -679,7 +690,7 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
  * @flags:      iovma and page property
  *
  * Allocate @bytes linearly and creates 1-n-1 mapping and returns
- * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
+ * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
  */
 u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 {
@@ -695,18 +706,17 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
         if (!va)
                 return -ENOMEM;
 
-        sgt = sgtable_alloc(bytes, flags);
+        flags &= IOVMF_HW_MASK;
+        flags |= IOVMF_DISCONT;
+        flags |= IOVMF_ALLOC;
+
+        sgt = sgtable_alloc(bytes, flags, da, 0);
         if (IS_ERR(sgt)) {
                 da = PTR_ERR(sgt);
                 goto err_sgt_alloc;
         }
         sgtable_fill_vmalloc(sgt, va);
 
-        flags &= IOVMF_HW_MASK;
-        flags |= IOVMF_DISCONT;
-        flags |= IOVMF_ALLOC;
-        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
-
         da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
         if (IS_ERR_VALUE(da))
                 goto err_iommu_vmap;
@@ -746,11 +756,11 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 {
         struct sg_table *sgt;
 
-        sgt = sgtable_alloc(bytes, flags);
+        sgt = sgtable_alloc(bytes, flags, da, pa);
         if (IS_ERR(sgt))
                 return PTR_ERR(sgt);
 
-        sgtable_fill_kmalloc(sgt, pa, bytes);
+        sgtable_fill_kmalloc(sgt, pa, da, bytes);
 
         da = map_iommu_region(obj, da, sgt, va, bytes, flags);
         if (IS_ERR_VALUE(da)) {
@@ -769,7 +779,7 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
  * @flags:      iovma and page property
  *
  * Creates 1-1-1 mapping and returns @da again, which can be
- * adjusted if 'IOVMF_DA_ANON' is set.
+ * adjusted if 'IOVMF_DA_FIXED' is not set.
  */
 u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
                u32 flags)
@@ -788,7 +798,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
         flags &= IOVMF_HW_MASK;
         flags |= IOVMF_LINEAR;
         flags |= IOVMF_MMIO;
-        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
 
         da = __iommu_kmap(obj, da, pa, va, bytes, flags);
         if (IS_ERR_VALUE(da))
@@ -811,7 +820,7 @@ void iommu_kunmap(struct iommu *obj, u32 da)
         struct sg_table *sgt;
         typedef void (*func_t)(const void *);
 
-        sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
+        sgt = unmap_vm_area(obj, da, (func_t)iounmap,
                             IOVMF_LINEAR | IOVMF_MMIO);
         if (!sgt)
                 dev_dbg(obj->dev, "%s: No sgt\n", __func__);
@@ -827,7 +836,7 @@ EXPORT_SYMBOL_GPL(iommu_kunmap);
  * @flags:      iovma and page property
  *
 * Allocate @bytes linearly and creates 1-1-1 mapping and returns
- * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
+ * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
 u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 {
@@ -847,7 +856,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
         flags &= IOVMF_HW_MASK;
         flags |= IOVMF_LINEAR;
         flags |= IOVMF_ALLOC;
-        flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
 
         da = __iommu_kmap(obj, da, pa, va, bytes, flags);
         if (IS_ERR_VALUE(da))
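
For readers who want to see the splitting policy introduced by this patch in isolation: below is a minimal, self-contained sketch in plain C, compiled outside the kernel. The SZ_* constants, the u32 typedef and the simplified iopgsz_max() are local stand-ins, and the da/pa/bytes values are made up for illustration; only the walk itself mirrors what the patched sgtable_nents() and sgtable_fill_kmalloc() do, capping each entry both by the alignment of (da | pa) and by the remaining length.

/*
 * Standalone sketch of the entry-splitting logic (not kernel code).
 * SZ_*, u32 and iopgsz_max() are simplified local stand-ins so this
 * compiles on its own; the da/pa/bytes values below are made up.
 */
#include <stdio.h>

#define SZ_4K   0x00001000u
#define SZ_64K  0x00010000u
#define SZ_1M   0x00100000u
#define SZ_16M  0x01000000u
#define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

typedef unsigned int u32;

/* largest supported page size not exceeding the remaining length */
static unsigned iopgsz_max(unsigned bytes)
{
        if (bytes >= SZ_16M)
                return SZ_16M;
        if (bytes >= SZ_1M)
                return SZ_1M;
        if (bytes >= SZ_64K)
                return SZ_64K;
        return SZ_4K;
}

/* same idea as the patch: largest page size the address is aligned to */
static unsigned max_alignment(u32 addr)
{
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(pagesize) && (addr & (pagesize[i] - 1)); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

int main(void)
{
        u32 da = 0x10000000, pa = 0x90000000;   /* hypothetical addresses */
        unsigned bytes = SZ_1M + SZ_64K;
        unsigned nr_entries = 0;

        /* walk the region the way sgtable_nents()/sgtable_fill_kmalloc() do:
         * each entry is capped by the alignment of (da | pa) and by the
         * remaining length */
        while (bytes) {
                unsigned ent_sz = max_alignment(da | pa);

                if (ent_sz > iopgsz_max(bytes))
                        ent_sz = iopgsz_max(bytes);

                printf("entry %u: da=%#x pa=%#x size=%#x\n",
                       nr_entries, da, pa, ent_sz);

                nr_entries++;
                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }
        printf("total entries: %u\n", nr_entries);
        return 0;
}

With the 16M-aligned addresses above and bytes = SZ_1M + SZ_64K, the sketch prints one 1M entry followed by one 64K entry. This is the point of the change: because the entry size is now derived from the actual alignment of both addresses rather than from the IOVMF_DA_ANON flag, the IOVMF_DA_FIXED path can also use superpages instead of being limited to the previous behaviour.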