Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--  drivers/iommu/iommu.c | 86
1 file changed, 45 insertions, 41 deletions
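
For reference, the page-size selection that the new iommu_pgsize() helper below performs can be modelled in plain C. This is a minimal userspace sketch, not part of the patch: pick_pgsize() is a hypothetical stand-in, __builtin_clzl()/__builtin_ctzl() replace the kernel's __fls()/__ffs(), and the 4K/2M/1G pgsize_bitmap is an assumed example value rather than anything a real IOMMU driver reports.

/*
 * Userspace model of the page-size choice made by iommu_pgsize():
 * given the combined alignment of iova and paddr (addr_merge), the
 * remaining size, and a bitmap of hardware-supported page sizes,
 * return the largest page that satisfies all three constraints.
 */
#include <stdio.h>
#include <stddef.h>

static size_t pick_pgsize(unsigned long addr_merge, size_t size,
			  unsigned long pgsize_bitmap)
{
	unsigned int pgsize_idx;
	unsigned long pgsize;

	/* Max page size that still fits into 'size' (kernel: __fls(size)) */
	pgsize_idx = sizeof(long) * 8 - 1 - __builtin_clzl(size);

	/* Max page size allowed by the iova/paddr alignment (kernel: __ffs()) */
	if (addr_merge) {
		unsigned int align_pgsize_idx = __builtin_ctzl(addr_merge);

		if (align_pgsize_idx < pgsize_idx)
			pgsize_idx = align_pgsize_idx;
	}

	/* Mask of acceptable sizes, restricted to what the hardware supports */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

	/*
	 * The kernel BUG_ON()s if nothing is left; this sketch simply
	 * assumes the bitmap always covers the minimum page size.
	 */
	return 1UL << (sizeof(long) * 8 - 1 - __builtin_clzl(pgsize));
}

int main(void)
{
	/* assumed example bitmap: 4K, 2M and 1G pages supported */
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* 2M-aligned iova and paddr, 4M left to map -> expect a 2M page */
	printf("%#zx\n", pick_pgsize(0x200000UL | 0x40200000UL, 0x400000, bitmap));

	/* only 4K alignment -> expect a 4K page */
	printf("%#zx\n", pick_pgsize(0x201000UL, 0x400000, bitmap));
	return 0;
}

Run as-is, this prints 0x200000 and 0x1000: the helper always drops to the largest page that both the remaining size and the iova/paddr alignment allow, which is what lets iommu_map() and iommu_unmap() share the logic after this patch.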
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d8f98b14e2f..fbe9ca734f8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+static size_t iommu_pgsize(struct iommu_domain *domain,
+ unsigned long addr_merge, size_t size)
+{
+ unsigned int pgsize_idx;
+ size_t pgsize;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* need to consider alignment requirements ? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by address */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= domain->ops->pgsize_bitmap;
+
+ /* make sure we're still sane */
+ BUG_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ return pgsize;
+}
+
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
* size of the smallest page supported by the hardware
*/
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
- pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
- "0x%x\n", iova, (unsigned long)paddr,
- (unsigned long)size, min_pagesz);
+ pr_err("unaligned: iova 0x%lx pa 0x%pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, min_pagesz);
return -EINVAL;
}
- pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
- (unsigned long)paddr, (unsigned long)size);
+ pr_debug("map: iova 0x%lx pa 0x%pa size 0x%zx\n", iova, &paddr, size);
while (size) {
- unsigned long pgsize, addr_merge = iova | paddr;
- unsigned int pgsize_idx;
-
- /* Max page size that still fits into 'size' */
- pgsize_idx = __fls(size);
-
- /* need to consider alignment requirements ? */
- if (likely(addr_merge)) {
- /* Max page size allowed by both iova and paddr */
- unsigned int align_pgsize_idx = __ffs(addr_merge);
-
- pgsize_idx = min(pgsize_idx, align_pgsize_idx);
- }
-
- /* build a mask of acceptable page sizes */
- pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
- /* throw away page sizes not supported by the hardware */
- pgsize &= domain->ops->pgsize_bitmap;
-
- /* make sure we're still sane */
- BUG_ON(!pgsize);
-
- /* pick the biggest page */
- pgsize_idx = __fls(pgsize);
- pgsize = 1UL << pgsize_idx;
+ size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
- pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
- (unsigned long)paddr, pgsize);
+ pr_debug("mapping: iova 0x%lx pa 0x%pa pgsize 0x%zx\n",
+ iova, &paddr, pgsize);
ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
* by the hardware
*/
if (!IS_ALIGNED(iova | size, min_pagesz)) {
- pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
- iova, (unsigned long)size, min_pagesz);
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
return -EINVAL;
}
- pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
- (unsigned long)size);
+ pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
/*
* Keep iterating until we either unmap 'size' bytes (or more)
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t left = size - unmapped;
+ size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
- unmapped_page = domain->ops->unmap(domain, iova, left);
+ unmapped_page = domain->ops->unmap(domain, iova, pgsize);
if (!unmapped_page)
break;
- pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
- (unsigned long)unmapped_page);
+ pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
iova += unmapped_page;
unmapped += unmapped_page;
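
A short note on the format-string side of the patch: the old pr_err()/pr_debug() calls cast phys_addr_t and size_t down to unsigned long, which silently drops the upper bits of a 64-bit phys_addr_t on 32-bit kernels (e.g. ARM LPAE or x86 PAE). %pa takes a pointer to the phys_addr_t so printk can emit the full-width value, and %zx is the natural specifier for size_t. The userspace sketch below only illustrates the truncation problem; the phys_addr_t typedef and the sample address are assumptions for the demo, and %pa itself exists only inside the kernel.

/*
 * Demo of why casting a 64-bit physical address to unsigned long is
 * lossy on 32-bit builds.  Compile with -m32 to see the first line
 * drop the upper bits; on an LP64 host both lines print the same.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint64_t phys_addr_t;	/* what CONFIG_PHYS_ADDR_T_64BIT provides */

int main(void)
{
	phys_addr_t paddr = 0x1ffff0000ULL;	/* physical address above 4 GiB */
	size_t size = 0x2000;

	/* old style: cast to unsigned long, truncated when long is 32-bit */
	printf("cast:  pa 0x%lx size 0x%lx\n",
	       (unsigned long)paddr, (unsigned long)size);

	/* full-width print, analogous to what %pa/%zx give printk */
	printf("fixed: pa 0x%" PRIx64 " size 0x%zx\n", paddr, size);
	return 0;
}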