| author | Joerg Roedel <joerg.roedel@amd.com> | 2010-01-08 13:35:09 +0100 |
|---|---|---|
| committer | Joerg Roedel <joerg.roedel@amd.com> | 2010-03-07 18:01:11 +0100 |
| commit | cefc53c7f494240d4813c80154c7617452d1904d (patch) | |
| tree | 675370ec20df0841e404ed7b191d2d41f30f7e52 /drivers/base | |
| parent | 4abc14a733f9002c05623db755aaafdd27fa7a91 (diff) | |
iommu-api: Add iommu_map and iommu_unmap functions
These two functions provide support for mapping and
unmapping physical addresses to I/O virtual addresses. The
difference from iommu_(un)map_range() is that the new
functions take a gfp_order parameter instead of a size. This
makes it easier for IOMMU backend implementations to detect
whether a given range can be mapped by larger page sizes.
These new functions should replace the old ones in the long
term.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
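For orientation only (not part of the commit): a rough caller sketch contrasting the old size-based iommu_map_range()/iommu_unmap_range() calls with the new order-based iommu_map()/iommu_unmap(). The domain, addresses and protection flags are made-up example values, and the range-function signatures are assumed to match the linux/iommu.h of this era.

```c
#include <linux/iommu.h>

/*
 * Hedged caller sketch (not from this patch): map and unmap a 16 KiB,
 * size-aligned region, first with the old byte-size interface and then
 * with the new page-order interface added by this commit.
 */
static int example_map_16k(struct iommu_domain *dom)
{
	unsigned long iova  = 0x100000;	/* I/O virtual address, aligned to 16 KiB */
	phys_addr_t   paddr = 0x200000;	/* physical address,    aligned to 16 KiB */
	int prot = IOMMU_READ | IOMMU_WRITE;
	int ret;

	/* Old interface: the size is given in bytes. */
	ret = iommu_map_range(dom, iova, paddr, 16384, prot);
	if (ret)
		return ret;
	iommu_unmap_range(dom, iova, 16384);

	/* New interface: 16384 bytes == four 4 KiB pages == gfp_order 2. */
	ret = iommu_map(dom, iova, paddr, 2, prot);
	if (ret)
		return ret;
	iommu_unmap(dom, iova, 2);

	return 0;
}
```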
Diffstat (limited to 'drivers/base')
-rw-r--r-- | drivers/base/iommu.c | 31 |
1 file changed, 31 insertions, 0 deletions
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
index f4c86c42929..cf7cbec116e 100644
--- a/drivers/base/iommu.c
+++ b/drivers/base/iommu.c
@@ -107,3 +107,34 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 	return iommu_ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+	      phys_addr_t paddr, int gfp_order, int prot)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size         = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON((iova | paddr) & invalid_mask);
+
+	return iommu_ops->map_range(domain, iova, paddr, size, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map);
+
+int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+{
+	unsigned long invalid_mask;
+	size_t size;
+
+	size         = 0x1000UL << gfp_order;
+	invalid_mask = size - 1;
+
+	BUG_ON(iova & invalid_mask);
+
+	iommu_ops->unmap_range(domain, iova, size);
+
+	return gfp_order;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
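Both new functions compute the mapping size as 0x1000UL << gfp_order, i.e. a power-of-two number of 4 KiB pages, and BUG() if iova (and, for mapping, paddr) is not aligned to that size. A hedged sketch of how a caller holding a byte count might derive the order, assuming the kernel's get_order() helper is reachable via asm/page.h; the wrapper function below is hypothetical, not part of the patch:

```c
#include <linux/iommu.h>
#include <asm/page.h>	/* get_order() */

/*
 * Illustrative helper (not in this commit): map a power-of-two sized,
 * size-aligned region by converting its byte length to a page order.
 */
static int example_map_sized(struct iommu_domain *dom, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	/* Smallest order such that (PAGE_SIZE << order) >= size. */
	int order = get_order(size);

	/* iommu_map() BUG()s unless iova and paddr are aligned to the mapped size. */
	return iommu_map(dom, iova, paddr, order, prot);
}
```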