From: "Isaac J. Manjarres" <isa...@codeaurora.org>

Since iommu_pgsize can calculate how many pages of the
same size can be mapped/unmapped before the next largest
page size boundary, add support for invoking an IOMMU
driver's map_pages() callback, if it provides one.

Signed-off-by: Isaac J. Manjarres <isa...@codeaurora.org>
Suggested-by: Will Deacon <w...@kernel.org>
Signed-off-by: Georgi Djakov <quic_c_gdj...@quicinc.com>
---
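Note (not part of the commit): a minimal, hypothetical sketch of what a
driver-side map_pages() callback could look like, assuming an existing
single-page helper named my_iommu_map_one() (both function names are made
up for illustration); a real driver would typically replace the loop with
a single hardware-specific multi-entry operation:

static int my_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t pgsize, size_t pgcount,
                              int prot, gfp_t gfp, size_t *mapped)
{
        size_t done = 0;
        int ret = 0;

        /* Map pgcount pages of pgsize bytes each; stop at the first failure. */
        while (pgcount--) {
                ret = my_iommu_map_one(domain, iova + done, paddr + done,
                                       pgsize, prot, gfp);
                if (ret)
                        break;
                done += pgsize;
        }

        /* Report partial progress so the core map path can unwind on error. */
        *mapped = done;
        return ret;
}
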
 drivers/iommu/iommu.c | 43 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 725622c7e603..89f8ab6a72a9 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2429,6 +2429,30 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
        return pgsize;
 }
 
+static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+                            phys_addr_t paddr, size_t size, int prot,
+                            gfp_t gfp, size_t *mapped)
+{
+       const struct iommu_ops *ops = domain->ops;
+       size_t pgsize, count;
+       int ret;
+
+       pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
+
+       pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+                iova, &paddr, pgsize, count);
+
+       if (ops->map_pages) {
+               ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+                                    gfp, mapped);
+       } else {
+               ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+               *mapped = ret ? 0 : pgsize;
+       }
+
+       return ret;
+}
+
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
@@ -2439,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        phys_addr_t orig_paddr = paddr;
        int ret = 0;
 
-       if (unlikely(ops->map == NULL ||
+       if (unlikely(!(ops->map || ops->map_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;
 
@@ -2463,18 +2487,21 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
+               size_t mapped = 0;
 
-               pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-                        iova, &paddr, pgsize);
-               ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
+               ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
+                                       &mapped);
+               /*
+                * Some pages may have been mapped, even if an error occurred,
+                * so we should account for those so they can be unmapped.
+                */
+               size -= mapped;
 
                if (ret)
                        break;
 
-               iova += pgsize;
-               paddr += pgsize;
-               size -= pgsize;
+               iova += mapped;
+               paddr += mapped;
        }
 
        /* unroll mapping in case something went wrong */