Hi,

On 1/28/21 4:00 AM, Chuck Lever wrote:
From: Yong Wu <[email protected]>

At the end of __iommu_map, it always calls iotlb_sync_map.

This patch moves iotlb_sync_map out of __iommu_map, since it is
unnecessary to call it for each sg segment — especially as iotlb_sync_map
currently flushes the entire TLB. Add a little helper, _iommu_map, for this.

Signed-off-by: Yong Wu <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
Signed-off-by: Chuck Lever <[email protected]>
---
  drivers/iommu/iommu.c |   23 ++++++++++++++++++-----
  1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ffeebda8d6de..c304a6a30d42 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2426,9 +2426,6 @@ static int __iommu_map(struct iommu_domain *domain, 
unsigned long iova,
                size -= pgsize;
        }
- if (ops->iotlb_sync_map)
-               ops->iotlb_sync_map(domain);
-
        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
@@ -2438,18 +2435,31 @@ static int __iommu_map(struct iommu_domain *domain, 
unsigned long iova,
        return ret;
  }
+static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
+                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+{
+       const struct iommu_ops *ops = domain->ops;
+       int ret;
+
+       ret = __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+       if (ret == 0 && ops->iotlb_sync_map)
+               ops->iotlb_sync_map(domain);

The previous code called iotlb_sync_map() regardless of whether the mapping
succeeded. Here the logic changes, and the callback is only invoked if the
mapping succeeds.

Any reason? It's safer to always call iotlb_sync_map(), even in the
failed-mapping case. That way we can ensure the consistency of the TLB
cache as much as possible.

Best regards,
baolu

+
+       return ret;
+}
+
  int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
  {
        might_sleep();
-       return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+       return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
  }
  EXPORT_SYMBOL_GPL(iommu_map);
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
  {
-       return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+       return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
  }
  EXPORT_SYMBOL_GPL(iommu_map_atomic);
@@ -2533,6 +2543,7 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                             struct scatterlist *sg, unsigned int nents, int 
prot,
                             gfp_t gfp)
  {
+       const struct iommu_ops *ops = domain->ops;
        size_t len = 0, mapped = 0;
        phys_addr_t start;
        unsigned int i = 0;
@@ -2563,6 +2574,8 @@ static size_t __iommu_map_sg(struct iommu_domain *domain, 
unsigned long iova,
                        sg = sg_next(sg);
        }
+ if (ops->iotlb_sync_map)
+               ops->iotlb_sync_map(domain);
        return mapped;
out_err:


_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to