From: Nadav Amit <[email protected]>

Refactor iommu_iotlb_gather_add_page() and factor out the logic that
detects whether IOTLB gather range and a new range are disjoint. To be
used by the next patch that implements different gathering logic for
AMD.

Cc: Joerg Roedel <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Jiajun Cao <[email protected]>
Cc: Robin Murphy <[email protected]>
Cc: Lu Baolu <[email protected]>
Cc: [email protected]
Cc: [email protected]>
Signed-off-by: Nadav Amit <[email protected]>
---
 include/linux/iommu.h | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f254c62f3720..b5a2bfc68fb0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -497,6 +497,28 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
        iommu_iotlb_gather_init(iotlb_gather);
 }
 
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered
+ * range are disjoint. For many IOMMUs, flushing the IOMMU in this case is
+ * better than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+                                   unsigned long iova, size_t size)
+{
+       unsigned long start = iova, end = start + size - 1;
+
+       return gather->end != 0 &&
+               (end + 1 < gather->start || start > gather->end + 1);
+}
+
+
 /**
  * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
  * @gather: TLB gather data
@@ -533,20 +555,16 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
                                               struct iommu_iotlb_gather *gather,
                                               unsigned long iova, size_t size)
 {
-       unsigned long start = iova, end = start + size - 1;
-
        /*
         * If the new page is disjoint from the current range or is mapped at
         * a different granularity, then sync the TLB so that the gather
         * structure can be rewritten.
         */
-       if (gather->pgsize != size ||
-           end + 1 < gather->start || start > gather->end + 1) {
-               if (gather->pgsize)
-                       iommu_iotlb_sync(domain, gather);
-               gather->pgsize = size;
-       }
+       if ((gather->pgsize && gather->pgsize != size) ||
+           iommu_iotlb_gather_is_disjoint(gather, iova, size))
+               iommu_iotlb_sync(domain, gather);
 
+       gather->pgsize = size;
        iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
@@ -730,6 +748,13 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
 {
 }
 
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+                                   unsigned long iova, size_t size)
+{
+       return false;
+}
+
 static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
                                                unsigned long iova, size_t size)
 {
-- 
2.25.1

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to