This implements the .cache_invalidate_user() callback to support iotlb
flush for nested domain.

Signed-off-by: Lu Baolu <[email protected]>
Signed-off-by: Yi Liu <[email protected]>
---
 drivers/iommu/intel/nested.c | 54 ++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index b5a5563ab32c..44ad48db7ea0 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -73,9 +73,63 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
        kfree(to_dmar_domain(domain));
 }
 
+static void domain_flush_iotlb_psi(struct dmar_domain *domain,
+                                  u64 addr, unsigned long npages)
+{
+       struct iommu_domain_info *info; /* per-IOMMU attachment info for this domain */
+       unsigned long i;
+
+       xa_for_each(&domain->iommu_array, i, info) /* flush on every IOMMU the domain spans */
+               iommu_flush_iotlb_psi(info->iommu, domain,
+                                     addr >> VTD_PAGE_SHIFT, npages, 1, 0);
+}
+
+static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
+                                             struct iommu_user_data_array *array,
+                                             u32 *cerror_idx)
+{
+       struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+       struct iommu_hwpt_vtd_s1_invalidate inv_info;
+       u32 index;
+       int ret = 0; /* must be initialized: the loop body may never run (entry_num == 0) */
+
+       /* REVISIT:
+        * VT-d has defined ITE, ICE, IQE for invalidation failure per hardware,
+        * but no error code yet, so just set the error code to be 0.
+        */
+       *cerror_idx = 0;
+
+       for (index = 0; index < array->entry_num; index++) {
+               ret = iommu_copy_struct_from_user_array(&inv_info, array,
+                                                       IOMMU_HWPT_DATA_VTD_S1,
+                                                       index, __reserved);
+               if (ret) {
+                       pr_err_ratelimited("Failed to fetch invalidation request\n");
+                       break;
+               }
+
+               if (inv_info.__reserved || (inv_info.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
+                   !IS_ALIGNED(inv_info.addr, VTD_PAGE_SIZE)) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (inv_info.addr == 0 && inv_info.npages == -1)
+                       intel_flush_iotlb_all(domain);
+               else
+                       domain_flush_iotlb_psi(dmar_domain,
+                                              inv_info.addr, inv_info.npages);
+       }
+
+       array->entry_num = index;
+
+       return ret;
+}
+
 static const struct iommu_domain_ops intel_nested_domain_ops = {
        .attach_dev             = intel_nested_attach_dev,
        .free                   = intel_nested_domain_free,
+       .cache_invalidate_user  = intel_nested_cache_invalidate_user,
 };
 
 struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
-- 
2.34.1


Reply via email to