Add the implementation of the page table callbacks for the
first-level page table.

Cc: Ashok Raj <[email protected]>
Cc: Jacob Pan <[email protected]>
Cc: Kevin Tian <[email protected]>
Cc: Liu Yi L <[email protected]>
Cc: Yi Sun <[email protected]>
Signed-off-by: Lu Baolu <[email protected]>
---
 drivers/iommu/intel-iommu.c | 56 +++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a314892ee72b..695a7a5fbe8e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -414,6 +414,7 @@ int for_each_device_domain(int (*fn)(struct 
device_domain_info *info,
 }
 
 const struct iommu_ops intel_iommu_ops;
+static const struct pgtable_ops first_lvl_pgtable_ops;
 static const struct pgtable_ops second_lvl_pgtable_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -2330,6 +2331,61 @@ static int __domain_mapping(struct dmar_domain *domain, 
unsigned long iov_pfn,
        return 0;
 }
 
+/*
+ * Map the IOVA range [iova, iova + size) to [paddr, paddr + size) through
+ * the first-level page table, with permissions @prot.
+ *
+ * NOTE(review): PAGE_ALIGN() rounds *up* to the next page boundary, so an
+ * unaligned @iova or @paddr would have its leading partial page skipped
+ * rather than mapped.  Presumably callers always pass page-aligned start
+ * addresses here -- please confirm; otherwise the starts should be rounded
+ * down (iova & PAGE_MASK) instead.
+ */
+static int first_lvl_domain_map_range(struct dmar_domain *domain,
+                                     unsigned long iova, phys_addr_t paddr,
+                                     size_t size, int prot)
+{
+       /* End address is rounded up so a trailing partial page is covered. */
+       return first_lvl_map_range(domain, PAGE_ALIGN(iova),
+                                  round_up(iova + size, PAGE_SIZE),
+                                  PAGE_ALIGN(paddr), prot);
+}
+
+/*
+ * Unmap the IOVA range [iova, iova + size) from the first-level page table.
+ * Returns the list of freed page-table pages for the caller to release
+ * (after any required TLB flush).
+ *
+ * NOTE(review): as with the map path, PAGE_ALIGN() rounds the start *up*;
+ * an unaligned @iova would leave its leading partial page mapped -- confirm
+ * that callers guarantee page-aligned starts.
+ */
+static struct page *
+first_lvl_domain_unmap_range(struct dmar_domain *domain,
+                            unsigned long iova, size_t size)
+{
+       return first_lvl_unmap_range(domain, PAGE_ALIGN(iova),
+                                    round_up(iova + size, PAGE_SIZE));
+}
+
+/*
+ * Translate @iova to its physical address by walking the first-level page
+ * table.  Thin adapter matching the pgtable_ops iova_to_phys signature.
+ */
+static phys_addr_t
+first_lvl_domain_iova_to_phys(struct dmar_domain *domain,
+                             unsigned long iova)
+{
+       return first_lvl_iova_to_phys(domain, iova);
+}
+
+/*
+ * Invalidate the IOTLB and device IOTLBs for [iova, iova + size) on a
+ * domain using the first-level page table.  The IOTLB invalidation is
+ * PASID-based, issued against domain->default_pasid.
+ *
+ * @ih: invalidation hint passed through to the invalidation descriptor.
+ */
+static void
+first_lvl_domain_flush_tlb_range(struct dmar_domain *domain,
+                                struct intel_iommu *iommu,
+                                unsigned long iova, size_t size, bool ih)
+{
+       unsigned long pages = aligned_nrpages(iova, size);
+       u16 did = domain->iommu_did[iommu->seq_id];
+       unsigned int mask;
+
+       if (pages) {
+               /*
+                * Align @iova down to a power-of-two multiple of pages so a
+                * single page-selective invalidation covers the whole range.
+                */
+               mask = ilog2(__roundup_pow_of_two(pages));
+               iova &= (u64)-1 << (VTD_PAGE_SHIFT + mask);
+       } else {
+               /*
+                * Zero-length range: fall back to invalidating the entire
+                * address space of the domain.
+                */
+               mask = MAX_AGAW_PFN_WIDTH;
+               iova = 0;
+               pages = -1;
+       }
+
+       iommu->flush.p_iotlb_inv(iommu, did, domain->default_pasid,
+                                iova, pages, ih);
+
+       /* Device IOTLBs must be flushed as well for ATS-capable devices. */
+       iommu_flush_dev_iotlb(domain, iova, mask);
+}
+
+/* Page table callbacks for domains backed by the first-level page table. */
+static const struct pgtable_ops first_lvl_pgtable_ops = {
+       .map_range              = first_lvl_domain_map_range,
+       .unmap_range            = first_lvl_domain_unmap_range,
+       .iova_to_phys           = first_lvl_domain_iova_to_phys,
+       .flush_tlb_range        = first_lvl_domain_flush_tlb_range,
+};
+
 static int second_lvl_domain_map_range(struct dmar_domain *domain,
                                       unsigned long iova, phys_addr_t paddr,
                                       size_t size, int prot)
-- 
2.17.1

_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu

Reply via email to