Provide a way to know the maximum IOVA that is usable for DMA.
This will later be used by PV-IOMMU to provide limits to the guest.

Signed-off-by: Teddy Astie <[email protected]>
---
v7: introduced
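
Note for reviewers: with bits = 12 + 9 * paging_mode on AMD, paging modes
3 to 6 yield 39, 48, 57 and 66 bits respectively, which is why the result
is clamped to the full 64-bit IOVA space for 6-level page tables.

Below is a minimal sketch of how a PV-IOMMU query op might consume the new
hook, assuming a matching prototype for iommu_get_max_iova() is exported
from xen/iommu.h. The pv_iommu_* names and structure layout are
hypothetical, purely illustrative, and not part of this series:

    #include <xen/errno.h>
    #include <xen/iommu.h>
    #include <xen/sched.h>

    /* Hypothetical capability block reported to the guest. */
    struct pv_iommu_capabilities {
        uint64_t max_iova;  /* Highest device-visible address, inclusive. */
    };

    static int pv_iommu_query_capabilities(struct domain *d,
                                           struct pv_iommu_capabilities *cap)
    {
        /* 0 means the active IOMMU driver does not report a limit. */
        cap->max_iova = iommu_get_max_iova(d);

        return cap->max_iova ? 0 : -EOPNOTSUPP;
    }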
---
 xen/drivers/passthrough/amd/pci_amd_iommu.c | 15 +++++++++++++++
 xen/drivers/passthrough/iommu.c             | 10 ++++++++++
 xen/drivers/passthrough/vtd/iommu.c         |  8 ++++++++
 xen/include/xen/iommu.h                     |  3 +++
 4 files changed, 36 insertions(+)

diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index 3d08a925d6..4185e4cd64 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -706,6 +706,20 @@ static void cf_check amd_dump_page_tables(struct domain *d)
     }
 }
 
+static uint64_t cf_check amd_iommu_get_max_iova(struct domain *d)
+{
+    const struct domain_iommu *hd = dom_iommu(d);
+    unsigned int bits = 12 + hd->arch.amd.paging_mode * 9;
+
+    /* If paging_mode == 6, which indicates 6-level page tables,
+     * we have bits == 66 while the GPA space is still 64 bits wide.
+     */
+    if ( bits >= 64 )
+        return ~0LLU;
+
+    return (1LLU << bits) - 1;
+}
+
 static const struct iommu_ops __initconst_cf_clobber _iommu_ops = {
     .page_sizes = PAGE_SIZE_4K | PAGE_SIZE_2M | PAGE_SIZE_1G,
     .init = amd_iommu_domain_init,
@@ -736,6 +750,7 @@ static const struct iommu_ops __initconst_cf_clobber _iommu_ops = {
     .get_reserved_device_memory = amd_iommu_get_reserved_device_memory,
     .dump_page_tables = amd_dump_page_tables,
     .quiesce = amd_iommu_quiesce,
+    .get_max_iova = amd_iommu_get_max_iova,
 };
 
 static const struct iommu_init_ops __initconstrel _iommu_init_ops = {
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index feda2e390b..4434a9dcd0 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -1229,6 +1229,16 @@ bool iommu_has_feature(struct domain *d, enum iommu_feature feature)
     return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
 }
 
+uint64_t iommu_get_max_iova(struct domain *d)
+{
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    if ( !is_iommu_enabled(d) || !hd->platform_ops->get_max_iova )
+        return 0;
+
+    return iommu_call(hd->platform_ops, get_max_iova, d);
+}
+
 #define MAX_EXTRA_RESERVED_RANGES 20
 struct extra_reserved_range {
     unsigned long start;
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index a602edd755..af3c6fb178 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2595,6 +2595,13 @@ static int cf_check intel_iommu_remove_devfn(struct domain *d, struct pci_dev *p
     return unapply_context_single(d, drhd->iommu, prev_ctx, pdev->bus, devfn);
 }
 
+static uint64_t cf_check intel_iommu_get_max_iova(struct domain *d)
+{
+    const struct domain_iommu *hd = dom_iommu(d);
+
+    return (1LLU << agaw_to_width(hd->arch.vtd.agaw)) - 1;
+}
+
 static void cf_check vtd_quiesce(void)
 {
     const struct acpi_drhd_unit *drhd;
@@ -2644,6 +2651,7 @@ static const struct iommu_ops __initconst_cf_clobber vtd_ops = {
     .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_page_tables = vtd_dump_page_tables,
     .quiesce = vtd_quiesce,
+    .get_max_iova = intel_iommu_get_max_iova,
 };
 
 const struct iommu_init_ops __initconstrel intel_iommu_init_ops = {
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 8c20f575ee..66951c9809 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -403,6 +403,9 @@ struct iommu_ops {
 #endif
     /* Inhibit all interrupt generation, to be used at shutdown. */
     void (*quiesce)(void);
+
+    /* Get the maximum device address (IOVA) usable by the domain. */
+    uint64_t (*get_max_iova)(struct domain *d);
 };
 
 /*
-- 
2.51.2



--
Teddy Astie | Vates XCP-ng Developer

XCP-ng & Xen Orchestra - Vates solutions

web: https://vates.tech

