This implements the iova_to_phys() callback for the AMD IOMMU v1 page table,
which will be used by the IO page table framework.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpa...@amd.com>
---
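Note (illustrative, not part of the patch): a minimal sketch of how a caller
reaches the new callback through the generic io_pgtable_ops once
v1_alloc_pgtable() has installed it, mirroring what amd_iommu_iova_to_phys()
does below. The helper name example_iova_lookup() is made up for illustration.

static phys_addr_t example_iova_lookup(struct protection_domain *domain,
				       dma_addr_t iova)
{
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;

	/* Returns 0 when no present PTE maps the IOVA. */
	return ops->iova_to_phys(ops, iova);
}
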
 drivers/iommu/amd/io_pgtable.c | 22 ++++++++++++++++++++++
 drivers/iommu/amd/iommu.c      | 16 +---------------
 2 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 93ff8cb452ed..7841e5e1e563 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -494,6 +494,26 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
        return unmapped;
 }
 
+static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
+{
+       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
+       unsigned long offset_mask, pte_pgsize;
+       u64 *pte, __pte;
+
+       if (pgtable->mode == PAGE_MODE_NONE)
+               return iova;
+
+       pte = fetch_pte(pgtable, iova, &pte_pgsize);
+
+       if (!pte || !IOMMU_PTE_PRESENT(*pte))
+               return 0;
+
+       offset_mask = pte_pgsize - 1;
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
+
+       return (__pte & ~offset_mask) | (iova & offset_mask);
+}
+
 /*
  * ----------------------------------------------------
  */
@@ -505,6 +525,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 {
        struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
 
+       pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
+
        return &pgtable->iop;
 }
 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 87cea1cde414..9a1a16031e00 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2079,22 +2079,8 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 {
        struct protection_domain *domain = to_pdomain(dom);
        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
-       struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
-       unsigned long offset_mask, pte_pgsize;
-       u64 *pte, __pte;
 
-       if (domain->iop.mode == PAGE_MODE_NONE)
-               return iova;
-
-       pte = fetch_pte(pgtable, iova, &pte_pgsize);
-
-       if (!pte || !IOMMU_PTE_PRESENT(*pte))
-               return 0;
-
-       offset_mask = pte_pgsize - 1;
-       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
-
-       return (__pte & ~offset_mask) | (iova & offset_mask);
+       return ops->iova_to_phys(ops, iova);
 }
 
 static bool amd_iommu_capable(enum iommu_cap cap)
-- 
2.17.1
