From: Nadav Amit <na...@vmware.com>

A recent patch set out to enable selective page flushes on AMD IOMMU, but
neglected to adapt amd_iommu_iotlb_sync() to use them: the sync callback
still flushed the entire domain.

Adapt amd_iommu_iotlb_sync() to perform a selective, ranged flush, and
change amd_iommu_unmap() to collect the unmapped pages in the iotlb
gather. As a defensive measure, to avoid potential issues similar to
those that the Intel IOMMU driver recently encountered, also flush the
page-walk caches by always setting the "pde" parameter. This can be
removed later.

Cc: Joerg Roedel <j...@8bytes.org>
Cc: Will Deacon <w...@kernel.org>
Cc: Jiajun Cao <caojia...@vmware.com>
Cc: Robin Murphy <robin.mur...@arm.com>
Cc: Lu Baolu <baolu...@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-ker...@vger.kernel.org
Signed-off-by: Nadav Amit <na...@vmware.com>
---
 drivers/iommu/amd/iommu.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
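
Note for reviewers unfamiliar with the gather API used below: here is a
minimal, compilable user-space model of the iommu_iotlb_gather range
arithmetic. It is illustrative only; the real definitions live in
include/linux/iommu.h, and the struct and helper names below merely
mimic them:

	/* gather_model.c: model of how unmap calls accumulate one flush
	 * range. 'end' is inclusive, matching the kernel's
	 * iommu_iotlb_gather_add_range(), which is why a flush size is
	 * computed as end - start + 1. (The real helper also forces a
	 * sync when a disjoint page is added; that is omitted here.)
	 */
	#include <stdio.h>
	#include <limits.h>

	struct gather {
		unsigned long start;	/* lowest IOVA collected so far */
		unsigned long end;	/* highest byte collected (inclusive) */
	};

	static void gather_init(struct gather *g)
	{
		g->start = ULONG_MAX;
		g->end = 0;
	}

	static void gather_add_page(struct gather *g, unsigned long iova,
				    unsigned long size)
	{
		unsigned long end = iova + size - 1;	/* inclusive end */

		if (iova < g->start)
			g->start = iova;
		if (end > g->end)
			g->end = end;
	}

	int main(void)
	{
		struct gather g;

		gather_init(&g);
		gather_add_page(&g, 0x1000, 0x1000);
		gather_add_page(&g, 0x2000, 0x1000);
		printf("flush 0x%lx, size 0x%lx\n",
		       g.start, g.end - g.start + 1);	/* 0x1000, 0x2000 */
		return 0;
	}

In kernel context, the caller-side flow that this patch makes selective
is, as a sketch using the standard IOMMU API:

	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);
	iommu_unmap_fast(domain, iova, size, &gather);	/* collects pages */
	iommu_iotlb_sync(domain, &gather);	/* one ranged flush, not full-domain */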

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3ac42bbdefc6..3e40f6610b6a 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2059,12 +2059,17 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 {
        struct protection_domain *domain = to_pdomain(dom);
        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+       size_t r;
 
        if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
            (domain->iop.mode == PAGE_MODE_NONE))
                return 0;
 
-       return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+       r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+
+       iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+
+       return r;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2167,7 +2172,13 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
 {
-       amd_iommu_flush_iotlb_all(domain);
+       struct protection_domain *dom = to_pdomain(domain);
+       unsigned long flags;
+
+       spin_lock_irqsave(&dom->lock, flags);
+       __domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
+       amd_iommu_domain_flush_complete(dom);
+       spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static int amd_iommu_def_domain_type(struct device *dev)
-- 
2.25.1
