In preparation for the strict vs. non-strict decision for DMA domains to
be expressed in the domain type, make sure we expose our flush queue
awareness by accepting the new domain type, and test the specific
feature flag where we want to identify DMA domains in general. The DMA
ops setup can simply be made unconditional, since iommu-dma already
knows not to touch identity domains.

Signed-off-by: Robin Murphy <robin.mur...@arm.com>
---
 drivers/iommu/intel/iommu.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
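
As a note for reviewers rather than for the commit message: the
feature-flag tests in this patch rely on the domain type encoding
introduced by the iommu core patches earlier in this series. The
snippet below is a minimal standalone sketch of that assumed layout
(plain userspace C, for illustration only; the authoritative
definitions live in include/linux/iommu.h). It shows why checking the
__IOMMU_DOMAIN_DMA_API bit matches both IOMMU_DOMAIN_DMA and
IOMMU_DOMAIN_DMA_FQ while leaving identity and unmanaged domains
alone:

  #include <assert.h>

  /* Assumed flag layout, mirroring the iommu core series (sketch only) */
  #define __IOMMU_DOMAIN_PAGING   (1U << 0)  /* supports iommu_map/unmap   */
  #define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* domain used by the DMA API */
  #define __IOMMU_DOMAIN_PT       (1U << 2)  /* identity mapped            */
  #define __IOMMU_DOMAIN_DMA_FQ   (1U << 3)  /* DMA API uses a flush queue */

  #define IOMMU_DOMAIN_IDENTITY   (__IOMMU_DOMAIN_PT)
  #define IOMMU_DOMAIN_UNMANAGED  (__IOMMU_DOMAIN_PAGING)
  #define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING | \
                                   __IOMMU_DOMAIN_DMA_API)
  #define IOMMU_DOMAIN_DMA_FQ     (IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_DMA_FQ)

  int main(void)
  {
          /* Both DMA domain types carry the DMA-API bit... */
          assert(IOMMU_DOMAIN_DMA & __IOMMU_DOMAIN_DMA_API);
          assert(IOMMU_DOMAIN_DMA_FQ & __IOMMU_DOMAIN_DMA_API);
          /* ...while identity and unmanaged domains do not. */
          assert(!(IOMMU_DOMAIN_IDENTITY & __IOMMU_DOMAIN_DMA_API));
          assert(!(IOMMU_DOMAIN_UNMANAGED & __IOMMU_DOMAIN_DMA_API));
          return 0;
  }

The same property is what lets the probe_finalize change below call
iommu_setup_dma_ops() unconditionally: per the commit message, iommu-dma
only takes effect for DMA domains and leaves identity domains untouched.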

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index e2add5a0caef..77d322272743 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -601,7 +601,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
        int iommu_id;
 
        /* si_domain and vm domain should not get here. */
-       if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+       if (WARN_ON(!(domain->domain.type & __IOMMU_DOMAIN_DMA_API)))
                return NULL;
 
        for_each_domain_iommu(iommu_id, domain)
@@ -1035,7 +1035,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (domain_use_first_level(domain)) {
                                pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-                               if (domain->domain.type == IOMMU_DOMAIN_DMA)
+                               if (domain->domain.type & __IOMMU_DOMAIN_DMA_API)
                                        pteval |= DMA_FL_PTE_ACCESS;
                        }
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -2346,7 +2346,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        if (domain_use_first_level(domain)) {
                attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
-               if (domain->domain.type == IOMMU_DOMAIN_DMA) {
+               if (domain->domain.type & __IOMMU_DOMAIN_DMA_API) {
                        attr |= DMA_FL_PTE_ACCESS;
                        if (prot & DMA_PTE_WRITE)
                                attr |= DMA_FL_PTE_DIRTY;
@@ -4528,6 +4528,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 
        switch (type) {
        case IOMMU_DOMAIN_DMA:
+       case IOMMU_DOMAIN_DMA_FQ:
        case IOMMU_DOMAIN_UNMANAGED:
                dmar_domain = alloc_domain(0);
                if (!dmar_domain) {
@@ -5164,12 +5165,8 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
-       if (domain && domain->type == IOMMU_DOMAIN_DMA)
-               iommu_setup_dma_ops(dev, 0, U64_MAX);
-       else
-               set_dma_ops(dev, NULL);
+       set_dma_ops(dev, NULL);
+       iommu_setup_dma_ops(dev, 0, U64_MAX);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
-- 
2.25.1
