Make iommu_flush_iotlb_psi() and flush_unmaps() easier to read.
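
Fold the two DSI-fallback checks in iommu_flush_iotlb_psi() into a single
condition with one exit, and let flush_unmaps() skip IOMMUs with nothing
queued via an early continue. After the change the flush path reduces to
roughly the if/else below (a sketch mirroring the first hunk):

	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
		/* no PSI support, or the range is too large: flush the domain */
		rc = iommu->flush.flush_iotlb(iommu, did, 0, 0,
					      DMA_TLB_DSI_FLUSH,
					      non_present_entry_flush);
	else
		/* page-selective invalidation of the requested range */
		rc = iommu->flush.flush_iotlb(iommu, did, addr, mask,
					      DMA_TLB_PSI_FLUSH,
					      non_present_entry_flush);
	return rc;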

Signed-off-by: Yu Zhao <yu.z...@intel.com>
---
 drivers/pci/intel-iommu.c |   46 +++++++++++++++++++++-----------------------
 1 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 3dfecb2..df92764 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -917,30 +917,27 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
        u64 addr, unsigned int pages, int non_present_entry_flush)
 {
-       unsigned int mask;
+       int rc;
+       unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 
        BUG_ON(addr & (~VTD_PAGE_MASK));
        BUG_ON(pages == 0);
 
-       /* Fallback to domain selective flush if no PSI support */
-       if (!cap_pgsel_inv(iommu->cap))
-               return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                                               DMA_TLB_DSI_FLUSH,
-                                               non_present_entry_flush);
-
        /*
+        * Fall back to domain selective flush if PSI is not supported or the
+        * size is too big.
         * PSI requires page size to be 2 ^ x, and the base address is naturally
         * aligned to the size
         */
-       mask = ilog2(__roundup_pow_of_two(pages));
-       /* Fallback to domain selective flush if size is too big */
-       if (mask > cap_max_amask_val(iommu->cap))
-               return iommu->flush.flush_iotlb(iommu, did, 0, 0,
-                       DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
-       return iommu->flush.flush_iotlb(iommu, did, addr, mask,
-                                       DMA_TLB_PSI_FLUSH,
-                                       non_present_entry_flush);
+       if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+               rc = iommu->flush.flush_iotlb(iommu, did, 0, 0,
+                                               DMA_TLB_DSI_FLUSH,
+                                               non_present_entry_flush);
+       else
+               rc = iommu->flush.flush_iotlb(iommu, did, addr, mask,
+                                               DMA_TLB_PSI_FLUSH,
+                                               non_present_entry_flush);
+       return rc;
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -2293,15 +2290,16 @@ static void flush_unmaps(void)
                if (!iommu)
                        continue;
 
-               if (deferred_flush[i].next) {
-                       iommu->flush.flush_iotlb(iommu, 0, 0, 0,
-                                                DMA_TLB_GLOBAL_FLUSH, 0);
-                       for (j = 0; j < deferred_flush[i].next; j++) {
-                               __free_iova(&deferred_flush[i].domain[j]->iovad,
-                                               deferred_flush[i].iova[j]);
-                       }
-                       deferred_flush[i].next = 0;
+               if (!deferred_flush[i].next)
+                       continue;
+
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+                                        DMA_TLB_GLOBAL_FLUSH, 0);
+               for (j = 0; j < deferred_flush[i].next; j++) {
+                       __free_iova(&deferred_flush[i].domain[j]->iovad,
+                                       deferred_flush[i].iova[j]);
                }
+               deferred_flush[i].next = 0;
        }
 
        list_size = 0;
-- 
1.5.6.4
