The minimum per-IOMMU PRQ queue size is one 4K page, which already holds
more entries than the hardcoded limit of 32 in the current VT-d code.
Some devices can support up to 512 outstanding page requests but are
underutilized by this limit of 32. Although 32 gives some rough fairness
when multiple devices share the same IOMMU PRQ queue, it is far from
optimal for customized use cases. Extend the per-IOMMU PRQ queue size to
four 4K pages and let devices have as many outstanding page requests as
they can.
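
For reference, here is a minimal standalone sketch (not part of the
patch) of the arithmetic behind the new macros: each VT-d page request
descriptor is 32 bytes (0x20), so a queue of (0x1000 << PRQ_ORDER)
bytes holds that size >> 5 entries.

    #include <stdio.h>

    /* Mirrors the macros added to include/linux/intel-svm.h. */
    #define PRQ_ORDER      2
    #define PRQ_RING_MASK  ((0x1000 << PRQ_ORDER) - 0x20)
    #define PRQ_DEPTH      ((0x1000 << PRQ_ORDER) >> 5)

    int main(void)
    {
            /* Four 4K pages of 32-byte descriptors. */
            printf("queue bytes:   %d\n", 0x1000 << PRQ_ORDER); /* 16384  */
            printf("PRQ_DEPTH:     %d\n", PRQ_DEPTH);           /* 512    */
            printf("PRQ_RING_MASK: %#x\n", PRQ_RING_MASK);      /* 0x3fe0 */
            return 0;
    }

The resulting PRQ_DEPTH of 512 matches the 512 outstanding page
requests mentioned above; pci_enable_pri() clamps the requested
allocation to the device's advertised maximum, so devices that support
fewer outstanding page requests are unaffected.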

Signed-off-by: Jacob Pan <jacob.jun....@linux.intel.com>
Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
Link: https://lore.kernel.org/r/20210720013856.4143880-1-baolu...@linux.intel.com
---
 include/linux/intel-svm.h   | 5 +++++
 drivers/iommu/intel/iommu.c | 3 ++-
 drivers/iommu/intel/svm.c   | 4 ----
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 10fa80eef13a..57cceecbe37f 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -14,6 +14,11 @@
 #define SVM_REQ_EXEC   (1<<1)
 #define SVM_REQ_PRIV   (1<<0)
 
+/* Page Request Queue depth */
+#define PRQ_ORDER      2
+#define PRQ_RING_MASK  ((0x1000 << PRQ_ORDER) - 0x20)
+#define PRQ_DEPTH      ((0x1000 << PRQ_ORDER) >> 5)
+
 /*
  * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
  * for access to kernel addresses. No IOTLB flushes are automatically done
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8d4d49e12c51..d75f59ae28e6 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -33,6 +33,7 @@
 #include <linux/iommu.h>
 #include <linux/dma-iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/intel-svm.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
@@ -1541,7 +1542,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 
        if (info->pri_supported &&
            (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1)  &&
-           !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+           !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
                info->pri_enabled = 1;
 #endif
        if (info->ats_supported && pci_ats_page_aligned(pdev) &&
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4b9b3f35ba0e..2014fe8695ac 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -31,8 +31,6 @@ static irqreturn_t prq_event_thread(int irq, void *d);
 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
 #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
 
-#define PRQ_ORDER 0
-
 static DEFINE_XARRAY_ALLOC(pasid_private_array);
 static int pasid_private_add(ioasid_t pasid, void *priv)
 {
@@ -725,8 +723,6 @@ struct page_req_dsc {
        u64 priv_data[2];
 };
 
-#define PRQ_RING_MASK  ((0x1000 << PRQ_ORDER) - 0x20)
-
 static bool is_canonical_address(u64 addr)
 {
        int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
-- 
2.25.1
