This patch enables translation for DMA requests without PASID in
scalable mode by setting up the root and context entries accordingly.

Cc: Ashok Raj <ashok....@intel.com>
Cc: Jacob Pan <jacob.jun....@linux.intel.com>
Cc: Kevin Tian <kevin.t...@intel.com>
Cc: Liu Yi L <yi.l....@intel.com>
Signed-off-by: Sanjay Kumar <sanjay.k.ku...@intel.com>
Signed-off-by: Lu Baolu <baolu...@linux.intel.com>
Reviewed-by: Ashok Raj <ashok....@intel.com>
---
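Note for reviewers (not part of the patch): a minimal, standalone sketch of
the PASID directory size (PDTS) encoding that context_get_sm_pds() below
relies on. The function name sketch_get_pds() is illustrative only, and
__builtin_ctzl() stands in for the kernel's find_first_bit(); it assumes
max_pasid is a power of two (2^20 here), in which case a PDTS value of X
encodes a directory of 2^(X + 7) entries.

#include <stdio.h>

#define PASID_PDE_SHIFT	6	/* 64 PASIDs per directory entry */

static unsigned long sketch_get_pds(unsigned long max_pasid)
{
	unsigned long max_pde = max_pasid >> PASID_PDE_SHIFT;
	int pds;

	if (!max_pde)
		return 0;

	/* lowest set bit, mirroring find_first_bit() in the patch */
	pds = __builtin_ctzl(max_pde);

	return pds < 7 ? 0 : pds - 7;
}

int main(void)
{
	/* 2^20 PASIDs -> 2^14 directory entries -> PDTS = 7 */
	printf("PDTS for a 20-bit PASID space: %lu\n",
	       sketch_get_pds(1UL << 20));
	return 0;
}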
 drivers/iommu/intel-iommu.c | 109 +++++++++++++++++++++++++++++++++++++-------
 drivers/iommu/intel-pasid.h |   1 +
 include/linux/intel-iommu.h |   1 +
 3 files changed, 95 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index add7e3e..13f3d17 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1217,6 +1217,8 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
        unsigned long flag;
 
        addr = virt_to_phys(iommu->root_entry);
+       if (sm_supported(iommu))
+               addr |= DMA_RTADDR_SMT;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
        dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
@@ -1916,6 +1918,55 @@ static void domain_exit(struct dmar_domain *domain)
        free_domain_mem(domain);
 }
 
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+       int pds, max_pde;
+
+       max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+       pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
+       if (pds < 7)
+               return 0;
+
+       return pds - 7;
+}
+
+/*
+ * Set the RID_PASID field of a scalable mode context entry. The
+ * IOMMU hardware will use the PASID value set in this field for
+ * DMA translations of DMA requests without PASID.
+ */
+static inline void
+context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
+{
+       context->hi |= pasid & ((1 << 20) - 1);
+}
+
+/*
+ * Set the DTE(Device-TLB Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_dte(struct context_entry *context)
+{
+       context->lo |= (1 << 2);
+}
+
+/*
+ * Set the PRE(Page Request Enable) field of a scalable mode context
+ * entry.
+ */
+static inline void context_set_sm_pre(struct context_entry *context)
+{
+       context->lo |= (1 << 4);
+}
+
+/* Convert value to context PASID directory size field coding. */
+#define context_pdts(pds)      (((pds) & 0x7) << 9)
+
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      struct pasid_table *table,
@@ -1974,9 +2025,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        }
 
        pgd = domain->pgd;
-
        context_clear_entry(context);
-       context_set_domain_id(context, did);
 
        /*
         * Skip top levels of page tables for iommu which has less agaw
@@ -1989,25 +2038,54 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                        if (!dma_pte_present(pgd))
                                goto out_unlock;
                }
+       }
 
-               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
-               if (info && info->ats_supported)
-                       translation = CONTEXT_TT_DEV_IOTLB;
-               else
-                       translation = CONTEXT_TT_MULTI_LEVEL;
+       if (sm_supported(iommu)) {
+               unsigned long pds;
+
+               WARN_ON(!table);
+
+               /* Setup the PASID DIR pointer: */
+               pds = context_get_sm_pds(table);
+               context->lo = (u64)virt_to_phys(table->table) |
+                               context_pdts(pds);
+
+               /* Setup the RID_PASID field: */
+               context_set_sm_rid2pasid(context, PASID_RID2PASID);
 
-               context_set_address_root(context, virt_to_phys(pgd));
-               context_set_address_width(context, iommu->agaw);
-       } else {
                /*
-                * In pass through mode, AW must be programmed to
-                * indicate the largest AGAW value supported by
-                * hardware. And ASR is ignored by hardware.
+                * Setup the Device-TLB enable bit and Page request
+                * Enable bit:
                 */
-               context_set_address_width(context, iommu->msagaw);
+               info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+               if (info && info->ats_supported)
+                       context_set_sm_dte(context);
+               if (info && info->pri_supported)
+                       context_set_sm_pre(context);
+       } else {
+               context_set_domain_id(context, did);
+
+               if (translation != CONTEXT_TT_PASS_THROUGH) {
+                       info = iommu_support_dev_iotlb(domain, iommu,
+                                                      bus, devfn);
+                       if (info && info->ats_supported)
+                               translation = CONTEXT_TT_DEV_IOTLB;
+                       else
+                               translation = CONTEXT_TT_MULTI_LEVEL;
+
+                       context_set_address_root(context, virt_to_phys(pgd));
+                       context_set_address_width(context, iommu->agaw);
+               } else {
+                       /*
+                        * In pass through mode, AW must be programmed to
+                        * indicate the largest AGAW value supported by
+                        * hardware. And ASR is ignored by hardware.
+                        */
+                       context_set_address_width(context, iommu->msagaw);
+               }
+               context_set_translation_type(context, translation);
        }
 
-       context_set_translation_type(context, translation);
        context_set_fault_enable(context);
        context_set_present(context);
        domain_flush_cache(domain, context, sizeof(*context));
@@ -5150,7 +5228,6 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
-#define MAX_NR_PASID_BITS (20)
 static inline unsigned long intel_iommu_get_pts(struct device *dev)
 {
        int pts, max_pasid;
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 80fc88e..80d4667 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -17,6 +17,7 @@
 #define PASID_PTE_PRESENT              1
 #define PDE_PFN_MASK                   PAGE_MASK
 #define PASID_PDE_SHIFT                        6
+#define MAX_NR_PASID_BITS              20
 
 struct pasid_dir_entry {
        u64 val;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index a20ebca..4b58946 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -194,6 +194,7 @@
 
 /* DMA_RTADDR_REG */
 #define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
 
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)
-- 
2.7.4
