Two small changes to support TTB1 in the SMMU model. Note that I had to
modify the Linux driver in order to test this, since the SMMUv3 driver
normally only uses TTB0 (hack appended below), but other OSes might use TTB1.

Jean-Philippe Brucker (2):
  hw/arm/smmu-common: Support 64-bit addresses
  hw/arm/smmu-common: Fix TTB1 handling

 hw/arm/smmu-common.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
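
For context, the QEMU side of this concerns how the stage-1 walker picks a
translation table for an incoming IOVA. Below is a minimal, self-contained
sketch of that selection logic, following the VMSAv8-64 split-VA scheme that
SMMUv3 stage-1 context descriptors use: an address whose top T1SZ bits are
all ones goes through TTB1, one whose top T0SZ bits are all zeroes goes
through TTB0, and anything in the hole between the two regions faults. The
struct and function names are illustrative only, not the ones in
hw/arm/smmu-common.c:

/*
 * Sketch of VMSAv8-64 split-VA table selection (not the actual patch).
 * Names and values are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tt_cfg {
    uint64_t ttb;   /* table base address (TTB0 or TTB1)         */
    unsigned tsz;   /* T0SZ/T1SZ: 64 minus the region's VA bits   */
    bool     epd;   /* EPD0/EPD1: table walks disabled            */
};

/* Return the table to walk for @iova, or NULL if the access faults. */
static const struct tt_cfg *select_tt(const struct tt_cfg *tt0,
                                      const struct tt_cfg *tt1,
                                      uint64_t iova)
{
    /* Upper region: top tsz bits all ones -> TTB1 */
    if (tt1->tsz && (iova >> (64 - tt1->tsz)) == (1ULL << tt1->tsz) - 1) {
        return tt1->epd ? NULL : tt1;
    }
    /* Lower region: top tsz bits all zeroes -> TTB0 */
    if (tt0->tsz && (iova >> (64 - tt0->tsz)) == 0) {
        return tt0->epd ? NULL : tt0;
    }
    return NULL;    /* address falls in the hole between the regions */
}

int main(void)
{
    /* 39-bit halves (TxSZ = 25); EPD0 set to mirror the hack below. */
    struct tt_cfg tt0 = { .ttb = 0x40000000, .tsz = 25, .epd = true  };
    struct tt_cfg tt1 = { .ttb = 0x80000000, .tsz = 25, .epd = false };

    uint64_t hi = 0xffffff8081000000ULL;  /* what the hacked DMA layer produces */
    uint64_t lo = 0x0000000081000000ULL;

    printf("hi -> %s\n", select_tt(&tt0, &tt1, hi) ? "TTB1" : "fault");
    printf("lo -> %s\n", select_tt(&tt0, &tt1, lo) ? "TTB0" : "fault");
    return 0;
}
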


--- 8< --- Linux hacks:
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 8d772ea8a583..bf0ff699b64b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -276,6 +276,11 @@
 #define CTXDESC_CD_0_TCR_IRGN0         GENMASK_ULL(9, 8)
 #define CTXDESC_CD_0_TCR_ORGN0         GENMASK_ULL(11, 10)
 #define CTXDESC_CD_0_TCR_SH0           GENMASK_ULL(13, 12)
+#define CTXDESC_CD_0_TCR_T1SZ          GENMASK_ULL(21, 16)
+#define CTXDESC_CD_0_TCR_TG1           GENMASK_ULL(23, 22)
+#define CTXDESC_CD_0_TCR_IRGN1         GENMASK_ULL(25, 24)
+#define CTXDESC_CD_0_TCR_ORGN1         GENMASK_ULL(27, 26)
+#define CTXDESC_CD_0_TCR_SH1           GENMASK_ULL(29, 28)
 #define CTXDESC_CD_0_TCR_EPD0          (1ULL << 14)
 #define CTXDESC_CD_0_TCR_EPD1          (1ULL << 30)

@@ -293,6 +298,7 @@
 #define CTXDESC_CD_0_ASID              GENMASK_ULL(63, 48)

 #define CTXDESC_CD_1_TTB0_MASK         GENMASK_ULL(51, 4)
+#define CTXDESC_CD_2_TTB1_MASK         GENMASK_ULL(51, 4)

 /*
  * When the SMMU only supports linear context descriptor tables, pick a
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f2425b0f0cd6..3a4343e60a54 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1075,8 +1075,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
                 * this substream's traffic
                 */
        } else { /* (1) and (2) */
-               cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
-               cdptr[2] = 0;
+               cdptr[1] = 0;
+               cdptr[2] = cpu_to_le64(cd->ttbr & CTXDESC_CD_2_TTB1_MASK);
                cdptr[3] = cpu_to_le64(cd->mair);

                /*
@@ -2108,13 +2108,13 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,

        cfg->cd.asid    = (u16)asid;
        cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
-       cfg->cd.tcr     = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
-                         FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
-                         FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
-                         FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
-                         FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
+       cfg->cd.tcr     = FIELD_PREP(CTXDESC_CD_0_TCR_T1SZ, tcr->tsz) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_TG1, tcr->tg) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_IRGN1, tcr->irgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_ORGN1, tcr->orgn) |
+                         FIELD_PREP(CTXDESC_CD_0_TCR_SH1, tcr->sh) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
-                         CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
+                         CTXDESC_CD_0_TCR_EPD0 | CTXDESC_CD_0_AA64;
        cfg->cd.mair    = pgtbl_cfg->arm_lpae_s1_cfg.mair;

        /*
@@ -2212,6 +2212,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                .pgsize_bitmap  = smmu->pgsize_bitmap,
                .ias            = ias,
                .oas            = oas,
+               .quirks         = IO_PGTABLE_QUIRK_ARM_TTBR1,
                .coherent_walk  = smmu->features & ARM_SMMU_FEAT_COHERENCY,
                .tlb            = &arm_smmu_flush_ops,
                .iommu_dev      = smmu->dev,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 38434203bf04..3fe154c9782d 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -677,6 +677,10 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
        }
 }

+/* HACK */
+#define VA_SIZE                39
+#define VA_MASK                (~0ULL << VA_SIZE)
+
 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, u64 dma_limit, struct device *dev)
 {
@@ -706,7 +710,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

-       return (dma_addr_t)iova << shift;
+       return (dma_addr_t)iova << shift | VA_MASK;
 }

 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
@@ -714,6 +718,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 {
        struct iova_domain *iovad = &cookie->iovad;

+       iova &= ~VA_MASK;
        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;


