Does something like this look good?
+#ifdef CONFIG_64BIT
+#define smmu_writeq(reg64, addr)	writeq_relaxed((reg64), (addr))
+#else
+#define smmu_writeq(reg64, addr)				\
+	do {							\
+		u64 __val = (reg64);				\
+		void __iomem *__addr = (addr);			\
+		/* write the high word first (see the ATOS note below) */ \
+		writel_relaxed(__val >> 32, __addr + 4);	\
+		writel_relaxed(__val, __addr);			\
+	} while (0)
+#endif
+
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0 0x0
#define sCR0_CLIENTPD (1 << 0)
@@ -226,7 +234,7 @@
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-#define TTBRn_HI_ASID_SHIFT 16
+#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
#define FSR_SS (1 << 30)
@@ -719,6 +727,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
u32 reg;
+ u64 reg64;
bool stage1;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -762,22 +771,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* TTBRs */
if (stage1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0_LO);
+
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1_LO);
} else {
- reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+ reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
+ smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0_LO);
}
/* TTBCR */
@@ -1236,10 +1239,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
u32 reg = iova & ~0xfff;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
} else {
- u32 reg = iova & ~0xfff;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
- reg = ((u64)iova & ~0xfff) >> 32;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI);
+ u64 reg = iova & ~0xfff;
+ smmu_writeq(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO);
}
if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
On Jul 30, 2015, at 12:07 PM, Chalamarla, Tirumalesh <[email protected]> wrote:
On Jul 30, 2015, at 11:45 AM, Will Deacon <[email protected]> wrote:
Hello,
On Thu, Jul 30, 2015 at 06:55:06PM +0100, [email protected] wrote:
From: Tirumalesh Chalamarla <[email protected]>
The SMMU architecture defines two different behaviors when 64-bit
registers are written with 32-bit writes. The first behavior causes
zero extension into the upper 32 bits. The second behavior splits a
64-bit register into a pair of "normal" 32-bit registers.

On some passes of ThunderX, the following registers are incorrectly
zero-extended when they should instead behave as normal 32-bit register
pairs:
SMMU()_(S)GFAR
SMMU()_NSGFAR
SMMU()_CB()_TTBR0
SMMU()_CB()_TTBR1
SMMU()_CB()_FAR
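
To make the two behaviours concrete: with an ordinary uint64_t standing in
for the hardware register, the difference looks roughly like this
(illustration only, made-up names and values, not driver code):

/*
 * Models the two behaviours described above.  The "register" is a plain
 * uint64_t in memory; real hardware behaviour depends on the implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Behaviour 1: a 32-bit write zero-extends into the upper 32 bits. */
static void write32_zero_extend(uint64_t *reg, uint32_t val)
{
	*reg = (uint64_t)val;				/* upper half is cleared */
}

/* Behaviour 2: the register acts as a pair of independent 32-bit halves. */
static void write32_low_half(uint64_t *reg, uint32_t val)
{
	*reg = (*reg & 0xffffffff00000000ULL) | val;	/* upper half is kept */
}

int main(void)
{
	uint64_t reg = 0x00000001aabbc000ULL;		/* arbitrary 64-bit value */

	write32_low_half(&reg, 0xdeadb000);
	printf("register pair : 0x%016" PRIx64 "\n", reg);	/* high word preserved */

	reg = 0x00000001aabbc000ULL;
	write32_zero_extend(&reg, 0xdeadb000);
	printf("zero extension: 0x%016" PRIx64 "\n", reg);	/* high word lost */

	return 0;
}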
Signed-off-by: Tirumalesh Chalamarla <[email protected]>
---
drivers/iommu/arm-smmu.c | 51 ++++++++++++++++++++++++++++++++++--------------
1 file changed, 36 insertions(+), 15 deletions(-)
[...]
@@ -762,22 +766,39 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
/* TTBRs */
if (stage1) {
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
-
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
- reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
- reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ if (smmu->options & ARM_SMMU_OPT_64BIT_WRITES_ONLY) {
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ reg64 |= ((u64) ARM_SMMU_CB_ASID(cfg)) <<
+ (TTBRn_HI_ASID_SHIFT + 32);
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0_LO);
+
+ reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) <<
+ (TTBRn_HI_ASID_SHIFT + 32);
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ } else {
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO);
+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32;
+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI);
+ }
I'm fine with making this sort of change if you need it, but this is pretty
ugly. Worse, it won't compile for 32-bit ARM.
How about we add a wrapper around these, say smmu_writeq(...), which can
then either expand to 2x writel_relaxed or 1x writeq_relaxed depending on
CONFIG_64BIT and your erratum workaround?
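
For illustration, a wrapper along those lines that also honours a per-SMMU
erratum flag at run time might look roughly like the sketch below; the
smmu_write64() name is invented here, and ARM_SMMU_OPT_64BIT_WRITES_ONLY is
the option added by the patch under review, so treat this as a sketch of the
idea rather than the final code:

static void smmu_write64(struct arm_smmu_device *smmu, u64 val,
			 void __iomem *addr)
{
#ifdef CONFIG_64BIT
	/* Affected parts need a single 64-bit write. */
	if (smmu->options & ARM_SMMU_OPT_64BIT_WRITES_ONLY) {
		writeq_relaxed(val, addr);
		return;
	}
#endif
	/* Otherwise write the register as a pair, high word first. */
	writel_relaxed(upper_32_bits(val), addr + 4);
	writel_relaxed(lower_32_bits(val), addr);
}

Since a single 64-bit write is also fine on unaffected 64-bit implementations,
the run-time check can be dropped and the choice keyed off CONFIG_64BIT alone,
which is what the macro at the top of this thread does.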
I think we don't even need to restrict it to the workaround;
we can just use CONFIG_64BIT and smmu_writeq.
If that's fine, I will repost the patch.
Thanks,
Tirumalesh.
Don't forget to update the ATOS code too (so you need to write the high word
first).
Will
_______________________________________________
iommu mailing list
[email protected]
https://lists.linuxfoundation.org/mailman/listinfo/iommu