From: Jan Kiszka <[email protected]> We only support version 2, so check this during probe instead of in the reset path. This makes arm_smmu_device_reset a bit simpler.
Signed-off-by: Jan Kiszka <[email protected]> --- hypervisor/arch/arm64/smmu.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hypervisor/arch/arm64/smmu.c b/hypervisor/arch/arm64/smmu.c index 3b45cc5e..ac6d7472 100644 --- a/hypervisor/arch/arm64/smmu.c +++ b/hypervisor/arch/arm64/smmu.c @@ -286,7 +286,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) { void *gr0_base = ARM_SMMU_GR0(smmu); unsigned int idx; - u32 reg, major; + u32 reg; int ret; /* Clear global FSR */ @@ -306,14 +306,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu) /* * Before clearing ARM_MMU500_ACTLR_CPRE, need to - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK - * bit is only present in MMU-500r2 onwards. + * clear CACHE_LOCK bit of ACR first. */ - reg = mmio_read32(gr0_base + ARM_SMMU_GR0_ID7); - major = ID7_MAJOR(reg); reg = mmio_read32(gr0_base + ARM_SMMU_GR0_sACR); - if (major >= 2) - reg &= ~ARM_MMU500_ACR_CACHE_LOCK; + reg &= ~ARM_MMU500_ACR_CACHE_LOCK; + /* * Allow unmatched Stream IDs to allocate bypass * TLB entries for reduced latency. @@ -359,6 +356,10 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) unsigned long size; u32 id; + /* We only support version 2 */ + if (ID7_MAJOR(mmio_read32(gr0_base + ARM_SMMU_GR0_ID7)) != 2) + return trace_error(-EIO); + /* ID0 */ id = mmio_read32(gr0_base + ARM_SMMU_GR0_ID0); -- 2.26.2 -- You received this message because you are subscribed to the Google Groups "Jailhouse" group. To unsubscribe from this group and stop receiving emails from it, send an email to [email protected]. To view this discussion on the web visit https://groups.google.com/d/msgid/jailhouse-dev/16e2acc785aab3d1b2329fddafd2bf7e3bfec96f.1601838005.git.jan.kiszka%40siemens.com.
