Now that we have the relevant helpers for queue and IRQ
management, let's implement the MMIO write operations.
Signed-off-by: Eric Auger
Signed-off-by: Prem Mallappa
---
v9 -> v10:
- s/hwaddr/uint64_t in trace-events
- added SMMU_FEATURE_2LVL_STE in this patch
- removed smmu_write64 and created writel/writell infra
- store capped log2size
- mask CR0 reserved bits
v7 -> v8:
- state in the commit message that invalidation commands
are not yet treated.
- use new queue helpers
- do not decode unhandled commands at this stage
---
hw/arm/smmuv3-internal.h | 8 +--
hw/arm/smmuv3.c | 172 +--
hw/arm/trace-events | 6 ++
3 files changed, 175 insertions(+), 11 deletions(-)
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index 968fa25..8550be0 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -64,6 +64,8 @@ REG32(CR0, 0x20)
FIELD(CR0, EVENTQEN, 2, 1)
FIELD(CR0, CMDQEN,3, 1)
+#define SMMU_CR0_RESERVED 0xFC20
+
REG32(CR0ACK, 0x24)
REG32(CR1, 0x28)
REG32(CR2, 0x2c)
@@ -152,10 +154,6 @@ static inline bool smmuv3_gerror_irq_enabled(SMMUv3State
*s)
return FIELD_EX32(s->irq_ctrl, IRQ_CTRL, GERROR_IRQEN);
}
-/* public until callers get introduced */
-void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask);
-void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t gerrorn);
-
/* Queue Handling */
#define Q_BASE(q) ((q)->base & SMMU_BASE_ADDR_MASK)
@@ -308,6 +306,6 @@ enum { /* Command completion notification */
addr; \
})
-int smmuv3_cmdq_consume(SMMUv3State *s);
+#define SMMU_FEATURE_2LVL_STE (1 << 0)
#endif
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 899f0e0..82ad1af 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -37,7 +37,8 @@
* @irq: irq type
* @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
*/
-void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t gerror_mask)
+static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
+ uint32_t gerror_mask)
{
bool pulse = false;
@@ -74,7 +75,7 @@ void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, uint32_t
gerror_mask)
}
}
-void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
uint32_t pending = s->gerror ^ s->gerrorn;
uint32_t toggled = s->gerrorn ^ new_gerrorn;
@@ -173,7 +174,7 @@ static void smmuv3_init_regs(SMMUv3State *s)
s->sid_split = 0;
}
-int smmuv3_cmdq_consume(SMMUv3State *s)
+static int smmuv3_cmdq_consume(SMMUv3State *s)
{
SMMUCmdError cmd_error = SMMU_CERROR_NONE;
SMMUQueue *q = &s->cmdq;
@@ -269,11 +270,170 @@ int smmuv3_cmdq_consume(SMMUv3State *s)
return 0;
}
-static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
+static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+switch (offset) {
+case A_GERROR_IRQ_CFG0:
+s->gerror_irq_cfg0 = value;
+return MEMTX_OK;
+case A_STRTAB_BASE:
+s->strtab_base = value;
+return MEMTX_OK;
+case A_CMDQ_BASE:
+s->cmdq.base = value;
+s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
+if (s->cmdq.log2size > SMMU_CMDQS) {
+s->cmdq.log2size = SMMU_CMDQS;
+}
+return MEMTX_OK;
+case A_EVENTQ_BASE:
+s->eventq.base = value;
+s->eventq.log2size = extract64(s->eventq.base, 0, 5);
+if (s->eventq.log2size > SMMU_EVENTQS) {
+s->eventq.log2size = SMMU_EVENTQS;
+}
+return MEMTX_OK;
+case A_EVENTQ_IRQ_CFG0:
+s->eventq_irq_cfg0 = value;
+return MEMTX_OK;
+default:
+qemu_log_mask(LOG_UNIMP,
+ "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
+ __func__, offset);
+return MEMTX_OK;
+}
+}
+
+static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
+ uint64_t value, MemTxAttrs attrs)
+{
+switch (offset) {
+case A_CR0:
+s->cr[0] = value;
+s->cr0ack = value & ~SMMU_CR0_RESERVED;
+/* in case the command queue has been enabled */
+smmuv3_cmdq_consume(s);
+return MEMTX_OK;
+case A_CR1:
+s->cr[1] = value;
+return MEMTX_OK;
+case A_CR2:
+s->cr[2] = value;
+return MEMTX_OK;
+case A_IRQ_CTRL:
+s->irq_ctrl = value;
+return MEMTX_OK;
+case A_GERRORN:
+smmuv3_write_gerrorn(s, value);
+/*
+ * By acknowledging the CMDQ_ERR, SW may notify cmds can
+ * be processed again
+ */
+smmuv3_cmdq_consume(s);
+return MEMTX_OK;
+case A_GE