On 10/12/25 5:14 PM, Tao Tang wrote:
The command queue and interrupt logic must operate within the correct
security context. Handling a command queue in one security state can
have side effects, such as interrupts or errors, that need to be
processed in another. This requires the IRQ and GERROR logic to be
fully aware of the multi-security-state environment.
This patch refactors the command queue processing and interrupt handling
to be security-state aware. Note that, unlike the command queue, the
event queue logic was already made security-state aware in a
previous change. The SMMUSecSID is now passed through the relevant
functions to ensure that:
- Command queue operations are performed on the correct register bank.
- Interrupts are triggered and checked against the correct security
state's configuration.
- Errors from command processing are reported in the correct GERROR
register bank.
- Architectural access controls, like preventing secure commands from a
non-secure queue, are correctly enforced.
- As Secure Stage 2 is not yet implemented, commands that target it
are now correctly aborted during command queue processing.
Signed-off-by: Tao Tang <[email protected]>
---
hw/arm/smmuv3.c | 61 +++++++++++++++++++++++++++++++--------------
hw/arm/trace-events | 2 +-
2 files changed, 43 insertions(+), 20 deletions(-)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 432de88610..4ac7a2f3c7 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -46,11 +46,11 @@
*
* @irq: irq type
* @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
+ * @sec_sid: SEC_SID of the bank
*/
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
- uint32_t gerror_mask)
+ uint32_t gerror_mask, SMMUSecSID sec_sid)
{
- SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
bool pulse = false;
@@ -87,9 +87,9 @@ static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
}
}
-static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn,
+ SMMUSecSID sec_sid)
{
- SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
uint32_t pending = bank->gerror ^ bank->gerrorn;
uint32_t toggled = bank->gerrorn ^ new_gerrorn;
@@ -109,7 +109,7 @@ static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t
new_gerrorn)
trace_smmuv3_write_gerrorn(toggled & pending, bank->gerrorn);
}
-static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
+static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
Why this change? sec_sid is not used yet. Besides, q is already
specialized for a given sec_sid.
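For reference, the caller already resolves the banked queue before calling
queue_read(), so the security state is implied by q. From the
smmuv3_cmdq_consume() hunk below:

    SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
    SMMUQueue *q = &bank->cmdq; /* q is per-bank, i.e. per security state */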
{
dma_addr_t addr = Q_CONS_ENTRY(q);
MemTxResult ret;
@@ -167,7 +167,7 @@ static MemTxResult smmuv3_write_eventq(SMMUv3State *s, SMMUSecSID sec_sid,
}
if (!smmuv3_q_empty(q)) {
- smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
+ smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0, sec_sid);
}
return MEMTX_OK;
}
@@ -263,7 +263,8 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
info->sid);
r = smmuv3_write_eventq(s, sec_sid, &evt);
if (r != MEMTX_OK) {
- smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
+ smmuv3_trigger_irq(s, SMMU_IRQ_GERROR,
+ R_GERROR_EVENTQ_ABT_ERR_MASK, sec_sid);
}
info->recorded = true;
}
@@ -1457,11 +1458,10 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s, SMMUSecSID sec_sid)
return smmu_irq_ctl_evtq_irqen_disabled(s, sec_sid);
}
-static int smmuv3_cmdq_consume(SMMUv3State *s)
+static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
{
SMMUState *bs = ARM_SMMU(s);
SMMUCmdError cmd_error = SMMU_CERROR_NONE;
- SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
SMMUQueue *q = &bank->cmdq;
SMMUCommandType type = 0;
@@ -1480,14 +1480,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
uint32_t pending = bank->gerror ^ bank->gerrorn;
Cmd cmd;
- trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
+ trace_smmuv3_cmdq_consume(sec_sid, Q_PROD(q), Q_CONS(q),
Q_PROD_WRAP(q), Q_CONS_WRAP(q));
if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
break;
}
- if (queue_read(q, &cmd) != MEMTX_OK) {
+ if (queue_read(q, &cmd, sec_sid) != MEMTX_OK) {
cmd_error = SMMU_CERROR_ABT;
break;
}
@@ -1500,7 +1500,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
switch (type) {
case SMMU_CMD_SYNC:
if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
- smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
+ smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0, sec_sid);
}
break;
case SMMU_CMD_PREFETCH_CONFIG:
@@ -1512,6 +1512,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
if (CMD_SSEC(&cmd)) {
When reading the spec I have the impression SSEC is common to all
commands (see 4.1.6, "Common command fields"). Can't you factorize that
check?
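Something like the following could hoist it out of the individual cases.
Untested sketch, and the helper name is mine:

    /*
     * SSEC is a common command field (4.1.6). A command with SSEC set is
     * only legal when consumed from the Secure command queue.
     */
    static bool smmuv3_cmd_ssec_illegal(Cmd *cmd, SMMUSecSID sec_sid)
    {
        return CMD_SSEC(cmd) && sec_sid != SMMU_SEC_SID_S;
    }

Then each case (or, if SSEC really is common to every command, a single
spot before the switch) reduces to:

    if (smmuv3_cmd_ssec_illegal(&cmd, sec_sid)) {
        cmd_error = SMMU_CERROR_ILL;
        break;
    }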
+ if (sec_sid != SMMU_SEC_SID_S) {
+ /* Secure Stream with Non-Secure command */
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
cmd_error = SMMU_CERROR_ILL;
break;
}
@@ -1532,6 +1537,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
SMMUSIDRange sid_range;
if (CMD_SSEC(&cmd)) {
+ if (sec_sid != SMMU_SEC_SID_S) {
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
cmd_error = SMMU_CERROR_ILL;
break;
}
@@ -1551,6 +1560,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
SMMUDevice *sdev = smmu_find_sdev(bs, sid);
if (CMD_SSEC(&cmd)) {
+ if (sec_sid != SMMU_SEC_SID_S) {
+ cmd_error = SMMU_CERROR_ILL;
+ break;
+ }
cmd_error = SMMU_CERROR_ILL;
break;
}
@@ -1618,7 +1631,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
cmd_error = SMMU_CERROR_ILL;
break;
}
- smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, SMMU_SEC_SID_NS);
+ smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, sec_sid);
break;
case SMMU_CMD_TLBI_S12_VMALL:
{
@@ -1628,6 +1641,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
cmd_error = SMMU_CERROR_ILL;
break;
}
+ /* Secure Stage 2 isn't supported for now */
+ if (sec_sid != SMMU_SEC_SID_NS) {
+ cmd_error = SMMU_CERROR_ABT;
+ break;
+ }
trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
smmu_inv_notifiers_all(&s->smmu_state);
@@ -1639,11 +1657,16 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
cmd_error = SMMU_CERROR_ILL;
break;
}
+
+ if (sec_sid != SMMU_SEC_SID_NS) {
+ cmd_error = SMMU_CERROR_ABT;
+ break;
+ }
/*
* As currently only either s1 or s2 are supported
* we can reuse same function for s2.
*/
- smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, SMMU_SEC_SID_NS);
+ smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, sec_sid);
break;
case SMMU_CMD_TLBI_EL3_ALL:
case SMMU_CMD_TLBI_EL3_VA: