On 10/31/25 11:49 AM, Shameer Kolothum wrote:
> Provide a helper and use that to issue the invalidation cmd to the host SMMUv3.
> We only issue one cmd at a time for now.
>
> Support for batching of commands will be added later after analysing the
> impact.
>
> Reviewed-by: Jonathan Cameron <[email protected]>
> Tested-by: Zhangfei Gao <[email protected]>
> Signed-off-by: Shameer Kolothum <[email protected]>
Reviewed-by: Eric Auger <[email protected]>
Eric
> ---
> hw/arm/smmuv3-accel.c | 35 +++++++++++++++++++++++++++++++++++
> hw/arm/smmuv3-accel.h | 8 ++++++++
> hw/arm/smmuv3.c | 30 ++++++++++++++++++++++++++++++
> 3 files changed, 73 insertions(+)
>
> diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> index 395c8175da..a2deda3c32 100644
> --- a/hw/arm/smmuv3-accel.c
> +++ b/hw/arm/smmuv3-accel.c
> @@ -213,6 +213,41 @@ bool smmuv3_accel_install_nested_ste_range(SMMUv3State
> *s, SMMUSIDRange *range,
> return true;
> }
>
> +/*
> + * This issues the invalidation cmd to the host SMMUv3.
> + * Note: sdev can be NULL for certain invalidation commands
> + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
> + */
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + SMMUv3State *s = ARM_SMMUV3(bs);
> + SMMUv3AccelState *s_accel = s->s_accel;
> + IOMMUFDViommu *viommu;
> + uint32_t entry_num = 1;
> +
> + /* No vIOMMU means no VFIO/IOMMUFD devices, nothing to invalidate. */
> + if (!s_accel || !s_accel->vsmmu) {
> + return true;
> + }
> +
> + /*
> + * Called for emulated bridges or root ports, but SID-based
> + * invalidations (e.g. CFGI_CD) apply only to vfio-pci endpoints
> + * with a valid vIOMMU vdev.
> + */
> + if (sdev && !container_of(sdev, SMMUv3AccelDevice, sdev)->vdev) {
> + return true;
> + }
> +
> + viommu = &s_accel->vsmmu->viommu;
> + /* Single command (entry_num = 1); no need to check returned entry_num */
> + return iommufd_backend_invalidate_cache(
> + viommu->iommufd, viommu->viommu_id,
> + IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
> + sizeof(Cmd), &entry_num, cmd, errp);
> +}
> +
> static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus
> *sbus,
> PCIBus *bus, int devfn)
> {
> diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> index 8931e83dc5..ee79548370 100644
> --- a/hw/arm/smmuv3-accel.h
> +++ b/hw/arm/smmuv3-accel.h
> @@ -51,6 +51,8 @@ bool smmuv3_accel_install_nested_ste(SMMUv3State *s,
> SMMUDevice *sdev, int sid,
> Error **errp);
> bool smmuv3_accel_install_nested_ste_range(SMMUv3State *s, SMMUSIDRange
> *range,
> Error **errp);
> +bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp);
> void smmuv3_accel_gbpa_update(SMMUv3State *s);
> void smmuv3_accel_reset(SMMUv3State *s);
> #else
> @@ -69,6 +71,12 @@ smmuv3_accel_install_nested_ste_range(SMMUv3State *s,
> SMMUSIDRange *range,
> {
> return true;
> }
> +static inline bool
> +smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
> + Error **errp)
> +{
> + return true;
> +}
> static inline void smmuv3_accel_gbpa_update(SMMUv3State *s)
> {
> }
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index cc32b618ed..15173ddc9c 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1381,6 +1381,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> {
> uint32_t sid = CMD_SID(&cmd);
> SMMUDevice *sdev = smmu_find_sdev(bs, sid);
> + Error *local_err = NULL;
>
> if (CMD_SSEC(&cmd)) {
> cmd_error = SMMU_CERROR_ILL;
> @@ -1393,11 +1394,17 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>
> trace_smmuv3_cmdq_cfgi_cd(sid);
> smmuv3_flush_config(sdev);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, sdev, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ASID:
> {
> int asid = CMD_ASID(&cmd);
> + Error *local_err = NULL;
> int vmid = -1;
>
> if (!STAGE1_SUPPORTED(s)) {
> @@ -1416,6 +1423,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> trace_smmuv3_cmdq_tlbi_nh_asid(asid);
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_asid_vmid(bs, asid, vmid);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> }
> case SMMU_CMD_TLBI_NH_ALL:
> @@ -1440,18 +1452,36 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
> QEMU_FALLTHROUGH;
> }
> case SMMU_CMD_TLBI_NSNH_ALL:
> + {
> + Error *local_err = NULL;
> +
> trace_smmuv3_cmdq_tlbi_nsnh();
> smmu_inv_notifiers_all(&s->smmu_state);
> smmu_iotlb_inv_all(bs);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_NH_VAA:
> case SMMU_CMD_TLBI_NH_VA:
> + {
> + Error *local_err = NULL;
> +
> if (!STAGE1_SUPPORTED(s)) {
> cmd_error = SMMU_CERROR_ILL;
> break;
> }
> smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1);
> + if (!smmuv3_accel_issue_inv_cmd(s, &cmd, NULL, &local_err)) {
> + error_report_err(local_err);
> + cmd_error = SMMU_CERROR_ILL;
> + break;
> + }
> break;
> + }
> case SMMU_CMD_TLBI_S12_VMALL:
> {
> int vmid = CMD_VMID(&cmd);