> -----Original Message-----
> From: Eric Auger <eric.au...@redhat.com>
> Sent: 05 September 2025 11:31
> To: qemu-...@nongnu.org; qemu-devel@nongnu.org; Shameer Kolothum <skolothum...@nvidia.com>
> Cc: peter.mayd...@linaro.org; Jason Gunthorpe <j...@nvidia.com>; Nicolin Chen <nicol...@nvidia.com>; ddut...@redhat.com; berra...@redhat.com; Nathan Chen <nath...@nvidia.com>; Matt Ochs <mo...@nvidia.com>; smost...@google.com; linux...@huawei.com; wangzh...@hisilicon.com; jiangkun...@huawei.com; jonathan.came...@huawei.com; zhangfei....@linaro.org; zhenzhong.d...@intel.com; shameerkolot...@gmail.com
> Subject: Re: [RFC PATCH v3 12/15] hw/arm/smmuv3-accel: Introduce helpers to batch and issue cache invalidations
>
> External email: Use caution opening links or attachments
>
> On 7/14/25 5:59 PM, Shameer Kolothum wrote:
> > From: Nicolin Chen <nicol...@nvidia.com>
> >
> > Helpers will batch the commands and issue at once to host SMMUv3.
> >
> > Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
> > Signed-off-by: Shameer Kolothum <shameerali.kolothum.th...@huawei.com>
> > ---
> >  hw/arm/smmuv3-accel.c    | 65 ++++++++++++++++++++++++++++++++++++++++
> >  hw/arm/smmuv3-accel.h    | 16 ++++++++++
> >  hw/arm/smmuv3-internal.h | 12 ++++++++
> >  3 files changed, 93 insertions(+)
> >
> > diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
> > index 04c665ccf5..1298b4f6d0 100644
> > --- a/hw/arm/smmuv3-accel.c
> > +++ b/hw/arm/smmuv3-accel.c
> > @@ -168,6 +168,71 @@ smmuv3_accel_install_nested_ste_range(SMMUState *bs, SMMUSIDRange *range)
> >      g_hash_table_foreach(bs->configs, smmuv3_accel_ste_range, range);
> >  }
> >
> > +/* Update batch->ncmds to the number of execute cmds */
> > +bool smmuv3_accel_issue_cmd_batch(SMMUState *bs, SMMUCommandBatch *batch)
> > +{
> > +    SMMUv3State *s = ARM_SMMUV3(bs);
> > +    SMMUv3AccelState *s_accel = s->s_accel;
> > +    uint32_t total = batch->ncmds;
> > +    IOMMUFDViommu *viommu_core;
> > +    int ret;
> > +
> > +    if (!bs->accel) {
> > +        return true;
> > +    }
> > +
> > +    if (!s_accel->viommu) {
> > +        return true;
> > +    }
> > +
> > +    viommu_core = &s_accel->viommu->core;
> > +    ret = iommufd_backend_invalidate_cache(viommu_core->iommufd,
> > +                                           viommu_core->viommu_id,
> > +                                           IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
> > +                                           sizeof(Cmd), &batch->ncmds,
> > +                                           batch->cmds, NULL);
> > +    if (!ret || total != batch->ncmds) {
> > +        error_report("%s failed: ret=%d, total=%d, done=%d",
> > +                     __func__, ret, total, batch->ncmds);
> > +        return ret;
> > +    }
> > +
> > +    batch->ncmds = 0;
> > +    return ret;
> > +}
> > +
> > +/*
> > + * Note: sdev can be NULL for certain invalidation commands
> > + * e.g., SMMU_CMD_TLBI_NH_ASID, SMMU_CMD_TLBI_NH_VA etc.
> > + */
> > +void smmuv3_accel_batch_cmd(SMMUState *bs, SMMUDevice *sdev,
> > +                            SMMUCommandBatch *batch, Cmd *cmd,
> > +                            uint32_t *cons)
> > +{
> > +    if (!bs->accel) {
> > +        return;
> > +    }
> > +
> > +    /*
> > +     * We may end up here for any emulated PCI bridge or root port type
> > +     * devices. The batching of commands only matters for vfio-pci endpoint
> > +     * devices with Guest S1 translation enabled. Hence check that, if
> > +     * sdev is available.
> > +     */
> > +    if (sdev) {
> > +        SMMUv3AccelDevice *accel_dev;
> > +        accel_dev = container_of(sdev, SMMUv3AccelDevice, sdev);
> > +
> > +        if (!accel_dev->s1_hwpt) {
> > +            return;
> > +        }
> > +    }
> > +
> > +    batch->cmds[batch->ncmds] = *cmd;
> > +    batch->cons[batch->ncmds++] = *cons;
> > +    return;
> > +}
> > +
> >  static SMMUv3AccelDevice *smmuv3_accel_get_dev(SMMUState *bs, SMMUPciBus *sbus,
> >                                                 PCIBus *bus, int devfn)
> >  {
> > diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
> > index 21028e60c8..d06c9664ba 100644
> > --- a/hw/arm/smmuv3-accel.h
> > +++ b/hw/arm/smmuv3-accel.h
> > @@ -13,6 +13,7 @@
> >  #include "hw/arm/smmu-common.h"
> >  #include "system/iommufd.h"
> >  #include <linux/iommufd.h>
> > +#include "smmuv3-internal.h"
> >  #include CONFIG_DEVICES
> >
> >  typedef struct SMMUS2Hwpt {
> > @@ -55,6 +56,10 @@ void smmuv3_accel_init(SMMUv3State *s);
> >  void smmuv3_accel_install_nested_ste(SMMUState *bs, SMMUDevice *sdev, int sid);
> >  void smmuv3_accel_install_nested_ste_range(SMMUState *bs,
> >                                             SMMUSIDRange *range);
> > +bool smmuv3_accel_issue_cmd_batch(SMMUState *bs, SMMUCommandBatch *batch);
> > +void smmuv3_accel_batch_cmd(SMMUState *bs, SMMUDevice *sdev,
> > +                            SMMUCommandBatch *batch, struct Cmd *cmd,
> > +                            uint32_t *cons);
> >  #else
> >  static inline void smmuv3_accel_init(SMMUv3State *d)
> >  {
> > @@ -67,6 +72,17 @@ static inline void
> >  smmuv3_accel_install_nested_ste_range(SMMUState *bs, SMMUSIDRange *range)
> >  {
> >  }
> > +static inline bool smmuv3_accel_issue_cmd_batch(SMMUState *bs,
> > +                                                SMMUCommandBatch *batch)
> > +{
> > +    return true;
> > +}
> > +static inline void smmuv3_accel_batch_cmd(SMMUState *bs, SMMUDevice *sdev,
> > +                                          SMMUCommandBatch *batch,
> > +                                          struct Cmd *cmd, uint32_t *cons)
> > +{
> > +    return;
> > +}
> >  #endif
> >
> >  #endif /* HW_ARM_SMMUV3_ACCEL_H */
> > diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
> > index 738061c6ad..8cb6a9238a 100644
> > --- a/hw/arm/smmuv3-internal.h
> > +++ b/hw/arm/smmuv3-internal.h
> > @@ -547,6 +547,18 @@ typedef struct CD {
> >      uint32_t word[16];
> >  } CD;
> >
> > +/*
> > + * SMMUCommandBatch - batch of invalidation commands for accel smmuv3
> > + * @cmds: Pointer to list of commands
> > + * @cons: Pointer to list of CONS corresponding to the commands
> It is not totally clear to me how the list of "CONS" indexes is used. Is
> it meant to store errors, how do you update cons index in case it starts
> failing, ...
This is how I understand it: the cons array stores the SMMUv3 command queue CONS value corresponding to each batched command. If the batched invalidation (iommufd_backend_invalidate_cache()) fails, the kernel updates batch->ncmds with the index of the last failed command, and the CONS value recorded at that index is then used to update the guest-visible SMMUv3 CONS register. I will add some comments to make this clear. I will also double-check that the above is actually true and that this is indeed how I have implemented it in this series 😊.
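To illustrate the intent, a rough sketch of the caller-side error handling I have in mind is below. smmuv3_accel_flush_batch() is a hypothetical name used only for illustration (not a function in this series), and the CERROR reporting to the guest is elided:

/*
 * Rough sketch only, assuming the kernel leaves batch->ncmds at the
 * index of the failing command on error. smmuv3_accel_flush_batch()
 * is a hypothetical helper name, not code from this series.
 */
static void smmuv3_accel_flush_batch(SMMUv3State *s, SMMUCommandBatch *batch)
{
    SMMUState *bs = &s->smmu_state;

    if (!smmuv3_accel_issue_cmd_batch(bs, batch)) {
        /*
         * batch->ncmds now indexes the failed command: roll the
         * guest-visible CONS back to the value recorded when that
         * command was batched, so the guest sees exactly where
         * consumption stopped.
         */
        s->cmdq.cons = batch->cons[batch->ncmds];
        /* CMD_SYNC completion / CERROR reporting elided here */
    }
    batch->ncmds = 0;
}

On the batching side, the command queue consumer would record the queue CONS alongside each command as it is queued, something like smmuv3_accel_batch_cmd(bs, sdev, &batch, &cmd, &q->cons), so there is exactly one saved CONS per batched command.

Thanks,
Shameer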