From: Nicolin Chen <[email protected]>

When the guest enables the Event Queue and a vIOMMU is present, allocate
a vEVENTQ object so that host-side events related to the vIOMMU can be
received and propagated back to the guest.
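
For reference, a minimal sketch of the guest-side enable sequence that
triggers this allocation (Linux-style MMIO accessors; the register
offset macros and the CR0_EVENTQEN mask below are illustrative, not
code from this series):

    /* Illustrative guest-driver snippet, assumptions noted above */
    static void guest_enable_eventq(void __iomem *smmu, u64 q_pa, u32 log2sz)
    {
        /* Program the queue base/size before flipping the enable bit */
        writeq(q_pa | log2sz, smmu + SMMU_EVENTQ_BASE);
        writel(0, smmu + SMMU_EVENTQ_PROD);
        writel(0, smmu + SMMU_EVENTQ_CONS);
        /*
         * This CR0 write reaches QEMU's smmu_writel(), which now calls
         * smmuv3_accel_alloc_veventq(); the allocation proceeds once
         * the event queue is enabled.
         */
        writel(readl(smmu + SMMU_CR0) | CR0_EVENTQEN, smmu + SMMU_CR0);
    }
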
For cold-plugged devices using SMMUv3 acceleration, the vIOMMU is
created before the guest boots. In that case, the vEVENTQ is allocated
when the guest writes SMMU_CR0 with EVENTQEN = 1. If no cold-plugged
device exists at boot (i.e. no vIOMMU initially), the vEVENTQ is
instead allocated when a vIOMMU is created, i.e. during the first
device hot-plug.

Event read and propagation will be added in a later patch.

Signed-off-by: Nicolin Chen <[email protected]>
Signed-off-by: Shameer Kolothum <[email protected]>
---
 hw/arm/smmuv3-accel.c | 62 ++++++++++++++++++++++++++++++++++++++++++-
 hw/arm/smmuv3-accel.h |  6 +++++
 hw/arm/smmuv3.c       |  4 +++
 3 files changed, 71 insertions(+), 1 deletion(-)

diff --git a/hw/arm/smmuv3-accel.c b/hw/arm/smmuv3-accel.c
index dc0f61e841..74f0be3731 100644
--- a/hw/arm/smmuv3-accel.c
+++ b/hw/arm/smmuv3-accel.c
@@ -378,6 +378,58 @@ bool smmuv3_accel_issue_inv_cmd(SMMUv3State *bs, void *cmd, SMMUDevice *sdev,
                                             sizeof(Cmd), &entry_num, cmd, errp);
 }
 
+static void smmuv3_accel_free_veventq(SMMUv3AccelState *accel)
+{
+    IOMMUFDVeventq *veventq = accel->veventq;
+
+    if (!veventq) {
+        return;
+    }
+    iommufd_backend_free_id(accel->viommu.iommufd, veventq->veventq_id);
+    g_free(veventq);
+    accel->veventq = NULL;
+}
+
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+    SMMUv3AccelState *accel = s->s_accel;
+    IOMMUFDVeventq *veventq;
+    uint32_t veventq_id;
+    uint32_t veventq_fd;
+
+    if (!accel) {
+        return true;
+    }
+
+    if (accel->veventq) {
+        return true;
+    }
+
+    /*
+     * Check whether the guest has enabled the Event Queue. An enabled queue
+     * means EVENTQ_BASE has been programmed with a valid base address and
+     * size. If it's not yet configured, return and retry later.
+     */
+    if (!smmuv3_eventq_enabled(s)) {
+        return true;
+    }
+
+    if (!iommufd_backend_alloc_veventq(accel->viommu.iommufd,
+                                       accel->viommu.viommu_id,
+                                       IOMMU_VEVENTQ_TYPE_ARM_SMMUV3,
+                                       1 << s->eventq.log2size, &veventq_id,
+                                       &veventq_fd, errp)) {
+        return false;
+    }
+
+    veventq = g_new(IOMMUFDVeventq, 1);
+    veventq->veventq_id = veventq_id;
+    veventq->veventq_fd = veventq_fd;
+    veventq->viommu = &accel->viommu;
+    accel->veventq = veventq;
+    return true;
+}
+
 static bool
 smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
                           Error **errp)
@@ -421,14 +473,21 @@ smmuv3_accel_alloc_viommu(SMMUv3State *s, HostIOMMUDeviceIOMMUFD *idev,
         goto free_abort_hwpt;
     }
 
+    /* Allocate a vEVENTQ if the guest has enabled the event queue */
+    if (!smmuv3_accel_alloc_veventq(s, errp)) {
+        goto free_bypass_hwpt;
+    }
+
     /* Attach a HWPT based on SMMUv3 GBPA.ABORT value */
     hwpt_id = smmuv3_accel_gbpa_hwpt(s, accel);
     if (!host_iommu_device_iommufd_attach_hwpt(idev, hwpt_id, errp)) {
-        goto free_bypass_hwpt;
+        goto free_veventq;
     }
 
     s->s_accel = accel;
     return true;
+free_veventq:
+    smmuv3_accel_free_veventq(accel);
 free_bypass_hwpt:
     iommufd_backend_free_id(idev->iommufd, accel->bypass_hwpt_id);
 free_abort_hwpt:
@@ -537,6 +596,7 @@ static void smmuv3_accel_unset_iommu_device(PCIBus *bus, void *opaque,
     trace_smmuv3_accel_unset_iommu_device(devfn, idev->devid);
 
     if (QLIST_EMPTY(&accel->device_list)) {
+        smmuv3_accel_free_veventq(accel);
         iommufd_backend_free_id(accel->viommu.iommufd, accel->bypass_hwpt_id);
         iommufd_backend_free_id(accel->viommu.iommufd, accel->abort_hwpt_id);
         iommufd_backend_free_id(accel->viommu.iommufd, accel->viommu.viommu_id);
diff --git a/hw/arm/smmuv3-accel.h b/hw/arm/smmuv3-accel.h
index 2f2904d86b..7b0f585769 100644
--- a/hw/arm/smmuv3-accel.h
+++ b/hw/arm/smmuv3-accel.h
@@ -20,6 +20,7 @@
  */
 typedef struct SMMUv3AccelState {
     IOMMUFDViommu viommu;
+    IOMMUFDVeventq *veventq;
     uint32_t bypass_hwpt_id;
     uint32_t abort_hwpt_id;
     QLIST_HEAD(, SMMUv3AccelDevice) device_list;
@@ -48,6 +49,7 @@ bool smmuv3_accel_attach_gbpa_hwpt(SMMUv3State *s, Error **errp);
 bool smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
                                 Error **errp);
 void smmuv3_accel_idr_override(SMMUv3State *s);
+bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp);
 void smmuv3_accel_reset(SMMUv3State *s);
 #else
 static inline void smmuv3_accel_init(SMMUv3State *s)
@@ -78,6 +80,10 @@ smmuv3_accel_issue_inv_cmd(SMMUv3State *s, void *cmd, SMMUDevice *sdev,
 static inline void smmuv3_accel_idr_override(SMMUv3State *s)
 {
 }
+static inline bool smmuv3_accel_alloc_veventq(SMMUv3State *s, Error **errp)
+{
+    return true;
+}
 static inline void smmuv3_accel_reset(SMMUv3State *s)
 {
 }
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 763f069a35..ac60ca0ce7 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1602,6 +1602,10 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
         s->cr0ack = data & ~SMMU_CR0_RESERVED;
         /* in case the command queue has been enabled */
         smmuv3_cmdq_consume(s, &local_err);
+        /* Allocate a vEVENTQ if the guest enabled EventQ and a vIOMMU exists */
+        if (local_err == NULL) {
+            smmuv3_accel_alloc_veventq(s, &local_err);
+        }
         break;
     case A_CR1:
         s->cr[1] = data;
-- 
2.43.0
