Hi,
On 7/5/2025 6:43 AM, Nicolin Chen wrote:
> Introduce a new IOMMUFD_CMD_HW_QUEUE_ALLOC ioctl for user space to allocate
> a HW QUEUE object for a vIOMMU specific HW-accelerated queue, e.g.:
>  - NVIDIA's Virtual Command Queue
>  - AMD vIOMMU's Command Buffer, Event Log Buffers, and PPR Log Buffers
>
> Since this is introduced with NVIDIA's VCMDQs that access the guest memory
> in the physical address space, add an iommufd_hw_queue_alloc_phys() helper
> that will create an access object to the queue memory in the IOAS, to avoid
> the mappings of the guest memory from being unmapped, during the life cycle
> of the HW queue object.
>
> AMD's HW will need an hw_queue_init op that is mutually exclusive with the
> hw_queue_init_phys op, and their case will bypass the access part, i.e. no
> iommufd_hw_queue_alloc_phys() call.

Thanks. We will implement hw_queue_init[_iova] to support the AMD driver and
fix up iommufd_hw_queue_alloc_ioctl(). Is that the correct understanding?
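Something along these lines, on top of this patch? Untested sketch just to
confirm the direction; the hw_queue_init signature below is our assumption,
since only hw_queue_init_phys exists in this series:

        /* Require exactly one of the two mutually exclusive init ops */
        if (!viommu->ops || !viommu->ops->get_hw_queue_size ||
            (!viommu->ops->hw_queue_init == !viommu->ops->hw_queue_init_phys)) {
                rc = -EOPNOTSUPP;
                goto out_put_viommu;
        }

        [...]

        /* Only the phys model needs an access to pin the guest queue memory */
        if (viommu->ops->hw_queue_init_phys) {
                access = iommufd_hw_queue_alloc_phys(cmd, viommu, &base_pa);
                if (IS_ERR(access)) {
                        rc = PTR_ERR(access);
                        goto out_put_viommu;
                }
                hw_queue->access = access;
        }

        [...]

        if (viommu->ops->hw_queue_init_phys)
                rc = viommu->ops->hw_queue_init_phys(hw_queue, cmd->index,
                                                     base_pa);
        else
                /* AMD case: no pinning; HW uses the guest queue address */
                rc = viommu->ops->hw_queue_init(hw_queue, cmd->index);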
-Vasant

>
> Reviewed-by: Pranjal Shrivastava <pr...@google.com>
> Reviewed-by: Kevin Tian <kevin.t...@intel.com>
> Reviewed-by: Lu Baolu <baolu...@linux.intel.com>
> Signed-off-by: Nicolin Chen <nicol...@nvidia.com>
> ---
>  drivers/iommu/iommufd/iommufd_private.h |   2 +
>  include/linux/iommufd.h                 |   1 +
>  include/uapi/linux/iommufd.h            |  33 +++++
>  drivers/iommu/iommufd/main.c            |   6 +
>  drivers/iommu/iommufd/viommu.c          | 177 ++++++++++++++++++++++++
>  5 files changed, 219 insertions(+)
>
> diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
> index 06b8c2e2d9e6..dcd609573244 100644
> --- a/drivers/iommu/iommufd/iommufd_private.h
> +++ b/drivers/iommu/iommufd/iommufd_private.h
> @@ -652,6 +652,8 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
>  void iommufd_viommu_destroy(struct iommufd_object *obj);
>  int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
>  void iommufd_vdevice_destroy(struct iommufd_object *obj);
> +int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd);
> +void iommufd_hw_queue_destroy(struct iommufd_object *obj);
>
>  #ifdef CONFIG_IOMMUFD_TEST
>  int iommufd_test(struct iommufd_ucmd *ucmd);
> diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
> index f13f3ca6adb5..ce4011a2fc27 100644
> --- a/include/linux/iommufd.h
> +++ b/include/linux/iommufd.h
> @@ -123,6 +123,7 @@ struct iommufd_vdevice {
>  struct iommufd_hw_queue {
>          struct iommufd_object obj;
>          struct iommufd_viommu *viommu;
> +        struct iommufd_access *access;
>
>          u64 base_addr; /* in guest physical address space */
>          size_t length;
> diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
> index 640a8b5147c2..55459b9eee31 100644
> --- a/include/uapi/linux/iommufd.h
> +++ b/include/uapi/linux/iommufd.h
> @@ -56,6 +56,7 @@ enum {
>          IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
>          IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
>          IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93,
> +        IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94,
>  };
>
>  /**
> @@ -1156,4 +1157,36 @@ enum iommu_hw_queue_type {
>          IOMMU_HW_QUEUE_TYPE_DEFAULT = 0,
>  };
>
> +/**
> + * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC)
> + * @size: sizeof(struct iommu_hw_queue_alloc)
> + * @flags: Must be 0
> + * @viommu_id: Virtual IOMMU ID to associate the HW queue with
> + * @type: One of enum iommu_hw_queue_type
> + * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue
> + *         model
> + * @out_hw_queue_id: The ID of the new HW queue
> + * @nesting_parent_iova: Base address of the queue memory in the guest physical
> + *                       address space
> + * @length: Length of the queue memory
> + *
> + * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which
> + * allows HW to access a guest queue memory described using @nesting_parent_iova
> + * and @length.
> + *
> + * A vIOMMU can allocate multiple queues, but it must use a different @index per
> + * type to separate each allocation, e.g.
> + *      Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ...
> + */
> +struct iommu_hw_queue_alloc {
> +        __u32 size;
> +        __u32 flags;
> +        __u32 viommu_id;
> +        __u32 type;
> +        __u32 index;
> +        __u32 out_hw_queue_id;
> +        __aligned_u64 nesting_parent_iova;
> +        __aligned_u64 length;
> +};
> +#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC)
>  #endif
> diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
> index 778694d7c207..4e8dbbfac890 100644
> --- a/drivers/iommu/iommufd/main.c
> +++ b/drivers/iommu/iommufd/main.c
> @@ -354,6 +354,7 @@ union ucmd_buffer {
>          struct iommu_destroy destroy;
>          struct iommu_fault_alloc fault;
>          struct iommu_hw_info info;
> +        struct iommu_hw_queue_alloc hw_queue;
>          struct iommu_hwpt_alloc hwpt;
>          struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap;
>          struct iommu_hwpt_invalidate cache;
> @@ -396,6 +397,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
>                   struct iommu_fault_alloc, out_fault_fd),
>          IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
>                   __reserved),
> +        IOCTL_OP(IOMMU_HW_QUEUE_ALLOC, iommufd_hw_queue_alloc_ioctl,
> +                 struct iommu_hw_queue_alloc, length),
>          IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
>                   __reserved),
>          IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap,
> @@ -559,6 +562,9 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
>          [IOMMUFD_OBJ_FAULT] = {
>                  .destroy = iommufd_fault_destroy,
>          },
> +        [IOMMUFD_OBJ_HW_QUEUE] = {
> +                .destroy = iommufd_hw_queue_destroy,
> +        },
>          [IOMMUFD_OBJ_HWPT_PAGING] = {
>                  .destroy = iommufd_hwpt_paging_destroy,
>                  .abort = iommufd_hwpt_paging_abort,
> diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
> index 081ee6697a11..00641204efb2 100644
> --- a/drivers/iommu/iommufd/viommu.c
> +++ b/drivers/iommu/iommufd/viommu.c
> @@ -201,3 +201,180 @@ int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
>          iommufd_put_object(ucmd->ictx, &viommu->obj);
>          return rc;
>  }
> +
> +static void iommufd_hw_queue_destroy_access(struct iommufd_ctx *ictx,
> +                                            struct iommufd_access *access,
> +                                            u64 base_iova, size_t length)
> +{
> +        iommufd_access_unpin_pages(access, base_iova, length);
> +        iommufd_access_detach_internal(access);
> +        iommufd_access_destroy_internal(ictx, access);
> +}
> +
> +void iommufd_hw_queue_destroy(struct iommufd_object *obj)
> +{
> +        struct iommufd_hw_queue *hw_queue =
> +                container_of(obj, struct iommufd_hw_queue, obj);
> +
> +        if (hw_queue->destroy)
> +                hw_queue->destroy(hw_queue);
> +        if (hw_queue->access)
> +                iommufd_hw_queue_destroy_access(hw_queue->viommu->ictx,
> +                                                hw_queue->access,
> +                                                hw_queue->base_addr,
> +                                                hw_queue->length);
> +        if (hw_queue->viommu)
> +                refcount_dec(&hw_queue->viommu->obj.users);
> +}
> +
> +/*
> + * When the HW accesses the guest queue via physical addresses, the underlying
> + * physical pages of the guest queue must be contiguous. Also, for the security
> + * concern that IOMMUFD_CMD_IOAS_UNMAP could potentially remove the mappings of
> + * the guest queue from the nesting parent iopt while the HW is still accessing
> + * the guest queue memory physically, such a HW queue must require an access to
> + * pin the underlying pages and prevent that from happening.
> + */
> +static struct iommufd_access *
> +iommufd_hw_queue_alloc_phys(struct iommu_hw_queue_alloc *cmd,
> +                            struct iommufd_viommu *viommu, phys_addr_t *base_pa)
> +{
> +        struct iommufd_access *access;
> +        struct page **pages;
> +        size_t max_npages;
> +        size_t length;
> +        u64 offset;
> +        size_t i;
> +        int rc;
> +
> +        offset =
> +                cmd->nesting_parent_iova - PAGE_ALIGN(cmd->nesting_parent_iova);
> +        /* DIV_ROUND_UP(offset + cmd->length, PAGE_SIZE) */
> +        if (check_add_overflow(offset, cmd->length, &length))
> +                return ERR_PTR(-ERANGE);
> +        if (check_add_overflow(length, PAGE_SIZE - 1, &length))
> +                return ERR_PTR(-ERANGE);
> +        max_npages = length / PAGE_SIZE;
> +
> +        /*
> +         * Use kvcalloc() to avoid memory fragmentation for a large page array.
> +         * Set __GFP_NOWARN to avoid syzkaller blowups
> +         */
> +        pages = kvcalloc(max_npages, sizeof(*pages), GFP_KERNEL | __GFP_NOWARN);
> +        if (!pages)
> +                return ERR_PTR(-ENOMEM);
> +
> +        access = iommufd_access_create_internal(viommu->ictx);
> +        if (IS_ERR(access)) {
> +                rc = PTR_ERR(access);
> +                goto out_free;
> +        }
> +
> +        rc = iommufd_access_attach_internal(access, viommu->hwpt->ioas);
> +        if (rc)
> +                goto out_destroy;
> +
> +        rc = iommufd_access_pin_pages(access, cmd->nesting_parent_iova,
> +                                      cmd->length, pages, 0);
> +        if (rc)
> +                goto out_detach;
> +
> +        /* Validate if the underlying physical pages are contiguous */
> +        for (i = 1; i < max_npages; i++) {
> +                if (page_to_pfn(pages[i]) == page_to_pfn(pages[i - 1]) + 1)
> +                        continue;
> +                rc = -EFAULT;
> +                goto out_unpin;
> +        }
> +
> +        *base_pa = page_to_pfn(pages[0]) << PAGE_SHIFT;
> +        kfree(pages);
> +        return access;
> +
> +out_unpin:
> +        iommufd_access_unpin_pages(access, cmd->nesting_parent_iova,
> +                                   cmd->length);
> +out_detach:
> +        iommufd_access_detach_internal(access);
> +out_destroy:
> +        iommufd_access_destroy_internal(viommu->ictx, access);
> +out_free:
> +        kfree(pages);
> +        return ERR_PTR(rc);
> +}
> +
> +int iommufd_hw_queue_alloc_ioctl(struct iommufd_ucmd *ucmd)
> +{
> +        struct iommu_hw_queue_alloc *cmd = ucmd->cmd;
> +        struct iommufd_hw_queue *hw_queue;
> +        struct iommufd_viommu *viommu;
> +        struct iommufd_access *access;
> +        size_t hw_queue_size;
> +        phys_addr_t base_pa;
> +        u64 last;
> +        int rc;
> +
> +        if (cmd->flags || cmd->type == IOMMU_HW_QUEUE_TYPE_DEFAULT)
> +                return -EOPNOTSUPP;
> +        if (!cmd->length)
> +                return -EINVAL;
> +        if (check_add_overflow(cmd->nesting_parent_iova, cmd->length - 1,
> +                               &last))
> +                return -EOVERFLOW;
> +
> +        viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
> +        if (IS_ERR(viommu))
> +                return PTR_ERR(viommu);
> +
> +        if (!viommu->ops || !viommu->ops->get_hw_queue_size ||
> +            !viommu->ops->hw_queue_init_phys) {
> +                rc = -EOPNOTSUPP;
> +                goto out_put_viommu;
> +        }
> +
> +        hw_queue_size = viommu->ops->get_hw_queue_size(viommu, cmd->type);
> +        if (!hw_queue_size) {
> +                rc = -EOPNOTSUPP;
> +                goto out_put_viommu;
> +        }
> +
> +        /*
> +         * It is a driver bug for providing a hw_queue_size smaller than the
> +         * core HW queue structure size
> +         */
> +        if (WARN_ON_ONCE(hw_queue_size < sizeof(*hw_queue))) {
> +                rc = -EOPNOTSUPP;
> +                goto out_put_viommu;
> +        }
> +
> +        hw_queue = (struct iommufd_hw_queue *)_iommufd_object_alloc_ucmd(
> +                ucmd, hw_queue_size, IOMMUFD_OBJ_HW_QUEUE);
> +        if (IS_ERR(hw_queue)) {
> +                rc = PTR_ERR(hw_queue);
> +                goto out_put_viommu;
> +        }
> +
> +        access = iommufd_hw_queue_alloc_phys(cmd, viommu, &base_pa);
> +        if (IS_ERR(access)) {
> +                rc = PTR_ERR(access);
> +                goto out_put_viommu;
> +        }
> +
> +        hw_queue->viommu = viommu;
> +        refcount_inc(&viommu->obj.users);
> +        hw_queue->access = access;
> +        hw_queue->type = cmd->type;
> +        hw_queue->length = cmd->length;
> +        hw_queue->base_addr = cmd->nesting_parent_iova;
> +
> +        rc = viommu->ops->hw_queue_init_phys(hw_queue, cmd->index, base_pa);
> +        if (rc)
> +                goto out_put_viommu;
> +
> +        cmd->out_hw_queue_id = hw_queue->obj.id;
> +        rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
> +
> +out_put_viommu:
> +        iommufd_put_object(ucmd->ictx, &viommu->obj);
> +        return rc;
> +}
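For completeness, this is roughly how we expect a VMM to drive the new ioctl
once a driver-specific queue type exists. Untested sketch; HW_QUEUE_TYPE_VENDOR
is a stand-in of ours, since IOMMU_HW_QUEUE_TYPE_DEFAULT is rejected by design:

#include <stdint.h>
#include <sys/ioctl.h>

#include <linux/iommufd.h>

/* Stand-in for a future driver-specific type; DEFAULT (0) is rejected */
#define HW_QUEUE_TYPE_VENDOR	1

static int vmm_hw_queue_alloc(int iommufd, uint32_t viommu_id, uint32_t index,
                              uint64_t queue_gpa, uint64_t queue_len,
                              uint32_t *out_hw_queue_id)
{
        struct iommu_hw_queue_alloc cmd = {
                .size = sizeof(cmd),
                .viommu_id = viommu_id,
                .type = HW_QUEUE_TYPE_VENDOR,
                .index = index,
                /* Guest physical base, mapped in the nesting parent IOAS */
                .nesting_parent_iova = queue_gpa,
                .length = queue_len,
        };

        if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &cmd))
                return -1;
        *out_hw_queue_id = cmd.out_hw_queue_id;
        return 0;
}

For the phys model, the host pages backing [queue_gpa, queue_gpa + queue_len)
must also be physically contiguous, per iommufd_hw_queue_alloc_phys() above.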