On Mon, Jun 24, 2024 at 11:04:50AM +0200, Jiri Pirko wrote:
> From: Jiri Pirko <j...@nvidia.com>
> 
> Currently, the code waits in a busy loop for a reply to every command
> issued on the admin virtqueue. That prevents callers from issuing
> multiple commands in parallel.
> 
> To overcome this limitation, introduce a virtqueue event callback for
> the admin virtqueue. For every issued command, use the completion
> mechanism to wait for a reply. In the event callback, trigger the
> completion for every incoming reply.
> 
> Alongside that, introduce a spinlock to protect the admin virtqueue
> operations.
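
So the per-command flow is now roughly the following (just a sketch with
the names used in the patch, error handling omitted):

	/* submitter, under admin_vq->lock: */
	init_completion(&cmd->completion);
	virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
	virtqueue_kick(vq);
	/* lock dropped, then sleep until the callback signals the reply */
	wait_for_completion(&cmd->completion);

	/* vp_modern_avq_done(), for each used buffer: */
	while ((cmd = virtqueue_get_buf(vq, &len)))
		complete(&cmd->completion);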
> 
> Signed-off-by: Jiri Pirko <j...@nvidia.com>
> ---
>  drivers/virtio/virtio_pci_common.c | 10 +++---
>  drivers/virtio/virtio_pci_common.h |  3 ++
>  drivers/virtio/virtio_pci_modern.c | 52 +++++++++++++++++++++++-------
>  include/linux/virtio.h             |  2 ++
>  4 files changed, 51 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
> index 07c0511f170a..5ff7304c7a2a 100644
> --- a/drivers/virtio/virtio_pci_common.c
> +++ b/drivers/virtio/virtio_pci_common.c
> @@ -346,6 +346,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>               for (i = 0; i < nvqs; ++i)
>                       if (names[i] && callbacks[i])
>                               ++nvectors;
> +             if (avq_num)
> +                     ++nvectors;
>       } else {
>               /* Second best: one for change, shared for all vqs. */
>               nvectors = 2;
> @@ -375,8 +377,8 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
>       if (!avq_num)
>               return 0;
>       sprintf(avq->name, "avq.%u", avq->vq_index);
> -     vqs[i] = vp_find_one_vq_msix(vdev, avq->vq_index, NULL, avq->name,
> -                                  false, &allocated_vectors);
> +     vqs[i] = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
> +                                  avq->name, false, &allocated_vectors);
>       if (IS_ERR(vqs[i])) {
>               err = PTR_ERR(vqs[i]);
>               goto error_find;
> @@ -432,8 +434,8 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
>       if (!avq_num)
>               return 0;
>       sprintf(avq->name, "avq.%u", avq->vq_index);
> -     vqs[i] = vp_setup_vq(vdev, queue_idx++, NULL, avq->name,
> -                          false, VIRTIO_MSI_NO_VECTOR);
> +     vqs[i] = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done,
> +                          avq->name, false, VIRTIO_MSI_NO_VECTOR);
>       if (IS_ERR(vqs[i])) {
>               err = PTR_ERR(vqs[i]);
>               goto out_del_vqs;
> diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> index b3ef76287b43..38a0b6df0844 100644
> --- a/drivers/virtio/virtio_pci_common.h
> +++ b/drivers/virtio/virtio_pci_common.h
> @@ -45,6 +45,8 @@ struct virtio_pci_vq_info {
>  struct virtio_pci_admin_vq {
>       /* serializing admin commands execution. */
>       struct mutex cmd_lock;
> +     /* Protects virtqueue access. */
> +     spinlock_t lock;
>       u64 supported_cmds;
>       /* Name of the admin queue: avq.$vq_index. */
>       char name[10];
> @@ -174,6 +176,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
>  #define VIRTIO_ADMIN_CMD_BITMAP 0
>  #endif
>  
> +void vp_modern_avq_done(struct virtqueue *vq);
>  int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
>                            struct virtio_admin_cmd *cmd);
>  
> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> index b4041e541fc3..b9937e4b8a69 100644
> --- a/drivers/virtio/virtio_pci_modern.c
> +++ b/drivers/virtio/virtio_pci_modern.c
> @@ -53,6 +53,23 @@ static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
>       return index == vp_dev->admin_vq.vq_index;
>  }
>  
> +void vp_modern_avq_done(struct virtqueue *vq)
> +{
> +     struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
> +     struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;
> +     struct virtio_admin_cmd *cmd;
> +     unsigned long flags;
> +     unsigned int len;
> +
> +     spin_lock_irqsave(&admin_vq->lock, flags);
> +     do {
> +             virtqueue_disable_cb(vq);
> +             while ((cmd = virtqueue_get_buf(vq, &len)))
> +                     complete(&cmd->completion);
> +     } while (!virtqueue_enable_cb(vq));
> +     spin_unlock_irqrestore(&admin_vq->lock, flags);
> +}
> +
>  static int virtqueue_exec_admin_cmd(struct virtio_pci_device *vp_dev,
>                                   struct virtio_pci_admin_vq *admin_vq,
>                                   u16 opcode,
> @@ -62,7 +79,8 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_device *vp_dev,
>                                   struct virtio_admin_cmd *cmd)
>  {
>       struct virtqueue *vq;
> -     int ret, len;
> +     unsigned long flags;
> +     int ret;
>  
>       vq = vp_dev->vqs[admin_vq->vq_index]->vq;
>       if (!vq)
> @@ -73,21 +91,30 @@ static int virtqueue_exec_admin_cmd(struct virtio_pci_device *vp_dev,
>           !((1ULL << opcode) & admin_vq->supported_cmds))
>               return -EOPNOTSUPP;
>  
> -     ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> -     if (ret < 0)
> -             return -EIO;
> -
> -     if (unlikely(!virtqueue_kick(vq)))
> -             return -EIO;
> +     init_completion(&cmd->completion);
>  
> -     while (!virtqueue_get_buf(vq, &len) &&
> -            !virtqueue_is_broken(vq))
> -             cpu_relax();
> +again:
> +     spin_lock_irqsave(&admin_vq->lock, flags);
> +     ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_KERNEL);
> +     if (ret < 0) {
> +             if (ret == -ENOSPC) {
> +                     spin_unlock_irqrestore(&admin_vq->lock, flags);
> +                     cpu_relax();
> +                     goto again;
> +             }
> +             goto unlock_err;
> +     }
> +     if (WARN_ON_ONCE(!virtqueue_kick(vq)))
> +             goto unlock_err;


This can actually happen with surprise removal,
so WARN_ON_ONCE isn't really appropriate I think.
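
Maybe just drop the warning and fail the command, e.g. (untested sketch):

	if (!virtqueue_kick(vq))
		goto unlock_err;

so a surprise-removed device simply gets -EIO back instead of a splat.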


> +     spin_unlock_irqrestore(&admin_vq->lock, flags);
>  
> -     if (virtqueue_is_broken(vq))
> -             return -EIO;
> +     wait_for_completion(&cmd->completion);
>  
>       return 0;
> +
> +unlock_err:
> +     spin_unlock_irqrestore(&admin_vq->lock, flags);
> +     return -EIO;
>  }
>  
>  int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> @@ -787,6 +814,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
>       vp_dev->isr = mdev->isr;
>       vp_dev->vdev.id = mdev->id;
>  
> +     spin_lock_init(&vp_dev->admin_vq.lock);
>       mutex_init(&vp_dev->admin_vq.cmd_lock);
>       return 0;
>  }
> diff --git a/include/linux/virtio.h b/include/linux/virtio.h
> index 26c4325aa373..5db8ee175e71 100644
> --- a/include/linux/virtio.h
> +++ b/include/linux/virtio.h
> @@ -10,6 +10,7 @@
>  #include <linux/mod_devicetable.h>
>  #include <linux/gfp.h>
>  #include <linux/dma-mapping.h>
> +#include <linux/completion.h>
>  
>  /**
>   * struct virtqueue - a queue to register buffers for sending or receiving.
> @@ -109,6 +110,7 @@ struct virtio_admin_cmd {
>       __le64 group_member_id;
>       struct scatterlist *data_sg;
>       struct scatterlist *result_sg;
> +     struct completion completion;
>  };
>  
>  /**
> -- 
> 2.45.1

