On Mon, Oct 30, 2023 at 06:06:45PM +0200, Yishai Hadas wrote:
> On 30/10/2023 17:57, Michael S. Tsirkin wrote:
> > On Mon, Oct 30, 2023 at 05:27:50PM +0200, Yishai Hadas wrote:
> > > On 29/10/2023 22:17, Michael S. Tsirkin wrote:
> > > > On Sun, Oct 29, 2023 at 05:59:48PM +0200, Yishai Hadas wrote:
> > > > > Initialize the supported admin commands upon activating the admin
> > > > > queue.
> > > > > 
> > > > > The supported commands are saved as part of the admin queue context
> > > > > and will be used by the next patches in this series.
> > > > > 
> > > > > Note:
> > > > > As we don't want to let upper layers execute admin commands before
> > > > > this initialization step has completed, we set the ref count to 1
> > > > > only after that flow and use a non-ref-counted command execution
> > > > > path for this internal flow.
> > > > > 
> > > > > Signed-off-by: Yishai Hadas <yish...@nvidia.com>
> > > > > ---
> > > > >    drivers/virtio/virtio_pci_common.h |  1 +
> > > > >    drivers/virtio/virtio_pci_modern.c | 77 +++++++++++++++++++++++++++++-
> > > > >    2 files changed, 77 insertions(+), 1 deletion(-)
> > > > > 
> > > > > diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
> > > > > index a21b9ba01a60..9e07e556a51a 100644
> > > > > --- a/drivers/virtio/virtio_pci_common.h
> > > > > +++ b/drivers/virtio/virtio_pci_common.h
> > > > > @@ -46,6 +46,7 @@ struct virtio_pci_admin_vq {
> > > > >       struct virtio_pci_vq_info info;
> > > > >       struct completion flush_done;
> > > > >       refcount_t refcount;
> > > > > +     u64 supported_cmds;
> > > > >       /* Name of the admin queue: avq.$index. */
> > > > >       char name[10];
> > > > >       u16 vq_index;
> > > > > diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
> > > > > index ccd7a4d9f57f..25e27aa79cab 100644
> > > > > --- a/drivers/virtio/virtio_pci_modern.c
> > > > > +++ b/drivers/virtio/virtio_pci_modern.c
> > > > > @@ -19,6 +19,9 @@
> > > > >    #define VIRTIO_RING_NO_LEGACY
> > > > >    #include "virtio_pci_common.h"
> > > > > +static int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> > > > > +                                 struct virtio_admin_cmd *cmd);
> > > > > +
> > > > I don't much like forward declarations. Just order functions sensibly
> > > > and they will not be needed.
> > > OK, will be part of V3.
> > > 
> > > > >    static u64 vp_get_features(struct virtio_device *vdev)
> > > > >    {
> > > > >       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > > > @@ -59,6 +62,42 @@ vp_modern_avq_set_abort(struct virtio_pci_admin_vq *admin_vq, bool abort)
> > > > >       WRITE_ONCE(admin_vq->abort, abort);
> > > > >    }
> > > > > +static void virtio_pci_admin_init_cmd_list(struct virtio_device *virtio_dev)
> > > > > +{
> > > > > +     struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
> > > > > +     struct virtio_admin_cmd cmd = {};
> > > > > +     struct scatterlist result_sg;
> > > > > +     struct scatterlist data_sg;
> > > > > +     __le64 *data;
> > > > > +     int ret;
> > > > > +
> > > > > +     data = kzalloc(sizeof(*data), GFP_KERNEL);
> > > > > +     if (!data)
> > > > > +             return;
> > > > > +
> > > > > +     sg_init_one(&result_sg, data, sizeof(*data));
> > > > > +     cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
> > > > > +     cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
> > > > > +     cmd.result_sg = &result_sg;
> > > > > +
> > > > > +     ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
> > > > > +     if (ret)
> > > > > +             goto end;
> > > > > +
> > > > > +     sg_init_one(&data_sg, data, sizeof(*data));
> > > > > +     cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
> > > > > +     cmd.data_sg = &data_sg;
> > > > > +     cmd.result_sg = NULL;
> > > > > +
> > > > > +     ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
> > > > > +     if (ret)
> > > > > +             goto end;
> > > > > +
> > > > > +     vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
> > > > > +end:
> > > > > +     kfree(data);
> > > > > +}
> > > > > +
> > > > >    static void vp_modern_avq_activate(struct virtio_device *vdev)
> > > > >    {
> > > > >       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
> > > > > @@ -67,6 +106,7 @@ static void vp_modern_avq_activate(struct virtio_device *vdev)
> > > > >       if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
> > > > >               return;
> > > > > +     virtio_pci_admin_init_cmd_list(vdev);
> > > > >       init_completion(&admin_vq->flush_done);
> > > > >       refcount_set(&admin_vq->refcount, 1);
> > > > >       vp_modern_avq_set_abort(admin_vq, false);
> > > > > @@ -562,6 +602,35 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
> > > > >       return true;
> > > > >    }
> > > > > +static int __virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> > > > > +                                 struct scatterlist **sgs,
> > > > > +                                 unsigned int out_num,
> > > > > +                                 unsigned int in_num,
> > > > > +                                 void *data,
> > > > > +                                 gfp_t gfp)
> > > > > +{
> > > > > +     struct virtqueue *vq;
> > > > > +     int ret, len;
> > > > > +
> > > > > +     vq = admin_vq->info.vq;
> > > > > +
> > > > > +     ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, gfp);
> > > > > +     if (ret < 0)
> > > > > +             return ret;
> > > > > +
> > > > > +     if (unlikely(!virtqueue_kick(vq)))
> > > > > +             return -EIO;
> > > > > +
> > > > > +     while (!virtqueue_get_buf(vq, &len) &&
> > > > > +            !virtqueue_is_broken(vq))
> > > > > +             cpu_relax();
> > > > > +
> > > > > +     if (virtqueue_is_broken(vq))
> > > > > +             return -EIO;
> > > > > +
> > > > > +     return 0;
> > > > > +}
> > > > > +
> > > > This is tolerable I guess but it might pin the CPU for a long time.
> > > > The difficulty is handling surprise removal well which we currently
> > > > don't do with interrupts. I would say it's ok as is but add
> > > > a TODO comment along the lines of
> > > > /* TODO: use interrupts once these virtqueue_is_broken */
> > > I assume you meant adding the comment below before the while loop:
> > > /* TODO use interrupts to reduce cpu cycles in the future */
> > > 
> > > Right?
> > > 
> > > Yishai
> > Well, I wrote what I meant. "in the future" is redundant - everyone knows
> > we can't change the past.
> 
> I agree, no need for 'in the future'.
> 
> However, in your suggestion you mentioned "once these virtqueue_is_broken".
> 
> What does that mean in the current polling mode?
> 
> Yishai

Maybe better: /* TODO: use vq callback once it supports virtqueue_is_broken */
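
i.e. in __virtqueue_exec_admin_cmd the comment would sit right above the
busy-wait, something like this (just a sketch of the placement, the loop
itself is unchanged from your patch):

	/* TODO: use vq callback once it supports virtqueue_is_broken */
	while (!virtqueue_get_buf(vq, &len) &&
	       !virtqueue_is_broken(vq))
		cpu_relax();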

> > 
> > > > >    static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
> > > > >                                   struct scatterlist **sgs,
> > > > >                                   unsigned int out_num,
> > > > > @@ -653,7 +722,13 @@ static int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
> > > > >               in_num++;
> > > > >       }
> > > > > -     ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq, sgs,
> > > > > +     if (cmd->opcode == VIRTIO_ADMIN_CMD_LIST_QUERY ||
> > > > > +         cmd->opcode == VIRTIO_ADMIN_CMD_LIST_USE)
> > > > > +             ret = __virtqueue_exec_admin_cmd(&vp_dev->admin_vq, sgs,
> > > > > +                                    out_num, in_num,
> > > > > +                                    sgs, GFP_KERNEL);
> > > > > +     else
> > > > > +             ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq, sgs,
> > > > >                                      out_num, in_num,
> > > > >                                      sgs, GFP_KERNEL);
> > > > >       if (ret) {
> > > > > -- 
> > > > > 2.27.0
> 
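
To spell out what I have in mind for that TODO eventually - only a rough
sketch, the cmd_done completion below does not exist in this patch (the
name is made up), and it still does not solve surprise removal (the
callback never fires if the device is gone), which is exactly why it
stays a TODO for now:

static void vp_modern_avq_done(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	/* wake up the command issuer in __virtqueue_exec_admin_cmd() */
	complete(&vp_dev->admin_vq.cmd_done);
}

and then in the exec path, instead of the cpu_relax() loop:

	if (unlikely(!virtqueue_kick(vq)))
		return -EIO;

	wait_for_completion(&admin_vq->cmd_done);
	if (!virtqueue_get_buf(vq, &len) || virtqueue_is_broken(vq))
		return -EIO;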
