> -----Original Message-----
> From: Laszlo Ersek [mailto:ler...@redhat.com]
> Sent: Wednesday, September 26, 2018 5:27 PM
> To: Liu, Changpeng <changpeng....@intel.com>; seabios@seabios.org
> Cc: Harris, James R <james.r.har...@intel.com>; stefa...@redhat.com;
> Zedlewski, Piotr <piotr.zedlew...@intel.com>
> Subject: Re: [SeaBIOS] [PATCH] virtio-blk/scsi: enable multi-queues support
> when
> starting device
>
> On 09/26/18 10:16, Liu, Changpeng wrote:
> > I posted the patch again, because I didn't get any response for several
> > months... :).
>
> Indeed, you didn't receive any comments under that (July) posting,
> regrettably:
>
> http://mid.mail-archive.com/1531201226-4099-1-git-send-email-
> changpeng....@intel.com
>
> However, prior to that, the topic had been discussed several times. QEMU
> commit fb20fbb76 is correct, and the idea behind the present patch is wrong.
>
> Obviously I'm not a SeaBIOS maintainer, so take my opinion for what it's
> worth. However, I will definitely not accept a similar patch for OVMF --
> it was tried, and I rejected it:
>
> https://lists.01.org/pipermail/edk2-devel/2017-December/019131.html
>
> (In fact, although the author of the OVMF posting and the author of the
> QEMU patch were different persons, and it's also unclear whether they
> worked for the same organization, I suspect that the QEMU patch was
> actually the direct result of the OVMF discussion.)
OK, so changing the slave vhost target logic is the only way now: the vhost
library should start each queue once it gets kicked.
>
> >> -----Original Message-----
> >> From: Liu, Changpeng
> >> Sent: Wednesday, September 26, 2018 4:24 PM
> >> To: seabios@seabios.org
> >> Cc: stefa...@redhat.com; Liu, Changpeng <changpeng....@intel.com>; Harris,
> >> James R <james.r.har...@intel.com>; Zedlewski, Piotr
> >> <piotr.zedlew...@intel.com>; marcandre.lur...@redhat.com
> >> Subject: [PATCH] virtio-blk/scsi: enable multi-queues support when starting
> >> device
> >>
> >> QEMU will not start all the queues since commit fb20fbb76
> >> "vhost: avoid to start/stop virtqueue which is not ready",
> >> because seabios only uses one queue when starting; this will
> >> not work for some vhost slave targets which expect the exact
> >> number of queues defined in virtio-pci configuration space,
>
> This expectation that you spell out in the commit message is what's
> wrong, in those "vhost slave targets".
>
> Thanks
> Laszlo
>
> >> while here, we also enable those queues in the BIOS phase.
> >>
> >> Signed-off-by: Changpeng Liu <changpeng....@intel.com>
> >> ---
> >> src/hw/virtio-blk.c | 26 +++++++++++++++++++-------
> >> src/hw/virtio-ring.h | 1 +
> >> src/hw/virtio-scsi.c | 28 +++++++++++++++++++---------
> >> 3 files changed, 39 insertions(+), 16 deletions(-)
> >>
> >> diff --git a/src/hw/virtio-blk.c b/src/hw/virtio-blk.c
> >> index 88d7e54..79638ec 100644
> >> --- a/src/hw/virtio-blk.c
> >> +++ b/src/hw/virtio-blk.c
> >> @@ -25,7 +25,7 @@
> >>
> >> struct virtiodrive_s {
> >> struct drive_s drive;
> >> - struct vring_virtqueue *vq;
> >> + struct vring_virtqueue *vq[MAX_NUM_QUEUES];
> >> struct vp_device vp;
> >> };
> >>
> >> @@ -34,7 +34,7 @@ virtio_blk_op(struct disk_op_s *op, int write)
> >> {
> >> struct virtiodrive_s *vdrive =
> >> container_of(op->drive_fl, struct virtiodrive_s, drive);
> >> - struct vring_virtqueue *vq = vdrive->vq;
> >> + struct vring_virtqueue *vq = vdrive->vq[0];
> >> struct virtio_blk_outhdr hdr = {
> >> .type = write ? VIRTIO_BLK_T_OUT : VIRTIO_BLK_T_IN,
> >> .ioprio = 0,
> >> @@ -96,6 +96,7 @@ virtio_blk_process_op(struct disk_op_s *op)
> >> static void
> >> init_virtio_blk(void *data)
> >> {
> >> + u32 i, num_queues = 1;
> >> struct pci_device *pci = data;
> >> u8 status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
> VIRTIO_CONFIG_S_DRIVER;
> >> dprintf(1, "found virtio-blk at %pP\n", pci);
> >> @@ -109,10 +110,6 @@ init_virtio_blk(void *data)
> >> vdrive->drive.cntl_id = pci->bdf;
> >>
> >> vp_init_simple(&vdrive->vp, pci);
> >> - if (vp_find_vq(&vdrive->vp, 0, &vdrive->vq) < 0 ) {
> >> - dprintf(1, "fail to find vq for virtio-blk %pP\n", pci);
> >> - goto fail;
> >> - }
> >>
> >> if (vdrive->vp.use_modern) {
> >> struct vp_device *vp = &vdrive->vp;
> >> @@ -156,6 +153,11 @@ init_virtio_blk(void *data)
> >> vp_read(&vp->device, struct virtio_blk_config, heads);
> >> vdrive->drive.pchs.sector =
> >> vp_read(&vp->device, struct virtio_blk_config, sectors);
> >> +
> >> + num_queues = vp_read(&vp->common, virtio_pci_common_cfg,
> >> num_queues);
> >> + if (num_queues < 1 || num_queues > MAX_NUM_QUEUES) {
> >> + num_queues = 1;
> >> + }
> >> } else {
> >> struct virtio_blk_config cfg;
> >> vp_get_legacy(&vdrive->vp, 0, &cfg, sizeof(cfg));
> >> @@ -178,6 +180,13 @@ init_virtio_blk(void *data)
> >> vdrive->drive.pchs.sector = cfg.sectors;
> >> }
> >>
> >> + for (i = 0; i < num_queues; i++) {
> >> + if (vp_find_vq(&vdrive->vp, i, &vdrive->vq[i]) < 0 ) {
> >> + dprintf(1, "fail to find vq %u for virtio-blk %pP\n", i, pci);
> >> + goto fail_vq;
> >> + }
> >> + }
> >> +
> >> char *desc = znprintf(MAXDESCSIZE, "Virtio disk PCI:%pP", pci);
> >> boot_add_hd(&vdrive->drive, desc, bootprio_find_pci_device(pci));
> >>
> >> @@ -185,9 +194,12 @@ init_virtio_blk(void *data)
> >> vp_set_status(&vdrive->vp, status);
> >> return;
> >>
> >> +fail_vq:
> >> + for (i = 0; i < num_queues; i++) {
> >> + free(vdrive->vq[i]);
> >> + }
> >> fail:
> >> vp_reset(&vdrive->vp);
> >> - free(vdrive->vq);
> >> free(vdrive);
> >> }
> >>
> >> diff --git a/src/hw/virtio-ring.h b/src/hw/virtio-ring.h
> >> index 8604a01..3c8a2d1 100644
> >> --- a/src/hw/virtio-ring.h
> >> +++ b/src/hw/virtio-ring.h
> >> @@ -21,6 +21,7 @@
> >> #define VIRTIO_F_IOMMU_PLATFORM 33
> >>
> >> #define MAX_QUEUE_NUM (128)
> >> +#define MAX_NUM_QUEUES (128)
> >>
> >> #define VRING_DESC_F_NEXT 1
> >> #define VRING_DESC_F_WRITE 2
> >> diff --git a/src/hw/virtio-scsi.c b/src/hw/virtio-scsi.c
> >> index a87cad8..d08643f 100644
> >> --- a/src/hw/virtio-scsi.c
> >> +++ b/src/hw/virtio-scsi.c
> >> @@ -148,9 +148,10 @@ virtio_scsi_scan_target(struct pci_device *pci, struct
> >> vp_device *vp,
> >> static void
> >> init_virtio_scsi(void *data)
> >> {
> >> + u32 i, num_queues = 3;
> >> struct pci_device *pci = data;
> >> dprintf(1, "found virtio-scsi at %pP\n", pci);
> >> - struct vring_virtqueue *vq = NULL;
> >> + struct vring_virtqueue *vq[MAX_NUM_QUEUES];
> >> struct vp_device *vp = malloc_high(sizeof(*vp));
> >> if (!vp) {
> >> warn_noalloc();
> >> @@ -175,29 +176,38 @@ init_virtio_scsi(void *data)
> >> dprintf(1, "device didn't accept features: %pP\n", pci);
> >> goto fail;
> >> }
> >> +
> >> + num_queues = vp_read(&vp->common, virtio_pci_common_cfg,
> >> num_queues);
> >> + if (num_queues < 3 || num_queues > MAX_NUM_QUEUES) {
> >> + num_queues = 3;
> >> + }
> >> }
> >>
> >> - if (vp_find_vq(vp, 2, &vq) < 0 ) {
> >> - dprintf(1, "fail to find vq for virtio-scsi %pP\n", pci);
> >> - goto fail;
> >> + for (i = 0; i < num_queues; i++) {
> >> + if (vp_find_vq(vp, i, &vq[i]) < 0 ) {
> >> + dprintf(1, "fail to find vq %u for virtio-scsi %pP\n", i,
> >> pci);
> >> + goto fail;
> >> + }
> >> }
> >>
> >> status |= VIRTIO_CONFIG_S_DRIVER_OK;
> >> vp_set_status(vp, status);
> >>
> >> - int i, tot;
> >> + int tot;
> >> for (tot = 0, i = 0; i < 256; i++)
> >> - tot += virtio_scsi_scan_target(pci, vp, vq, i);
> >> + tot += virtio_scsi_scan_target(pci, vp, vq[2], i);
> >>
> >> if (!tot)
> >> - goto fail;
> >> + goto fail_vq;
> >>
> >> return;
> >> -
> >> +fail_vq:
> >> + for (i = 0; i < num_queues; i++) {
> >> + free(vq[i]);
> >> + }
> >> fail:
> >> vp_reset(vp);
> >> free(vp);
> >> - free(vq);
> >> }
> >>
> >> void
> >> --
> >> 1.9.3
> >
> >
> > _______________________________________________
> > SeaBIOS mailing list
> > SeaBIOS@seabios.org
> > https://mail.coreboot.org/mailman/listinfo/seabios
> >
_______________________________________________
SeaBIOS mailing list
SeaBIOS@seabios.org
https://mail.coreboot.org/mailman/listinfo/seabios