On Thu, 19 Dec 2019, Christian Schoenebeck wrote:
> On Thursday, 19 December 2019 01:42:51 CET Stefano Stabellini wrote:
> > From: Stefano Stabellini <stefano.stabell...@xilinx.com>
> >
> > init_in_iov_from_pdu might not be able to allocate the full buffer size
> > requested, which comes from the client and could be larger than the
> > transport has available at the time of the request. Specifically, this
> > can happen with read operations, with the client requesting a read up to
> > the max allowed, which might be more than the transport has available at
> > the time.
>
> I haven't looked thoroughly at this yet, but that's about addressing a
> temporary, not a permanent transport buffer size limitation, right?
Yes, that is correct.

> Because if it was a permanent one, then probably an adjusted (lowered)
> msize should be returned on R_version response to client as well.
>
> I wonder why I never triggered this issue, because I was experimenting with
> huge msize values for 9pfs performance checks. Was there anything specific
> to trigger this issue?

Lots of heavy usage by a Java application booting. Nothing like Java to
stress the system :-)

> > Today the implementation of init_in_iov_from_pdu throws an error, both
> > Xen and Virtio.
> >
> > Instead, change the V9fsTransport interface so that the size becomes a
> > pointer and can be limited by the implementation of
> > init_in_iov_from_pdu.
> >
> > Change both the Xen and Virtio implementations to set the size to the
> > size of the buffer they managed to allocate, instead of throwing an
> > error.
> >
> > Signed-off-by: Stefano Stabellini <stefano.stabell...@xilinx.com>
> > CC: gr...@kaod.org
> > CC: anthony.per...@citrix.com
> > ---
> >  hw/9pfs/9p.c               | 22 +++++++++++++++-------
> >  hw/9pfs/9p.h               |  2 +-
> >  hw/9pfs/virtio-9p-device.c | 10 +++-------
> >  hw/9pfs/xen-9p-backend.c   | 12 ++++--------
> >  4 files changed, 23 insertions(+), 23 deletions(-)
> >
> > diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c
> > index bdf7919abf..d6c89ce608 100644
> > --- a/hw/9pfs/9p.c
> > +++ b/hw/9pfs/9p.c
> > @@ -1682,22 +1682,30 @@ out_nofid:
> >   * with qemu_iovec_destroy().
> >   */
> >  static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
> > -                                    size_t skip, size_t size,
> > +                                    size_t skip, size_t *size,
> >                                      bool is_write)
> >  {
> >      QEMUIOVector elem;
> >      struct iovec *iov;
> >      unsigned int niov;
> > +    size_t alloc_size = *size + skip;
> >
> >      if (is_write) {
> > -        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, size + skip);
> > +        pdu->s->transport->init_out_iov_from_pdu(pdu, &iov, &niov, alloc_size);
> >      } else {
> > -        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, size + skip);
> > +        pdu->s->transport->init_in_iov_from_pdu(pdu, &iov, &niov, &alloc_size);
> > +    }
> > +
> > +    if (alloc_size < skip)
> > +    {
> > +        *size = 0;
> > +    } else {
> > +        *size = alloc_size - skip;
> >      }
>
> Code style nitpicking:
>
> ERROR: that open brace { should be on the previous line
> #56: FILE: hw/9pfs/9p.c:1699:
> +    if (alloc_size < skip)
> +    {

Oops, sorry! I can fix that.
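For clarity, here is a small standalone sketch of what the new skip/size
arithmetic ends up doing when the transport lowers the allocation. This is
not QEMU code: mock_init_in_iov_from_pdu(), the 4 KiB limit and main() are
made up for illustration; only the clamp mirrors the hunk above, with the
brace style fixed:

#include <stdio.h>
#include <stddef.h>

/* Pretend the transport only has 4 KiB available right now. */
static void mock_init_in_iov_from_pdu(size_t *alloc_size)
{
    const size_t available = 4096;

    if (*alloc_size > available) {
        *alloc_size = available;
    }
}

/* Mirrors the skip/size handling of v9fs_init_qiov_from_pdu above. */
static void init_qiov_sketch(size_t skip, size_t *size)
{
    size_t alloc_size = *size + skip;

    mock_init_in_iov_from_pdu(&alloc_size);

    /* Same clamp as in the patch, with the brace on the previous line. */
    if (alloc_size < skip) {
        *size = 0;
    } else {
        *size = alloc_size - skip;
    }
}

int main(void)
{
    size_t size = 128 * 1024;  /* client asks for a large read */
    size_t skip = 11;          /* space reserved for the reply header */

    init_qiov_sketch(skip, &size);
    printf("clamped size: %zu\n", size);  /* 4096 - 11 = 4085 */
    return 0;
}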
> >
> >  static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
> > @@ -1722,7 +1730,7 @@ static int v9fs_xattr_read(V9fsState *s, V9fsPDU *pdu, V9fsFidState *fidp,
> >      }
> >      offset += err;
> >
> > -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, read_count, false);
> > +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &read_count, false);
> >      err = v9fs_pack(qiov_full.iov, qiov_full.niov, 0,
> >                      ((char *)fidp->fs.xattr.value) + off,
> >                      read_count);
> > @@ -1852,7 +1860,7 @@ static void coroutine_fn v9fs_read(void *opaque)
> >          QEMUIOVector qiov;
> >          int32_t len;
> >
> > -        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, max_count, false);
> > +        v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset + 4, &max_count, false);
> >          qemu_iovec_init(&qiov, qiov_full.niov);
> >          do {
> >              qemu_iovec_reset(&qiov);
> > @@ -2085,7 +2093,7 @@ static void coroutine_fn v9fs_write(void *opaque)
> >          return;
> >      }
> >      offset += err;
> > -    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, count, true);
> > +    v9fs_init_qiov_from_pdu(&qiov_full, pdu, offset, &count, true);
> >      trace_v9fs_write(pdu->tag, pdu->id, fid, off, count, qiov_full.niov);
> >
> >      fidp = get_fid(pdu, fid);
> > diff --git a/hw/9pfs/9p.h b/hw/9pfs/9p.h
> > index 8883761b2c..50f7e21da6 100644
> > --- a/hw/9pfs/9p.h
> > +++ b/hw/9pfs/9p.h
> > @@ -365,7 +365,7 @@ struct V9fsTransport {
> >      ssize_t     (*pdu_vunmarshal)(V9fsPDU *pdu, size_t offset, const char *fmt,
> >                                    va_list ap);
> >      void        (*init_in_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> > -                                        unsigned int *pniov, size_t size);
> > +                                        unsigned int *pniov, size_t *size);
> >      void        (*init_out_iov_from_pdu)(V9fsPDU *pdu, struct iovec **piov,
> >                                           unsigned int *pniov, size_t size);
> >      void        (*push_and_notify)(V9fsPDU *pdu);
> > diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c
> > index 775e8ff766..68873c3f5f 100644
> > --- a/hw/9pfs/virtio-9p-device.c
> > +++ b/hw/9pfs/virtio-9p-device.c
> > @@ -145,19 +145,15 @@ static ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
> >  }
> >
> >  static void virtio_init_in_iov_from_pdu(V9fsPDU *pdu, struct iovec **piov,
> > -                                        unsigned int *pniov, size_t size)
> > +                                        unsigned int *pniov, size_t *size)
> >  {
> >      V9fsState *s = pdu->s;
> >      V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
> >      VirtQueueElement *elem = v->elems[pdu->idx];
> >      size_t buf_size = iov_size(elem->in_sg, elem->in_num);
> >
> > -    if (buf_size < size) {
> > -        VirtIODevice *vdev = VIRTIO_DEVICE(v);
> > -
> > -        virtio_error(vdev,
> > -                     "VirtFS reply type %d needs %zu bytes, buffer has %zu",
> > -                     pdu->id + 1, size, buf_size);
> > +    if (buf_size < *size) {
> > +        *size = buf_size;
> >      }
> >
> >      *piov = elem->in_sg;
> > diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c
> > index 3f54a21c76..3994a356d4 100644
> > --- a/hw/9pfs/xen-9p-backend.c
> > +++ b/hw/9pfs/xen-9p-backend.c
> > @@ -187,7 +187,7 @@ static void xen_9pfs_init_out_iov_from_pdu(V9fsPDU *pdu,
> >  static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> >                                            struct iovec **piov,
> >                                            unsigned int *pniov,
> > -                                          size_t size)
> > +                                          size_t *size)
> >  {
> >      Xen9pfsDev *xen_9pfs = container_of(pdu->s, Xen9pfsDev, state);
> >      Xen9pfsRing *ring = &xen_9pfs->rings[pdu->tag % xen_9pfs->num_rings];
> > @@ -197,15 +197,11 @@ static void xen_9pfs_init_in_iov_from_pdu(V9fsPDU *pdu,
> >      g_free(ring->sg);
> >
> >      ring->sg = g_malloc0(sizeof(*ring->sg) * 2);
> > -    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, size);
> > +    xen_9pfs_in_sg(ring, ring->sg, &num, pdu->idx, *size);
> >
> >      buf_size = iov_size(ring->sg, num);
> > -    if (buf_size < size) {
> > -        xen_pv_printf(&xen_9pfs->xendev, 0, "Xen 9pfs request type %d"
> > -                      "needs %zu bytes, buffer has %zu\n", pdu->id, size,
> > -                      buf_size);
> > -        xen_be_set_state(&xen_9pfs->xendev, XenbusStateClosing);
> > -        xen_9pfs_disconnect(&xen_9pfs->xendev);
> > +    if (buf_size < *size) {
> > +        *size = buf_size;
> >      }
> >
> >      *piov = ring->sg;
>
> Best regards,
> Christian Schoenebeck
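As an aside, the transport-side change both backends now share boils down
to "clamp *size to what iov_size() reports instead of erroring out". A
standalone sketch of that pattern follows; it is not QEMU code:
iov_size_sketch(), the static buffers and main() are invented for
illustration and merely stand in for iov_size() from qemu/iov.h and the
real virtqueue element / Xen ring buffers:

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/* Local stand-in for iov_size() from qemu/iov.h. */
static size_t iov_size_sketch(const struct iovec *iov, unsigned int niov)
{
    size_t total = 0;
    unsigned int i;

    for (i = 0; i < niov; i++) {
        total += iov[i].iov_len;
    }
    return total;
}

/* The buffers this toy "transport" happens to have available right now. */
static char buf0[2048], buf1[1024];

static void sketch_init_in_iov_from_pdu(struct iovec **piov,
                                        unsigned int *pniov, size_t *size)
{
    static struct iovec sg[2] = {
        { .iov_base = buf0, .iov_len = sizeof(buf0) },
        { .iov_base = buf1, .iov_len = sizeof(buf1) },
    };
    size_t buf_size = iov_size_sketch(sg, 2);

    /* Instead of raising an error, lower the requested size to what fits. */
    if (buf_size < *size) {
        *size = buf_size;
    }

    *piov = sg;
    *pniov = 2;
}

int main(void)
{
    struct iovec *iov;
    unsigned int niov;
    size_t size = 64 * 1024;

    sketch_init_in_iov_from_pdu(&iov, &niov, &size);
    printf("request truncated to %zu bytes over %u iovecs\n", size, niov);
    return 0;
}

The real backends of course take the iovecs from the virtqueue element or
the Xen ring rather than static buffers; only the clamp is the same.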