Anthony Liguori wrote:
> This patch implements a very naive virtio block device backend in QEMU.
> There's a lot of room for future optimization.  We need to merge a -disk
> patch before we can provide a mechanism to expose this to users.
>
> Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>
>
> diff --git a/qemu/Makefile.target b/qemu/Makefile.target
> index c7686b2..49c0fc7 100644
>

[snip]

> +
> +    if (1) {
> +        BlockDriverState *bs = bdrv_new("vda");
> +        if (bdrv_open(bs, "/home/anthony/images/linux.img", BDRV_O_SNAPSHOT))
> +            exit(1);
>

Can you add a printf before the exit(1)?  I had to gdb the code to find out
why my qemu was no longer running (with the earlier version I remembered to
change the hard-coded path, but I forgot after applying the new patches).
A sketch is below.

[snip]

> +
> +static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
> +{
> +    VirtIOBlock *s = to_virtio_blk(vdev);
> +    VirtQueueElement elem;
> +    unsigned int count;
> +
> +    while ((count = virtqueue_pop(vq, &elem)) != 0) {
> +        struct virtio_blk_inhdr *in;
> +        struct virtio_blk_outhdr *out;
> +        unsigned int wlen;
> +        off_t off;
> +        int i;
> +
> +        out = (void *)elem.out_sg[0].iov_base;
> +        in = (void *)elem.in_sg[elem.in_num - 1].iov_base;
> +        off = out->sector;
> +
> +        if (out->type & VIRTIO_BLK_T_SCSI_CMD) {
> +            wlen = sizeof(*in);
> +            in->status = VIRTIO_BLK_S_UNSUPP;
> +        } else if (out->type & VIRTIO_BLK_T_OUT) {
> +            wlen = sizeof(*in);
> +
> +            for (i = 1; i < elem.out_num; i++) {
> +                bdrv_write(s->bs, off,
> +                           elem.out_sg[i].iov_base,
> +                           elem.out_sg[i].iov_len / 512);
> +                off += elem.out_sg[i].iov_len / 512;
> +            }
> +
> +            in->status = VIRTIO_BLK_S_OK;
> +        } else {
> +            wlen = sizeof(*in);
> +
> +            for (i = 0; i < elem.in_num - 1; i++) {
> +                bdrv_read(s->bs, off,
> +                          elem.in_sg[i].iov_base,
> +                          elem.in_sg[i].iov_len / 512);
> +                off += elem.in_sg[i].iov_len / 512;
> +                wlen += elem.in_sg[i].iov_len;
> +            }
> +
> +            in->status = VIRTIO_BLK_S_OK;
> +        }
> +
> +        virtqueue_push(vq, &elem, wlen);
> +        virtio_notify(vdev, vq);
> +    }
>

You can move the notify out of the while loop.  This way you save irqs
(a sketch is below).

> +}
> +
> +static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
> +{
> +    VirtIOBlock *s = to_virtio_blk(vdev);
> +    int64_t capacity;
> +    uint32_t v;
> +
> +    bdrv_get_geometry(s->bs, &capacity);
> +    memcpy(config + VIRTIO_CONFIG_BLK_F_CAPACITY, &capacity,
> +           sizeof(capacity));
> +
> +    v = VIRTQUEUE_MAX_SIZE - 2;
> +    memcpy(config + VIRTIO_CONFIG_BLK_F_SEG_MAX, &v, sizeof(v));
> +}
> +
> +static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
> +{
> +    return (1 << VIRTIO_BLK_F_SEG_MAX);
>

In general I think we need to add another feature bit or even a version
number (I know you guys hate it).  The reason is: suppose you don't change
functionality but change the irq protocol (for example, the ISR is no longer
zeroed on read); an old guest driver wouldn't know it is running on a new
host version and would have its irq line left pulled up.  So I suggest
adding a VIRTIO_ISR_CLEAR_XXX capability or adding a version number.
Comments?
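To make that concrete, here is a rough sketch of the feature-bit variant.
The bit number and the VIRTIO_F_ISR_CLEAR_ON_READ name are only placeholders
for whatever we end up agreeing on, not existing defines, and I hang it off
the block device's get_features only because that is the hook this patch
provides; a transport-level bit would probably live in the common virtio
code instead:

    /* placeholder bit: host clears the ISR register on read */
    #define VIRTIO_F_ISR_CLEAR_ON_READ  30

    static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
    {
        /* an old guest driver simply never sees or acks the new bit,
           so the host can keep the old irq behaviour for it */
        return (1 << VIRTIO_BLK_F_SEG_MAX) |
               (1 << VIRTIO_F_ISR_CLEAR_ON_READ);
    }

The nice property of a feature bit over a version number is that negotiation
stays per-behaviour: the host only switches the irq protocol for guests that
actually acknowledged the bit.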
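And a minimal sketch of the notify-outside-the-loop change suggested above;
only the loop skeleton is shown, the per-request handling stays exactly as
in the patch:

    static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        VirtQueueElement elem;
        unsigned int count;
        int notify = 0;

        while ((count = virtqueue_pop(vq, &elem)) != 0) {
            unsigned int wlen = 0;

            /* ... handle the request exactly as in the patch,
             *     setting wlen and the status byte ... */

            virtqueue_push(vq, &elem, wlen);
            notify = 1;
        }

        /* one interrupt for the whole batch instead of one per request */
        if (notify)
            virtio_notify(vdev, vq);
    }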
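Finally, the printf I asked for at the bdrv_open() failure could be as
simple as this (the exact message is just an example, anything that names
the image file is enough):

    if (bdrv_open(bs, "/home/anthony/images/linux.img", BDRV_O_SNAPSHOT)) {
        fprintf(stderr, "virtio-blk: failed to open /home/anthony/images/linux.img\n");
        exit(1);
    }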
> +}
> +
> +VirtIODevice *virtio_blk_init(PCIBus *bus, uint16_t vendor, uint16_t device,
> +                              BlockDriverState *bs)
> +{
> +    VirtIOBlock *s;
> +
> +    s = (VirtIOBlock *)virtio_init_pci(bus, "virtio-blk", vendor, device,
> +                                       vendor, VIRTIO_ID_BLOCK,
> +                                       16, sizeof(VirtIOBlock));
> +
> +    s->vdev.update_config = virtio_blk_update_config;
> +    s->vdev.get_features = virtio_blk_get_features;
> +    s->bs = bs;
> +
> +    virtio_add_queue(&s->vdev, virtio_blk_handle_output);
> +
> +    return &s->vdev;
> +}
> diff --git a/qemu/vl.h b/qemu/vl.h
> index fafcf09..249ede2 100644
> --- a/qemu/vl.h
> +++ b/qemu/vl.h
> @@ -1396,6 +1396,9 @@ void vmchannel_init(CharDriverState *hd, uint32_t deviceid, uint32_t index);
>
>  typedef struct VirtIODevice VirtIODevice;
>
> +VirtIODevice *virtio_blk_init(PCIBus *bus, uint16_t vendor, uint16_t device,
> +                              BlockDriverState *bs);
> +
>  /* buf = NULL means polling */
>  typedef int ADBDeviceRequest(ADBDevice *d, uint8_t *buf_out,
>                               const uint8_t *buf, int len);
>
>