This patch implements the core of save/restore support for virtio. It's
modelled after how PCI save/restore works.
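To illustrate how a device would consume these hooks, here is a rough sketch (not part of this patch) of what the savevm glue for a virtio device could look like. The VirtIOBlock layout and the virtio_blk_save/virtio_blk_load names are just placeholders for whatever the device model defines:

/* Illustrative only: savevm handlers for a virtio device built on the
 * new hooks.  VirtIOBlock and the handler names are hypothetical. */
typedef struct VirtIOBlock {
    VirtIODevice vdev;    /* common virtio/PCI state, saved by virtio_save() */
    /* device-specific state would follow */
} VirtIOBlock;

static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;

    virtio_save(&s->vdev, f);
    /* append device-specific state with qemu_put_*() here */
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 1)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    /* read device-specific state back with qemu_get_*() here */

    return 0;
}

The device init function would then register the pair with something like
register_savevm("virtio-blk", -1, 1, virtio_blk_save, virtio_blk_load, s).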
N.B. This makes savevm/loadvm work, but not live migration. The issue with
live migration is that we're manipulating guest memory without updating the
dirty bitmap correctly. I will submit a patch in the near future that
addresses that problem.
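To make the failure mode concrete: virtio_map_gpa() gives the device a direct host pointer into guest RAM, so every subsequent store to the rings bypasses the dirty bitmap that the live migration RAM phase consults, and migrated ring pages go stale. The eventual fix will presumably look something like the hypothetical helper below, which assumes the ring's guest-physical address doubles as its ram_addr_t offset (virtqueue_set_dirty is a made-up name):

/* Hypothetical sketch: after the device writes into a vring, mark every
 * page backing the ring dirty so the migration RAM phase re-sends it. */
static void virtqueue_set_dirty(VirtQueue *vq)
{
    ram_addr_t addr = (ram_addr_t)vq->pfn << TARGET_PAGE_BITS;
    ram_addr_t end = addr + virtqueue_size(vq->vring.num);

    for (; addr < end; addr += TARGET_PAGE_SIZE)
        cpu_physical_memory_set_dirty(addr);
}

Calling something like this whenever the used ring is updated would keep the bitmap honest, at the cost of re-sending the ring pages on every migration iteration.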
Since v1, I fixed the Signed-off-by line. Sorry about that.
Signed-off-by: Anthony Liguori <[EMAIL PROTECTED]>
diff --git a/qemu/hw/virtio.c b/qemu/hw/virtio.c
index a4c9d10..440cc69 100644
--- a/qemu/hw/virtio.c
+++ b/qemu/hw/virtio.c
@@ -420,7 +420,6 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
     vdev->vq[i].vring.num = queue_size;
     vdev->vq[i].handle_output = handle_output;
-    vdev->vq[i].index = i;
 
     return &vdev->vq[i];
 }
 
@@ -436,6 +435,73 @@ void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
     virtio_update_irq(vdev);
 }
 
+void virtio_save(VirtIODevice *vdev, QEMUFile *f)
+{
+    int i;
+
+    pci_device_save(&vdev->pci_dev, f);
+
+    qemu_put_be32s(f, &vdev->addr);
+    qemu_put_8s(f, &vdev->status);
+    qemu_put_8s(f, &vdev->isr);
+    qemu_put_be16s(f, &vdev->queue_sel);
+    qemu_put_be32s(f, &vdev->features);
+    qemu_put_be32(f, vdev->config_len);
+    qemu_put_buffer(f, vdev->config, vdev->config_len);
+
+    /* count the virtqueues in use; they are allocated contiguously from 0 */
+    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+        if (vdev->vq[i].vring.num == 0)
+            break;
+    }
+
+    qemu_put_be32(f, i);
+
+    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
+        if (vdev->vq[i].vring.num == 0)
+            break;
+
+        qemu_put_be32(f, vdev->vq[i].vring.num);
+        qemu_put_be32s(f, &vdev->vq[i].pfn);
+        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
+    }
+}
+
+void virtio_load(VirtIODevice *vdev, QEMUFile *f)
+{
+    int num, i;
+
+    pci_device_load(&vdev->pci_dev, f);
+
+    qemu_get_be32s(f, &vdev->addr);
+    qemu_get_8s(f, &vdev->status);
+    qemu_get_8s(f, &vdev->isr);
+    qemu_get_be16s(f, &vdev->queue_sel);
+    qemu_get_be32s(f, &vdev->features);
+    vdev->config_len = qemu_get_be32(f);
+    qemu_get_buffer(f, vdev->config, vdev->config_len);
+
+    num = qemu_get_be32(f);
+
+    for (i = 0; i < num; i++) {
+        vdev->vq[i].vring.num = qemu_get_be32(f);
+        qemu_get_be32s(f, &vdev->vq[i].pfn);
+        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
+
+        /* if the guest had configured this queue, remap its ring */
+        if (vdev->vq[i].pfn) {
+            size_t size;
+            target_phys_addr_t pa;
+
+            pa = (target_phys_addr_t)vdev->vq[i].pfn << TARGET_PAGE_BITS;
+            size = virtqueue_size(vdev->vq[i].vring.num);
+            virtqueue_init(&vdev->vq[i], virtio_map_gpa(pa, size));
+        }
+    }
+
+    virtio_update_irq(vdev);
+}
+
 VirtIODevice *virtio_init_pci(PCIBus *bus, const char *name,
                               uint16_t vendor, uint16_t device,
                               uint16_t subvendor, uint16_t subdevice,
diff --git a/qemu/hw/virtio.h b/qemu/hw/virtio.h
index dee97ba..ed8cfd6 100644
--- a/qemu/hw/virtio.h
+++ b/qemu/hw/virtio.h
@@ -87,7 +87,6 @@ struct VirtQueue
     uint32_t pfn;
     uint16_t last_avail_idx;
     void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
-    int index;
 };
 
 #define VIRTQUEUE_MAX_SIZE 1024
@@ -108,8 +107,6 @@ struct VirtIODevice
     PCIDevice pci_dev;
     const char *name;
     uint32_t addr;
-    uint16_t vendor;
-    uint16_t device;
     uint8_t status;
     uint8_t isr;
     uint16_t queue_sel;
@@ -140,4 +137,8 @@ int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem);
 
 void virtio_notify(VirtIODevice *vdev, VirtQueue *vq);
 
+void virtio_save(VirtIODevice *vdev, QEMUFile *f);
+
+void virtio_load(VirtIODevice *vdev, QEMUFile *f);
+
 #endif