Take the device lock when moving data through block devices in the
host->guest direction, so that AIO completion callbacks run under the
owning device's lock.

Also protect the aio_list with a separate lock.
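
For context, the intended lock ordering is: the new aio_list_lock guards only
the first_aio list itself and is always released before the per-device lock
(bs->qemu_dev->lock, taken via qemu_aio_lock()) is acquired around the
completion callback. The sketch below shows that ordering in a minimal,
standalone form using plain pthreads; the struct and function names are
hypothetical stand-ins for illustration, not the QEMU API. The same order
appears in the qemu_aio_poll() hunk further down: the request is unlinked
under aio_list_lock, the list lock is dropped, and only then is the device
lock taken for the callback.

    /*
     * Minimal standalone sketch (pthreads, hypothetical names) of the
     * locking order established by this patch: the list lock protects only
     * the request list and is never held across a completion callback.
     */
    #include <pthread.h>
    #include <stdlib.h>

    typedef struct Device {
        pthread_mutex_t lock;            /* per-device lock ("qemu_dev->lock") */
    } Device;

    typedef struct AIOReq {
        struct AIOReq *next;
        Device *dev;
        void (*cb)(void *opaque, int ret);
        void *opaque;
    } AIOReq;

    static pthread_mutex_t aio_list_lock = PTHREAD_MUTEX_INITIALIZER;
    static AIOReq *first_aio;            /* protected by aio_list_lock */

    /* Queue a request: only the list manipulation needs aio_list_lock. */
    static void aio_submit(AIOReq *req)
    {
        pthread_mutex_lock(&aio_list_lock);
        req->next = first_aio;
        first_aio = req;
        pthread_mutex_unlock(&aio_list_lock);
    }

    /* Complete the head request, mirroring the unlock-list/lock-device order. */
    static void aio_complete_one(int ret)
    {
        pthread_mutex_lock(&aio_list_lock);
        AIOReq *req = first_aio;
        if (!req) {
            pthread_mutex_unlock(&aio_list_lock);
            return;
        }
        first_aio = req->next;                /* unlink while list lock held   */
        pthread_mutex_unlock(&aio_list_lock); /* drop it before the callback   */

        pthread_mutex_lock(&req->dev->lock);  /* equivalent of qemu_aio_lock() */
        req->cb(req->opaque, ret);
        pthread_mutex_unlock(&req->dev->lock);
        free(req);
    }
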

Index: kvm-userspace.io/qemu/block-qcow.c
===================================================================
--- kvm-userspace.io.orig/qemu/block-qcow.c
+++ kvm-userspace.io/qemu/block-qcow.c
@@ -543,7 +543,9 @@ static void qcow_aio_read_cb(void *opaqu
     acb->hd_aiocb = NULL;
     if (ret < 0) {
     fail:
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -568,7 +570,9 @@ static void qcow_aio_read_cb(void *opaqu
 
     if (acb->nb_sectors == 0) {
         /* request completed */
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -646,7 +650,9 @@ static void qcow_aio_write_cb(void *opaq
 
     if (ret < 0) {
     fail:
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -657,7 +663,9 @@ static void qcow_aio_write_cb(void *opaq
 
     if (acb->nb_sectors == 0) {
         /* request completed */
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
Index: kvm-userspace.io/qemu/block-raw-posix.c
===================================================================
--- kvm-userspace.io.orig/qemu/block-raw-posix.c
+++ kvm-userspace.io/qemu/block-raw-posix.c
@@ -260,11 +260,14 @@ static void aio_signal_handler(int signu
 #endif
 }
 
+qemu_mutex_t aio_list_lock;
+
 void qemu_aio_init(void)
 {
     struct sigaction act;
 
     aio_initialized = 1;
+    qemu_mutex_init(&aio_list_lock);
 
     sigfillset(&act.sa_mask);
     act.sa_flags = 0; /* do not restart syscalls to interrupt select() */
@@ -291,6 +294,7 @@ void qemu_aio_poll(void)
     int ret;
 
     for(;;) {
+        qemu_mutex_lock(&aio_list_lock);
         pacb = &first_aio;
         for(;;) {
             acb = *pacb;
@@ -300,7 +304,9 @@ void qemu_aio_poll(void)
             if (ret == ECANCELED) {
                 /* remove the request */
                 *pacb = acb->next;
+                qemu_mutex_unlock(&aio_list_lock);
                 qemu_aio_release(acb);
+                break;
             } else if (ret != EINPROGRESS) {
                 /* end of aio */
                 if (ret == 0) {
@@ -314,8 +320,11 @@ void qemu_aio_poll(void)
                 }
                 /* remove the request */
                 *pacb = acb->next;
+                qemu_mutex_unlock(&aio_list_lock);
                 /* call the callback */
+                qemu_aio_lock(acb->common.bs);
                 acb->common.cb(acb->common.opaque, ret);
+                qemu_aio_unlock(acb->common.bs);
                 qemu_aio_release(acb);
                 break;
             } else {
@@ -323,7 +332,8 @@ void qemu_aio_poll(void)
             }
         }
     }
- the_end: ;
+ the_end:
+    qemu_mutex_unlock(&aio_list_lock);
 }
 
 /* Wait for all IO requests to complete.  */
@@ -331,6 +341,7 @@ void qemu_aio_flush(void)
 {
     qemu_aio_wait_start();
     qemu_aio_poll();
+    /* FIXME: first_aio is protected by aio_list_lock. */
     while (first_aio) {
         qemu_aio_wait();
     }
@@ -410,8 +421,10 @@ static RawAIOCB *raw_aio_setup(BlockDriv
     else
         acb->aiocb.aio_nbytes = nb_sectors * 512;
     acb->aiocb.aio_offset = sector_num * 512;
+    qemu_mutex_lock(&aio_list_lock);
     acb->next = first_aio;
     first_aio = acb;
+    qemu_mutex_unlock(&aio_list_lock);
     return acb;
 }
 
@@ -451,7 +464,6 @@ static void raw_aio_cancel(BlockDriverAI
 {
     int ret;
     RawAIOCB *acb = (RawAIOCB *)blockacb;
-    RawAIOCB **pacb;
 
     ret = aio_cancel(acb->aiocb.aio_fildes, &acb->aiocb);
     if (ret == AIO_NOTCANCELED) {
@@ -459,7 +471,9 @@ static void raw_aio_cancel(BlockDriverAI
            it */
         while (aio_error(&acb->aiocb) == EINPROGRESS);
     }
+    /* qemu_aio_poll will remove it from the queue */
 
+#if 0
     /* remove the callback from the queue */
     pacb = &first_aio;
     for(;;) {
@@ -467,11 +481,14 @@ static void raw_aio_cancel(BlockDriverAI
             break;
         } else if (*pacb == acb) {
             *pacb = acb->next;
+            qemu_mutex_unlock(&aio_list_lock);
             qemu_aio_release(acb);
+            qemu_mutex_lock(&aio_list_lock);
             break;
         }
         pacb = &acb->next;
     }
+#endif
 }
 
 static void raw_close(BlockDriverState *bs)
Index: kvm-userspace.io/qemu/block.c
===================================================================
--- kvm-userspace.io.orig/qemu/block.c
+++ kvm-userspace.io/qemu/block.c
@@ -501,6 +501,21 @@ int bdrv_commit(BlockDriverState *bs)
     return 0;
 }
 
+void qemu_aio_lock(BlockDriverState *bs)
+{
+    if (!bs->qemu_dev) {
+        /* should never happen: the owning device did not register its lock */
+        fprintf(stderr, "qemu_aio_lock: bs->qemu_dev = NULL\n");
+        abort();
+    }
+    qemu_mutex_lock(&bs->qemu_dev->lock);
+}
+
+void qemu_aio_unlock(BlockDriverState *bs)
+{
+    qemu_mutex_unlock(&bs->qemu_dev->lock);
+}
+
 /* return < 0 if error. See bdrv_write() for the return codes */
 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
               uint8_t *buf, int nb_sectors)
@@ -1326,7 +1341,9 @@ static void bdrv_aio_cancel_em(BlockDriv
 static void bdrv_aio_bh_cb(void *opaque)
 {
     BlockDriverAIOCBSync *acb = opaque;
+    qemu_aio_lock(acb->common.bs);
     acb->common.cb(acb->common.opaque, acb->ret);
+    qemu_aio_unlock(acb->common.bs);
     qemu_aio_release(acb);
 }
 
@@ -1365,6 +1382,7 @@ static BlockDriverAIOCB *bdrv_aio_write_
 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
 {
     BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
+    /* FIXME: the bh list needs a lock */
     qemu_bh_cancel(acb->bh);
     qemu_aio_release(acb);
 }
@@ -1459,7 +1477,6 @@ void *qemu_aio_get(BlockDriverState *bs,
 
 void qemu_aio_release(void *p)
 {
-    BlockDriverAIOCB *acb = p;
     qemu_free(p);
 }
 
Index: kvm-userspace.io/qemu/block.h
===================================================================
--- kvm-userspace.io.orig/qemu/block.h
+++ kvm-userspace.io/qemu/block.h
@@ -96,6 +96,9 @@ void qemu_aio_wait_start(void);
 void qemu_aio_wait(void);
 void qemu_aio_wait_end(void);
 
+void qemu_aio_lock(BlockDriverState *bs);
+void qemu_aio_unlock(BlockDriverState *bs);
+
 int qemu_key_check(BlockDriverState *bs, const char *name);
 
 /* Ensure contents are flushed to disk.  */
Index: kvm-userspace.io/qemu/block_int.h
===================================================================
--- kvm-userspace.io.orig/qemu/block_int.h
+++ kvm-userspace.io/qemu/block_int.h
@@ -130,6 +130,7 @@ struct BlockDriverState {
     char device_name[32];
     /* PCI devfn of parent */
     int devfn;
+    QEMUDevice *qemu_dev;
     BlockDriverState *next;
 };
 
Index: kvm-userspace.io/qemu/block-qcow2.c
===================================================================
--- kvm-userspace.io.orig/qemu/block-qcow2.c
+++ kvm-userspace.io/qemu/block-qcow2.c
@@ -812,7 +812,9 @@ static void qcow_aio_read_cb(void *opaqu
     acb->hd_aiocb = NULL;
     if (ret < 0) {
     fail:
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -837,7 +839,9 @@ static void qcow_aio_read_cb(void *opaqu
 
     if (acb->nb_sectors == 0) {
         /* request completed */
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -933,7 +937,9 @@ static void qcow_aio_write_cb(void *opaq
 
     if (ret < 0) {
     fail:
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
@@ -944,7 +950,9 @@ static void qcow_aio_write_cb(void *opaq
 
     if (acb->nb_sectors == 0) {
         /* request completed */
+        qemu_aio_lock(bs);
         acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_unlock(bs);
         qemu_aio_release(acb);
         return;
     }
Index: kvm-userspace.io/qemu/hw/ide.c
===================================================================
--- kvm-userspace.io.orig/qemu/hw/ide.c
+++ kvm-userspace.io/qemu/hw/ide.c
@@ -2983,8 +2983,10 @@ void pci_piix3_ide_init(PCIBus *bus, Blo
     ide_init_ioport(&d->ide_if[2], 0x170, 0x376, &d->dev.qemu_dev);
 
     for (i = 0; i < 4; i++)
-        if (hd_table[i])
+        if (hd_table[i]) {
+            hd_table[i]->qemu_dev = &d->dev.qemu_dev;
             hd_table[i]->devfn = d->dev.devfn;
+        }
 
     register_savevm("ide", 0, 1, pci_ide_save, pci_ide_load, d);
 }
Index: kvm-userspace.io/qemu/hw/lsi53c895a.c
===================================================================
--- kvm-userspace.io.orig/qemu/hw/lsi53c895a.c
+++ kvm-userspace.io/qemu/hw/lsi53c895a.c
@@ -594,6 +594,7 @@ static void lsi_command_complete(void *o
 {
     LSIState *s = (LSIState *)opaque;
     int out;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     out = (s->sstat1 & PHASE_MASK) == PHASE_DO;
     if (reason == SCSI_REASON_DONE) {
@@ -735,6 +736,8 @@ static void lsi_do_msgout(LSIState *s)
     uint8_t msg;
     int len;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
+
     DPRINTF("MSG out len=%d\n", s->dbc);
     while (s->dbc) {
         msg = lsi_get_msgbyte(s);
@@ -1217,6 +1220,8 @@ static uint8_t lsi_reg_readb(LSIState *s
     case addr + 2: return (s->name >> 16) & 0xff; \
     case addr + 3: return (s->name >> 24) & 0xff;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
+
 #ifdef DEBUG_LSI_REG
     DPRINTF("Read reg %x\n", offset);
 #endif
@@ -1383,6 +1388,8 @@ static void lsi_reg_writeb(LSIState *s, 
     case addr + 2: s->name &= 0xff00ffff; s->name |= val << 16; break; \
     case addr + 3: s->name &= 0x00ffffff; s->name |= val << 24; break;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
+
 #ifdef DEBUG_LSI_REG
     DPRINTF("Write reg %x = %02x\n", offset, val);
 #endif
@@ -1592,6 +1599,7 @@ static void lsi_mmio_writeb(void *opaque
 {
     LSIState *s = (LSIState *)opaque;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
     lsi_reg_writeb(s, addr & 0xff, val);
 }
 
@@ -1599,6 +1607,7 @@ static void lsi_mmio_writew(void *opaque
 {
     LSIState *s = (LSIState *)opaque;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
     addr &= 0xff;
     lsi_reg_writeb(s, addr, val & 0xff);
     lsi_reg_writeb(s, addr + 1, (val >> 8) & 0xff);
@@ -1608,6 +1617,7 @@ static void lsi_mmio_writel(void *opaque
 {
     LSIState *s = (LSIState *)opaque;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
     addr &= 0xff;
     lsi_reg_writeb(s, addr, val & 0xff);
     lsi_reg_writeb(s, addr + 1, (val >> 8) & 0xff);
@@ -1619,6 +1629,7 @@ static uint32_t lsi_mmio_readb(void *opa
 {
     LSIState *s = (LSIState *)opaque;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
     return lsi_reg_readb(s, addr & 0xff);
 }
 
@@ -1627,6 +1638,7 @@ static uint32_t lsi_mmio_readw(void *opa
     LSIState *s = (LSIState *)opaque;
     uint32_t val;
 
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
     addr &= 0xff;
     val = lsi_reg_readb(s, addr);
     val |= lsi_reg_readb(s, addr + 1) << 8;
@@ -1662,6 +1674,7 @@ static void lsi_ram_writeb(void *opaque,
     LSIState *s = (LSIState *)opaque;
     uint32_t newval;
     int shift;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     newval = s->script_ram[addr >> 2];
@@ -1675,6 +1688,7 @@ static void lsi_ram_writew(void *opaque,
 {
     LSIState *s = (LSIState *)opaque;
     uint32_t newval;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     newval = s->script_ram[addr >> 2];
@@ -1690,6 +1704,7 @@ static void lsi_ram_writew(void *opaque,
 static void lsi_ram_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
 {
     LSIState *s = (LSIState *)opaque;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     s->script_ram[addr >> 2] = val;
@@ -1699,6 +1714,7 @@ static uint32_t lsi_ram_readb(void *opaq
 {
     LSIState *s = (LSIState *)opaque;
     uint32_t val;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     val = s->script_ram[addr >> 2];
@@ -1710,6 +1726,7 @@ static uint32_t lsi_ram_readw(void *opaq
 {
     LSIState *s = (LSIState *)opaque;
     uint32_t val;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     val = s->script_ram[addr >> 2];
@@ -1721,6 +1738,7 @@ static uint32_t lsi_ram_readw(void *opaq
 static uint32_t lsi_ram_readl(void *opaque, target_phys_addr_t addr)
 {
     LSIState *s = (LSIState *)opaque;
+    assert_is_locked(&s->pci_dev.qemu_dev.lock);
 
     addr &= 0x1fff;
     return le32_to_cpu(s->script_ram[addr >> 2]);
@@ -1850,6 +1868,7 @@ void lsi_scsi_attach(void *opaque, Block
     if (s->scsi_dev[id] == NULL)
         s->scsi_dev[id] = scsi_disk_init(bd, 1, lsi_command_complete, s);
     bd->devfn = s->pci_dev.devfn;
+    bd->qemu_dev = &s->pci_dev.qemu_dev;
 }
 
 int lsi_scsi_uninit(PCIDevice *d)
