Record which device owns ioports/memports, and use that to grab the appropriate
lock when entering ioport/iomem processing.

Make cpu_physical_memory_rw() run without taking device locks (it is called
from inside device code, which already holds them), and make locking optional
in __cpu_physical_memory_rw(), which is called when a vcpu enters mmio
read/write.

The debugging checks should aid finding most problems.

Index: kvm-userspace.io/qemu/exec.c
===================================================================
--- kvm-userspace.io.orig/qemu/exec.c
+++ kvm-userspace.io/qemu/exec.c
@@ -170,6 +170,7 @@ PhysPageDesc **l1_phys_map;
 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
+QEMUDevice *io_mem_devices[IO_MEM_NB_ENTRIES];
 char io_mem_used[IO_MEM_NB_ENTRIES];
 #if defined(CONFIG_SOFTMMU)
 static int io_mem_watch;
@@ -2039,6 +2040,11 @@ static inline void tlb_set_dirty(CPUStat
 }
 #endif /* defined(CONFIG_USER_ONLY) */
 
+QEMUDevice *qemu_find_device_iomem(int io_index)
+{
+    return io_mem_devices[io_index >> IO_MEM_SHIFT];
+}
+
 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              int memory);
 static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
@@ -2502,11 +2508,12 @@ static void *subpage_init (target_phys_a
 {
     subpage_t *mmio;
     int subpage_memory;
+    QEMUDevice *dev = qemu_find_device_iomem(*phys);
 
     mmio = qemu_mallocz(sizeof(subpage_t));
     if (mmio != NULL) {
         mmio->base = base;
-        subpage_memory = cpu_register_io_memory(NULL, 0, subpage_read, 
subpage_write, mmio);
+        subpage_memory = cpu_register_io_memory(dev, 0, subpage_read, 
subpage_write, mmio);
 #if defined(DEBUG_SUBPAGE)
         printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
                mmio, base, TARGET_PAGE_SIZE, subpage_memory);
@@ -2582,6 +2589,7 @@ int cpu_register_io_memory(QEMUDevice *d
         io_mem_write[io_index][i] = mem_write[i];
     }
     io_mem_opaque[io_index] = opaque;
+    io_mem_devices[io_index] = dev;
     return (io_index << IO_MEM_SHIFT) | subwidth;
 }
 
@@ -2651,8 +2659,42 @@ void cpu_physical_memory_rw(target_phys_
 }
 
 #else
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
-                            int len, int is_write)
+
+#define WRITE 0x80
+
+void qemu_iomem_dev_lock(int io_index, void *unassigned_io_fn, int iotype,
+                         int has_lock)
+{
+    if (!io_mem_devices[io_index]) {
+        void *handler;
+
+        if (iotype & WRITE)
+            handler = io_mem_write[io_index][iotype & ~WRITE];
+        else
+            handler = io_mem_read[io_index][iotype];
+
+        if (handler != unassigned_io_fn)
+            hw_error("iomem %x has no registered QEMUDevice, but has valid "
+                     "handling fn\n", io_index);
+        return;
+    }
+    if (has_lock)
+        assert_is_locked(&io_mem_devices[io_index]->lock);
+    else
+        qemu_mutex_lock(&io_mem_devices[io_index]->lock);
+}
+
+void qemu_iomem_dev_unlock(int io_index, int has_lock)
+{
+    if (has_lock && io_mem_devices[io_index])
+        assert_is_locked(&io_mem_devices[io_index]->lock);
+    else if (!has_lock && io_mem_devices[io_index])
+        qemu_mutex_unlock(&io_mem_devices[io_index]->lock);
+}
+
+
+void __cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+                            int len, int is_write, int has_lock)
 {
     int l, io_index;
     uint8_t *ptr;
@@ -2666,6 +2708,7 @@ void cpu_physical_memory_rw(target_phys_
         l = (page + TARGET_PAGE_SIZE) - addr;
         if (l > len)
             l = len;
+        /* FIXME: verify safety of concurrent phys_page_find */
         p = phys_page_find(page >> TARGET_PAGE_BITS);
         if (!p) {
             pd = IO_MEM_UNASSIGNED;
@@ -2681,19 +2724,26 @@ void cpu_physical_memory_rw(target_phys_
                 if (l >= 4 && ((addr & 3) == 0)) {
                     /* 32 bit write access */
                     val = ldl_p(buf);
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_writeb,
+                                        2|WRITE, has_lock);
                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr, 
val);
                     l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {
                     /* 16 bit write access */
                     val = lduw_p(buf);
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_writeb,
+                                        1|WRITE, has_lock);
                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr, 
val);
                     l = 2;
                 } else {
                     /* 8 bit write access */
                     val = ldub_p(buf);
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_writeb,
+                                        0|WRITE, has_lock);
                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr, 
val);
                     l = 1;
                 }
+                qemu_iomem_dev_unlock(io_index, has_lock);
             } else {
                 unsigned long addr1;
                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
@@ -2719,21 +2769,28 @@ void cpu_physical_memory_rw(target_phys_
                 /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (l >= 4 && ((addr & 3) == 0)) {
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_readb, 2,
+                                        has_lock);
                     /* 32 bit read access */
                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], 
addr);
                     stl_p(buf, val);
                     l = 4;
                 } else if (l >= 2 && ((addr & 1) == 0)) {
                     /* 16 bit read access */
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_readb, 1,
+                                        has_lock);
                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], 
addr);
                     stw_p(buf, val);
                     l = 2;
                 } else {
                     /* 8 bit read access */
+                    qemu_iomem_dev_lock(io_index, unassigned_mem_readb, 0,
+                                        has_lock);
                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], 
addr);
                     stb_p(buf, val);
                     l = 1;
                 }
+                qemu_iomem_dev_unlock(io_index, has_lock);
             } else {
                 /* RAM case */
                 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
@@ -2747,6 +2804,13 @@ void cpu_physical_memory_rw(target_phys_
     }
 }
 
+void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+                            int len, int is_write)
+{
+    __cpu_physical_memory_rw(addr, buf, len, is_write, 1);
+    return;
+}
+
 /* used for ROM loading : can write in RAM and ROM */
 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                    const uint8_t *buf, int len)
Index: kvm-userspace.io/qemu/vl.c
===================================================================
--- kvm-userspace.io.orig/qemu/vl.c
+++ kvm-userspace.io/qemu/vl.c
@@ -170,6 +170,7 @@ int inet_aton(const char *cp, struct in_
 const char *bios_dir = CONFIG_QEMU_SHAREDIR;
 const char *bios_name = NULL;
 void *ioport_opaque[MAX_IOPORTS];
+QEMUDevice *ioport_devices[MAX_IOPORTS];
 IOPortReadFunc *ioport_read_table[3][MAX_IOPORTS];
 IOPortWriteFunc *ioport_write_table[3][MAX_IOPORTS];
 /* Note: drives_table[MAX_DRIVES] is a dummy block driver if none available
@@ -366,6 +367,10 @@ int register_ioport_read(QEMUDevice *dev
         if (ioport_opaque[i] != NULL && ioport_opaque[i] != opaque)
             hw_error("register_ioport_read: invalid opaque");
         ioport_opaque[i] = opaque;
+
+        if (ioport_devices[i] != NULL && ioport_devices[i] != dev)
+            hw_error("register_ioport_read: invalid device");
+        ioport_devices[i] = dev;
     }
     return 0;
 }
@@ -391,6 +396,10 @@ int register_ioport_write(QEMUDevice *de
         if (ioport_opaque[i] != NULL && ioport_opaque[i] != opaque)
             hw_error("register_ioport_write: invalid opaque");
         ioport_opaque[i] = opaque;
+
+        if (ioport_devices[i] != NULL && ioport_devices[i] != dev)
+            hw_error("register_ioport_write: invalid device");
+        ioport_devices[i] = dev;
     }
     return 0;
 }
@@ -409,18 +418,47 @@ void isa_unassign_ioport(int start, int 
         ioport_write_table[2][i] = default_ioport_writel;
 
         ioport_opaque[i] = NULL;
+        ioport_devices[i] = NULL;
     }
 }
 
+#define WRITE 0x80
+
 /***********************************************************/
 
+void qemu_ioport_dev_lock(int addr, void *unassigned_io_fn, int iotype)
+{
+    if (!ioport_devices[addr]) {
+        void *handler;
+
+        if (iotype & WRITE)
+            handler = ioport_write_table[iotype & ~WRITE][addr];
+        else
+            handler = ioport_read_table[iotype][addr];
+
+        if (handler != unassigned_io_fn)
+            hw_error("ioport %x has no registered QEMUDevice, but has valid "
+                     "handling fn\n", addr);
+        return;
+    }
+    qemu_mutex_lock(&ioport_devices[addr]->lock);
+}
+
+void qemu_ioport_dev_unlock(int addr)
+{
+    if (ioport_devices[addr])
+        qemu_mutex_unlock(&ioport_devices[addr]->lock);
+}
+
 void cpu_outb(CPUState *env, int addr, int val)
 {
 #ifdef DEBUG_IOPORT
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "outb: %04x %02x\n", addr, val);
 #endif
+    qemu_ioport_dev_lock(addr, default_ioport_writeb, 0|WRITE);
     ioport_write_table[0][addr](ioport_opaque[addr], addr, val);
+    qemu_ioport_dev_unlock(addr);
 #ifdef USE_KQEMU
     if (env)
         env->last_io_time = cpu_get_time_fast();
@@ -433,7 +471,9 @@ void cpu_outw(CPUState *env, int addr, i
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "outw: %04x %04x\n", addr, val);
 #endif
+    qemu_ioport_dev_lock(addr, default_ioport_writew, 1|WRITE);
     ioport_write_table[1][addr](ioport_opaque[addr], addr, val);
+    qemu_ioport_dev_unlock(addr);
 #ifdef USE_KQEMU
     if (env)
         env->last_io_time = cpu_get_time_fast();
@@ -446,7 +486,9 @@ void cpu_outl(CPUState *env, int addr, i
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "outl: %04x %08x\n", addr, val);
 #endif
+    qemu_ioport_dev_lock(addr, default_ioport_writel, 2|WRITE);
     ioport_write_table[2][addr](ioport_opaque[addr], addr, val);
+    qemu_ioport_dev_unlock(addr);
 #ifdef USE_KQEMU
     if (env)
         env->last_io_time = cpu_get_time_fast();
@@ -456,7 +498,9 @@ void cpu_outl(CPUState *env, int addr, i
 int cpu_inb(CPUState *env, int addr)
 {
     int val;
+    qemu_ioport_dev_lock(addr, default_ioport_readb, 0);
     val = ioport_read_table[0][addr](ioport_opaque[addr], addr);
+    qemu_ioport_dev_unlock(addr);
 #ifdef DEBUG_IOPORT
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "inb : %04x %02x\n", addr, val);
@@ -471,7 +515,9 @@ int cpu_inb(CPUState *env, int addr)
 int cpu_inw(CPUState *env, int addr)
 {
     int val;
+    qemu_ioport_dev_lock(addr, default_ioport_readw, 1);
     val = ioport_read_table[1][addr](ioport_opaque[addr], addr);
+    qemu_ioport_dev_unlock(addr);
 #ifdef DEBUG_IOPORT
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "inw : %04x %04x\n", addr, val);
@@ -486,7 +532,9 @@ int cpu_inw(CPUState *env, int addr)
 int cpu_inl(CPUState *env, int addr)
 {
     int val;
+    qemu_ioport_dev_lock(addr, default_ioport_readl, 2);
     val = ioport_read_table[2][addr](ioport_opaque[addr], addr);
+    qemu_ioport_dev_unlock(addr);
 #ifdef DEBUG_IOPORT
     if (loglevel & CPU_LOG_IOPORT)
         fprintf(logfile, "inl : %04x %08x\n", addr, val);
Index: kvm-userspace.io/qemu/hw/pc.c
===================================================================
--- kvm-userspace.io.orig/qemu/hw/pc.c
+++ kvm-userspace.io/qemu/hw/pc.c
@@ -1046,7 +1046,6 @@ static void pc_init1(ram_addr_t ram_size
         }
     }
 
-    qemu_system_hot_add_init(cpu_model);
 #define USE_HYPERCALL
 #ifdef USE_HYPERCALL
     pci_hypercall_init(pci_bus);
@@ -1110,6 +1109,8 @@ static void pc_init1(ram_addr_t ram_size
         i440fx_init_memory_mappings(i440fx_state);
     }
 
+    qemu_system_hot_add_init(cpu_model);
+
     if (pci_enabled) {
        int max_bus;
         int bus, unit;
Index: kvm-userspace.io/qemu/cpu-all.h
===================================================================
--- kvm-userspace.io.orig/qemu/cpu-all.h
+++ kvm-userspace.io/qemu/cpu-all.h
@@ -850,6 +850,10 @@ CPUReadMemoryFunc **cpu_get_io_memory_re
 
 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                             int len, int is_write);
+/* the same as above, but specify whether device locks are held */
+void __cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
+                            int len, int is_write, int has_lock);
+
 static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                             uint8_t *buf, int len)
 {
Index: kvm-userspace.io/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.io.orig/qemu/qemu-kvm.c
+++ kvm-userspace.io/qemu/qemu-kvm.c
@@ -555,13 +555,13 @@ static int kvm_outl(void *opaque, uint16
 
 static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
 {
-       cpu_physical_memory_rw(addr, data, len, 0);
+       __cpu_physical_memory_rw(addr, data, len, 0, 0);
        return 0;
 }
 
 static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
 {
-       cpu_physical_memory_rw(addr, data, len, 1);
+       __cpu_physical_memory_rw(addr, data, len, 1, 0);
        return 0;
 }
 

-- 


-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference 
Don't miss this year's exciting event. There's still time to save $100. 
Use priority code J8TL2D2. 
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to