Get rid of the kvm->lock dependency in the coalesced_mmio methods. Use an
atomic variable instead to guarantee that only one vcpu is batching data
into the ring at any given time.
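
For reference, the scheme reduces to a try-acquire/release pattern:
atomic_add_unless(&dev->in_use, 1, 1) lets at most one vcpu take the ring,
and atomic_set(&dev->in_use, 0) releases it again, either on the deny paths
in in_range() or once the entry has been written in write(). Below is a
minimal userspace C11 sketch of the same pattern, with acquire and release
collapsed into one function for brevity; the ring layout and the names are
illustrative stand-ins, not the kernel structures or kernel atomics.

	/* Toy illustration of the try-acquire/release batching pattern;
	 * C11 atomics stand in for the kernel's atomic_t helpers. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <string.h>

	#define RING_MAX 64

	struct entry { unsigned long phys_addr; int len; char data[8]; };

	struct ring {
		atomic_int in_use;	/* 0 = free, 1 = one writer batching */
		int first, last;	/* consumer / producer indices */
		struct entry slot[RING_MAX];
	};

	/* Counterpart of atomic_add_unless(&in_use, 1, 1): succeed only
	 * if no other writer currently holds the ring. */
	static bool ring_try_begin(struct ring *r)
	{
		int expected = 0;
		return atomic_compare_exchange_strong(&r->in_use, &expected, 1);
	}

	/* Counterpart of smp_wmb() + atomic_set(&in_use, 0): publish the
	 * ring update before letting the next writer in. */
	static void ring_end(struct ring *r)
	{
		atomic_store_explicit(&r->in_use, 0, memory_order_release);
	}

	static bool ring_push(struct ring *r, unsigned long addr,
			      const void *val, int len)
	{
		if (!ring_try_begin(r))
			return false;		/* another writer active: fall back */

		int next = (r->last + 1) % RING_MAX;
		if (next == r->first) {		/* full: release and fall back */
			ring_end(r);
			return false;
		}

		r->slot[r->last].phys_addr = addr;
		r->slot[r->last].len = len;
		memcpy(r->slot[r->last].data, val, (size_t)len);
		r->last = next;

		ring_end(r);
		return true;
	}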

Signed-off-by: Marcelo Tosatti <[email protected]>

Index: kvm-irqlock/virt/kvm/coalesced_mmio.c
===================================================================
--- kvm-irqlock.orig/virt/kvm/coalesced_mmio.c
+++ kvm-irqlock/virt/kvm/coalesced_mmio.c
@@ -26,9 +26,12 @@ static int coalesced_mmio_in_range(struc
        if (!is_write)
                return 0;
 
-       /* kvm->lock is taken by the caller and must be not released before
-         * dev.read/write
-         */
+       /*
+        * Some other vcpu might be batching data into the ring,
+        * fall back to userspace. Ordering is not our problem.
+        */
+       if (!atomic_add_unless(&dev->in_use, 1, 1))
+               return 0;
 
        /* Are we able to batch it ? */
 
@@ -41,7 +44,7 @@ static int coalesced_mmio_in_range(struc
                                                        KVM_COALESCED_MMIO_MAX;
        if (next == dev->kvm->coalesced_mmio_ring->first) {
                /* full */
-               return 0;
+               goto out_denied;
        }
 
        /* is it in a batchable area ? */
@@ -57,6 +60,8 @@ static int coalesced_mmio_in_range(struc
                    addr + len <= zone->addr + zone->size)
                        return 1;
        }
+out_denied:
+       atomic_set(&dev->in_use, 0);
        return 0;
 }
 
@@ -67,15 +72,14 @@ static void coalesced_mmio_write(struct 
                                (struct kvm_coalesced_mmio_dev*)this->private;
        struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
 
-       /* kvm->lock must be taken by caller before call to in_range()*/
-
        /* copy data in first free entry of the ring */
 
        ring->coalesced_mmio[ring->last].phys_addr = addr;
        ring->coalesced_mmio[ring->last].len = len;
        memcpy(ring->coalesced_mmio[ring->last].data, val, len);
-       smp_wmb();
        ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
+       smp_wmb();
+       atomic_set(&dev->in_use, 0);
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
@@ -90,6 +94,8 @@ int kvm_coalesced_mmio_init(struct kvm *
        dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
+       atomic_set(&dev->in_use, 0);
+
        dev->dev.write  = coalesced_mmio_write;
        dev->dev.in_range  = coalesced_mmio_in_range;
        dev->dev.destructor  = coalesced_mmio_destructor;
Index: kvm-irqlock/virt/kvm/coalesced_mmio.h
===================================================================
--- kvm-irqlock.orig/virt/kvm/coalesced_mmio.h
+++ kvm-irqlock/virt/kvm/coalesced_mmio.h
@@ -12,6 +12,7 @@
 struct kvm_coalesced_mmio_dev {
        struct kvm_io_device dev;
        struct kvm *kvm;
+       atomic_t in_use;
        int nb_zones;
        struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
 };

-- 
