[Qemu-devel] [PULL 16/19] kvm: Introduce slots lock for memory listener

2019-07-12 Thread Juan Quintela
From: Peter Xu 

Introduce KVMMemoryListener.slots_lock to protect the slots inside the
kvm memory listener.  Currently it is close to useless because all the
KVM code paths are still protected by the BQL.  But it will start to
make sense in follow-up patches, where we may clear dirty bitmaps
remotely and also update the per-slot cached dirty bitmap even without
the BQL.  So let's prepare for that.
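
As a rough illustration of the pattern this introduces, here is a
minimal sketch (not part of the patch; count_used_slots and its
nr_slots parameter are made up for illustration): every walk over
kml->slots is bracketed by the new lock, so a caller running without
the BQL still sees a consistent slot array.

    /* Hypothetical helper showing the intended locking discipline. */
    static int count_used_slots(KVMMemoryListener *kml, int nr_slots)
    {
        int i, used = 0;

        kvm_slots_lock(kml);                 /* take the per-listener lock */
        for (i = 0; i < nr_slots; i++) {
            if (kml->slots[i].memory_size) { /* non-zero size: slot in use */
                used++;
            }
        }
        kvm_slots_unlock(kml);               /* drop it before returning */

        return used;
    }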

We could also use a per-slot lock for the above, but that seems to be
overkill.  Let's just use this bigger one (which covers all the slots
of a single address space); it is still a much smaller lock than the
BQL.
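
For reference, the include/sysemu/kvm_int.h side of the change (that
hunk is cut off in this archive copy) presumably just adds the mutex
to the listener struct.  A minimal sketch, assuming the field layout
implied by the kml->slots and kml->as_id references in the diff:

    typedef struct KVMMemoryListener {
        MemoryListener listener;
        QemuMutex slots_lock;   /* protects the slots array below */
        int as_id;
        KVMSlot *slots;
    } KVMMemoryListener;

The mutex would also need a qemu_mutex_init(&kml->slots_lock) wherever
the listener is set up; that hunk is not visible below either.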

Reviewed-by: Dr. David Alan Gilbert 
Signed-off-by: Peter Xu 
Message-Id: <20190603065056.25211-10-pet...@redhat.com>
Signed-off-by: Juan Quintela 
---
 accel/kvm/kvm-all.c      | 58 ++++++++++++++++++++++++++++++++++++++++++++++------------
 include/sysemu/kvm_int.h |  2 ++
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 23ace52b9e..621c9a43ab 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -138,6 +138,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_LAST_INFO
 };
 
+#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
+#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
+
 int kvm_get_max_memslots(void)
 {
     KVMState *s = KVM_STATE(current_machine->accelerator);
@@ -165,6 +168,7 @@ int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
     return 1;
 }
 
+/* Called with KVMMemoryListener.slots_lock held */
 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 {
     KVMState *s = kvm_state;
@@ -182,10 +186,17 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 bool kvm_has_free_slot(MachineState *ms)
 {
     KVMState *s = KVM_STATE(ms->accelerator);
+    bool result;
+    KVMMemoryListener *kml = &s->memory_listener;
 
-    return kvm_get_free_slot(&s->memory_listener);
+    kvm_slots_lock(kml);
+    result = !!kvm_get_free_slot(kml);
+    kvm_slots_unlock(kml);
+
+    return result;
 }
 
+/* Called with KVMMemoryListener.slots_lock held */
 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 {
     KVMSlot *slot = kvm_get_free_slot(kml);
@@ -244,18 +255,21 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                        hwaddr *phys_addr)
 {
     KVMMemoryListener *kml = &s->memory_listener;
-    int i;
+    int i, ret = 0;
 
+    kvm_slots_lock(kml);
     for (i = 0; i < s->nr_slots; i++) {
         KVMSlot *mem = &kml->slots[i];
 
         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
             *phys_addr = mem->start_addr + (ram - mem->ram);
-            return 1;
+            ret = 1;
+            break;
         }
     }
+    kvm_slots_unlock(kml);
 
-    return 0;
+    return ret;
 }
 
 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
@@ -396,6 +410,7 @@ static int kvm_mem_flags(MemoryRegion *mr)
     return flags;
 }
 
+/* Called with KVMMemoryListener.slots_lock held */
 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                  MemoryRegion *mr)
 {
@@ -414,19 +429,26 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
 {
     hwaddr start_addr, size;
     KVMSlot *mem;
+    int ret = 0;
 
     size = kvm_align_section(section, &start_addr);
     if (!size) {
         return 0;
     }
 
+    kvm_slots_lock(kml);
+
     mem = kvm_lookup_matching_slot(kml, start_addr, size);
     if (!mem) {
         /* We don't have a slot if we want to trap every access. */
-        return 0;
+        goto out;
     }
 
-    return kvm_slot_update_flags(kml, mem, section->mr);
+    ret = kvm_slot_update_flags(kml, mem, section->mr);
+
+out:
+    kvm_slots_unlock(kml);
+    return ret;
 }
 
 static void kvm_log_start(MemoryListener *listener,
@@ -483,6 +505,8 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
  * This function will first try to fetch dirty bitmap from the kernel,
  * and then updates qemu's dirty bitmap.
  *
+ * NOTE: caller must be with kml->slots_lock held.
+ *
  * @kml: the KVM memory listener object
  * @section: the memory section to sync the dirty bitmap with
  */
@@ -493,13 +517,14 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
     struct kvm_dirty_log d = {};
     KVMSlot *mem;
     hwaddr start_addr, size;
+    int ret = 0;
 
     size = kvm_align_section(section, &start_addr);
     if (size) {
         mem = kvm_lookup_matching_slot(kml, start_addr, size);
         if (!mem) {
             /* We don't have a slot if we want to trap every access. */
-            return 0;
+            goto out;
         }
 
         /* XXX bad kernel interface alert
@@ -525,13 +550,14 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
         d.slot = mem->slot | (kml->as_id << 16);
         if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) ==

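The remainder of the patch is cut off in this archive copy.  As a
hypothetical sketch of the calling convention that the "Called with
... slots_lock held" comments imply (example_log_sync is made up; the
real call sites are not visible here), callers take the lock around
such helpers rather than the helpers locking recursively:

    /*
     * Hypothetical caller: take slots_lock around a helper whose
     * comment says the caller must hold kml->slots_lock.
     */
    static void example_log_sync(KVMMemoryListener *kml,
                                 MemoryRegionSection *section)
    {
        int ret;

        kvm_slots_lock(kml);
        ret = kvm_physical_sync_dirty_bitmap(kml, section);
        kvm_slots_unlock(kml);

        if (ret < 0) {
            abort();    /* dirty log sync failed; cannot continue */
        }
    }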