Coalescing MMIO allows us to avoid an exit every time we have an
MMIO write; instead, MMIO writes are coalesced into a ring which
can be flushed once an exit for a different reason is needed.
An MMIO exit is also triggered once the ring is full.

Coalesce all MMIO regions registered in the MMIO mapper.
Add a coalescing handler under kvm_cpu.

Signed-off-by: Sasha Levin <[email protected]>
---
 tools/kvm/hw/vesa.c             |    2 +-
 tools/kvm/include/kvm/kvm-cpu.h |    2 ++
 tools/kvm/include/kvm/kvm.h     |    4 ++--
 tools/kvm/kvm-cpu.c             |   24 ++++++++++++++++++++++++
 tools/kvm/mmio.c                |   24 ++++++++++++++++++++++--
 5 files changed, 51 insertions(+), 5 deletions(-)

diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index b99f2de..a12c601 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -77,7 +77,7 @@ void vesa__init(struct kvm *kvm)
        vesa_pci_device.bar[0]          = vesa_base_addr | 
PCI_BASE_ADDRESS_SPACE_IO;
        pci__register(&vesa_pci_device, dev);
 
-       kvm__register_mmio(VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
+       kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, 
&vesa_mmio_callback);
 
        pthread_create(&thread, NULL, vesa__dovnc, kvm);
 }
diff --git a/tools/kvm/include/kvm/kvm-cpu.h b/tools/kvm/include/kvm/kvm-cpu.h
index 4d99246..1eb4a52 100644
--- a/tools/kvm/include/kvm/kvm-cpu.h
+++ b/tools/kvm/include/kvm/kvm-cpu.h
@@ -24,6 +24,8 @@ struct kvm_cpu {
 
        u8                      is_running;
        u8                      paused;
+
+       struct kvm_coalesced_mmio_ring  *ring;
 };
 
 struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id);
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index d22a849..55551de 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -49,8 +49,8 @@ void kvm__stop_timer(struct kvm *kvm);
 void kvm__irq_line(struct kvm *kvm, int irq, int level);
 bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int 
size, u32 count);
 bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 
is_write);
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void 
(*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
-bool kvm__deregister_mmio(u64 phys_addr);
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, 
void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
 void kvm__pause(void);
 void kvm__continue(void);
 void kvm__notify_paused(void);
diff --git a/tools/kvm/kvm-cpu.c b/tools/kvm/kvm-cpu.c
index be0528b..1fb1c74 100644
--- a/tools/kvm/kvm-cpu.c
+++ b/tools/kvm/kvm-cpu.c
@@ -14,6 +14,8 @@
 #include <errno.h>
 #include <stdio.h>
 
+#define PAGE_SIZE (sysconf(_SC_PAGE_SIZE))
+
 extern __thread struct kvm_cpu *current_kvm_cpu;
 
 static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
@@ -70,6 +72,7 @@ struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long 
cpu_id)
 {
        struct kvm_cpu *vcpu;
        int mmap_size;
+       int coalesced_offset;
 
        vcpu            = kvm_cpu__new(kvm);
        if (!vcpu)
@@ -89,6 +92,10 @@ struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long 
cpu_id)
        if (vcpu->kvm_run == MAP_FAILED)
                die("unable to mmap vcpu fd");
 
+       coalesced_offset = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, 
KVM_CAP_COALESCED_MMIO);
+       if (coalesced_offset)
+               vcpu->ring = (void *)vcpu->kvm_run + (coalesced_offset * 
PAGE_SIZE);
+
        vcpu->is_running = true;
 
        return vcpu;
@@ -395,6 +402,22 @@ static void kvm_cpu_signal_handler(int signum)
        }
 }
 
+static void kvm_cpu__handle_coalesced_mmio(struct kvm_cpu *cpu)
+{
+       if (cpu->ring) {
+               while (cpu->ring->first != cpu->ring->last) {
+                       struct kvm_coalesced_mmio *m;
+                       m = &cpu->ring->coalesced_mmio[cpu->ring->first];
+                       kvm__emulate_mmio(cpu->kvm,
+                                       m->phys_addr,
+                                       m->data,
+                                       m->len,
+                                       1);
+                       cpu->ring->first = (cpu->ring->first + 1) % 
KVM_COALESCED_MMIO_MAX;
+               }
+       }
+}
+
 int kvm_cpu__start(struct kvm_cpu *cpu)
 {
        sigset_t sigset;
@@ -462,6 +485,7 @@ int kvm_cpu__start(struct kvm_cpu *cpu)
                default:
                        goto panic_kvm;
                }
+               kvm_cpu__handle_coalesced_mmio(cpu);
        }
 
 exit_kvm:
diff --git a/tools/kvm/mmio.c b/tools/kvm/mmio.c
index acd091e..64bef37 100644
--- a/tools/kvm/mmio.c
+++ b/tools/kvm/mmio.c
@@ -5,6 +5,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
 #include <linux/types.h>
 #include <linux/rbtree.h>
 
@@ -53,9 +55,10 @@ static const char *to_direction(u8 is_write)
        return "read";
 }
 
-bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, void 
(*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
+bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, 
void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write))
 {
        struct mmio_mapping *mmio;
+       struct kvm_coalesced_mmio_zone zone;
        int ret;
 
        mmio = malloc(sizeof(*mmio));
@@ -67,6 +70,16 @@ bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, 
void (*kvm_mmio_callba
                .kvm_mmio_callback_fn = kvm_mmio_callback_fn,
        };
 
+       zone = (struct kvm_coalesced_mmio_zone) {
+               .addr   = phys_addr,
+               .size   = phys_addr_len,
+       };
+       ret = ioctl(kvm->vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
+       if (ret < 0) {
+               free(mmio);
+               return false;
+       }
+
        br_write_lock();
        ret = mmio_insert(&mmio_tree, mmio);
        br_write_unlock();
@@ -74,9 +87,10 @@ bool kvm__register_mmio(u64 phys_addr, u64 phys_addr_len, 
void (*kvm_mmio_callba
        return ret;
 }
 
-bool kvm__deregister_mmio(u64 phys_addr)
+bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr)
 {
        struct mmio_mapping *mmio;
+       struct kvm_coalesced_mmio_zone zone;
 
        br_write_lock();
        mmio = mmio_search_single(&mmio_tree, phys_addr);
@@ -85,6 +99,12 @@ bool kvm__deregister_mmio(u64 phys_addr)
                return false;
        }
 
+       zone = (struct kvm_coalesced_mmio_zone) {
+               .addr   = phys_addr,
+               .size   = 1,
+       };
+       ioctl(kvm->vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+
        rb_int_erase(&mmio_tree, &mmio->node);
        br_write_unlock();
 
-- 
1.7.5.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to