This patch is the kernel part of the "batch writes to MMIO" patch.

When the kernel has to send MMIO writes to userspace, it stores them
in memory until it must return control to userspace for another
reason. This avoids incurring too many context switches for operations
that can wait.

WARNING: this breaks compatibility with the old userspace part.

Signed-off-by: Laurent Vivier <[EMAIL PROTECTED]>
---
 arch/x86/kvm/x86.c         |   21 +++++++++++++++++++++
 include/asm-x86/kvm_host.h |    2 ++
 include/linux/kvm.h        |   10 +++++++++-
 virt/kvm/kvm_main.c        |    3 +++
 4 files changed, 35 insertions(+), 1 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0ce5563..3881056 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2942,8 +2942,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, 
struct kvm_run *kvm_run)
                kvm_x86_ops->decache_regs(vcpu);
        }
 
+batch:
        r = __vcpu_run(vcpu, kvm_run);
 
+       if (!r && vcpu->mmio_is_write &&
+           kvm_run->exit_reason == KVM_EXIT_MMIO &&
+           kvm_run->batch_count < KVM_MAX_BATCH) {
+               struct kvm_batch *batch = vcpu->arch.batch_data;
+               int i = kvm_run->batch_count++;
+
+               batch[i].phys_addr = vcpu->mmio_phys_addr;
+               batch[i].len = vcpu->mmio_size;
+               memcpy(batch[i].data, vcpu->mmio_data, batch[i].len);
+
+               goto batch;
+       }
 out:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -3830,6 +3843,13 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
        vcpu->arch.pio_data = page_address(page);
 
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!page) {
+               r = -ENOMEM;
+               goto fail;
+       }
+       vcpu->arch.batch_data = page_address(page);
+
        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;
@@ -3857,6 +3877,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        kvm_mmu_destroy(vcpu);
        up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
+       free_page((unsigned long)vcpu->arch.batch_data);
 }
 
 struct  kvm *kvm_arch_create_vm(void)
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 9d963cd..2824652 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -26,6 +26,7 @@
 #define KVM_PRIVATE_MEM_SLOTS 4
 
 #define KVM_PIO_PAGE_OFFSET 1
+#define KVM_MMIO_PAGE_OFFSET 2
 
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
@@ -255,6 +256,7 @@ struct kvm_vcpu_arch {
        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;
        void *pio_data;
+       void *batch_data;
 
        struct kvm_queued_exception {
                bool pending;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a281afe..cf0d266 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -94,7 +94,8 @@ struct kvm_run {
        __u32 exit_reason;
        __u8 ready_for_interrupt_injection;
        __u8 if_flag;
-       __u8 padding2[2];
+       __u8 batch_count;
+       __u8 padding2;
 
        /* in (pre_kvm_run), out (post_kvm_run) */
        __u64 cr8;
@@ -173,6 +174,13 @@ struct kvm_run {
        };
 };
 
+#define KVM_MAX_BATCH (PAGE_SIZE / sizeof(struct kvm_batch))
+struct kvm_batch {
+       __u64 phys_addr;
+       __u32 len;
+       __u8  data[8];
+};
+
 /* for KVM_TRANSLATE */
 struct kvm_translation {
        /* in */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d3cb4cc..b2234b3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -796,6 +796,8 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, 
struct vm_fault *vmf)
 #ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
+       else if (vmf->pgoff == KVM_MMIO_PAGE_OFFSET)
+               page = virt_to_page(vcpu->arch.batch_data);
 #endif
        else
                return VM_FAULT_SIGBUS;
@@ -1214,6 +1216,7 @@ static long kvm_dev_ioctl(struct file *filp,
                r = PAGE_SIZE;     /* struct kvm_run */
 #ifdef CONFIG_X86
                r += PAGE_SIZE;    /* pio data page */
+               r += PAGE_SIZE;    /* mmio batch page */
 #endif
                break;
        case KVM_TRACE_ENABLE:
-- 
1.5.2.4


-------------------------------------------------------------------------
This SF.net email is sponsored by the 2008 JavaOne(SM) Conference 
Don't miss this year's exciting event. There's still time to save $100. 
Use priority code J8TL2D2. 
http://ad.doubleclick.net/clk;198757673;13503038;p?http://java.sun.com/javaone
_______________________________________________
kvm-devel mailing list
kvm-devel@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-devel

Reply via email to