repository: /home/avi/kvm/linux-2.6
branch: master
commit 2c9c2e9251adc775720df60d1522accf433d15d0
Author: Avi Kivity <[EMAIL PROTECTED]>
Date:   Thu Nov 15 18:46:20 2007 +0200

    KVM: VMX: Optimize vm entry by using %rsp as a GPR
    
    Instead of storing the vcpu pointer on the stack, store it in rsp (as that
    is the only GPR restored by vmx).  This has two advantages:
    
    - we no longer need to write HOST_RSP into the vmcs, as the vcpu pointer is
      constant for a given guest
    - finding the vcpu pointer (which is much more interesting than the stack) is
      simpler
    
    This is safe since interrupts are disabled at this point.
    
    Signed-off-by: Avi Kivity <[EMAIL PROTECTED]>

diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 7649986..90a019f 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -46,6 +46,7 @@ struct vmcs {
 
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
+       unsigned long         host_rsp;
        int                   launched;
        u8                    fail;
        struct kvm_msr_entry *guest_msrs;
@@ -1497,6 +1498,13 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        unsigned long kvm_vmx_return;
        u32 exec_control;
 
+       /*
+        * We use rsp as a general purpose register pointing at the vcpu
+        * immediately after vmexit, as it's (a) more useful (b) doesn't
+        * change across invocations, saving us a vmwrite in the fast path.
+        */
+       vmcs_writel(HOST_RSP, (unsigned long)vmx);
+
        /* I/O */
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
@@ -2188,6 +2196,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = vmcs_read32(VM_INSTRUCTION_ERROR);
+               vmx->fail = 0;
                return 0;
        }
 
@@ -2293,13 +2302,14 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        asm(
                /* Store host registers */
 #ifdef CONFIG_X86_64
-               "push %%rdx; push %%rbp;"
-               "push %%rcx \n\t"
+               "push %%rbp \n\t"
+               "mov %%rsp, %c[host_rsp](%0) \n\t"
+               "mov %0, %%rsp \n\t"  /* in case vm entry fails */
 #else
-               "push %%edx; push %%ebp;"
-               "push %%ecx \n\t"
+               "push %%ebp \n\t"
+               "mov %%esp, %c[host_rsp](%0) \n\t"
+               "mov %0, %%esp \n\t"  /* in case vm entry fails */
 #endif
-               ASM_VMX_VMWRITE_RSP_RDX "\n\t"
                /* Check if vmlaunch of vmresume is needed */
                "cmpl $0, %c[launched](%0) \n\t"
                /* Load guest registers.  Don't clobber flags. */
@@ -2335,15 +2345,21 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                /* Enter guest mode */
                "jne .Llaunched \n\t"
                ASM_VMX_VMLAUNCH "\n\t"
-               "jmp .Lkvm_vmx_return \n\t"
+               "jmp .Lkvm_vmx_fail \n\t"
                ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
+               ".Lkvm_vmx_fail: \n\t"
+#ifdef CONFIG_X86_64
+               "movb $1, %c[fail](%%rsp) \n\t"
+#else
+               "movb $1, %c[fail](%%esp) \n\t"
+#endif
                ".Lkvm_vmx_return: "
                /* Save guest registers, load host registers, keep flags */
 #ifdef CONFIG_X86_64
-               "xchg %0,     (%%rsp) \n\t"
+               "xchg %0,   %%rsp \n\t"
                "mov %%rax, %c[rax](%0) \n\t"
                "mov %%rbx, %c[rbx](%0) \n\t"
-               "pushq (%%rsp); popq %c[rcx](%0) \n\t"
+               "mov %%rsp, %c[rcx](%0) \n\t"
                "mov %%rdx, %c[rdx](%0) \n\t"
                "mov %%rsi, %c[rsi](%0) \n\t"
                "mov %%rdi, %c[rdi](%0) \n\t"
@@ -2359,12 +2375,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                "mov %%cr2, %%rax   \n\t"
                "mov %%rax, %c[cr2](%0) \n\t"
 
-               "pop  %%rbp; pop  %%rbp; pop  %%rdx \n\t"
+               "mov %c[host_rsp](%0), %%rsp \n\t"
+               "pop  %%rbp \n\t"
 #else
-               "xchg %0, (%%esp) \n\t"
+               "xchg %0, %%esp \n\t"
                "mov %%eax, %c[rax](%0) \n\t"
                "mov %%ebx, %c[rbx](%0) \n\t"
-               "pushl (%%esp); popl %c[rcx](%0) \n\t"
+               "mov %%esp, %c[rcx](%0) \n\t"
                "mov %%edx, %c[rdx](%0) \n\t"
                "mov %%esi, %c[rsi](%0) \n\t"
                "mov %%edi, %c[rdi](%0) \n\t"
@@ -2372,12 +2389,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                "mov %%cr2, %%eax  \n\t"
                "mov %%eax, %c[cr2](%0) \n\t"
 
-               "pop %%ebp; pop %%ebp; pop %%edx \n\t"
+               "mov %c[host_rsp](%0), %%esp \n\t"
+               "pop %%ebp \n\t"
 #endif
-               "setbe %c[fail](%0) \n\t"
-             : : "c"(vmx), "d"((unsigned long)HOST_RSP),
+             : : "c"(vmx),
                [launched]"i"(offsetof(struct vcpu_vmx, launched)),
                [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+               [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
                [rax]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RAX])),
                [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RBX])),
                [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.regs[VCPU_REGS_RCX])),

-------------------------------------------------------------------------
This SF.net email is sponsored by: Splunk Inc.
Still grepping through log files to find problems?  Stop.
Now Search log events and configuration files using AJAX and a browser.
Download your FREE copy of Splunk now >> http://get.splunk.com/
_______________________________________________
kvm-commits mailing list
kvm-commits@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/kvm-commits

Reply via email to