[EMAIL PROTECTED] wrote:
> BTW, I have another patch in hand to further reduce MSR save/restore
> and thus improve performance for lightweight VM Exits. Based on my
> observation of an FC5 32-bit guest, 93% of VM Exits fall into the
> lightweight path.
> 
This patch further reduces the VM Exit handling cost for lightweight
VM Exits, which account for 93% of all VM Exits in the KB case, if a
64-bit OS shows a distribution similar to 32-bit. On my old machine, I
saw a 20% KB performance increase in a 64-bit RHEL5 guest and flat
results for 32-bit FC5.
        There is still room for improvement here, but this patch
focuses on the basic MSR save/restore framework for now and leaves
optimization of specific MSRs such as GS_BASE for the future. A sketch
of the resulting control flow follows below.
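
To make the mechanism concrete: only MSR_EFER stays on the VMCS
MSR-store/load lists (NR_HW_SAVE_MSRS), while the remaining MSRs (the
SYSCALL family, KERNEL_GS_BASE, MSR_K6_STAR) are switched in software
once around the run loop. A lightweight VM Exit is handled entirely in
the kernel and resumes the guest directly, so it performs no MSR work
at all. The following is a minimal control-flow sketch, not the literal
kvm code; enter_guest() and handle_exit_in_kernel() are hypothetical
placeholders, while save_msrs()/load_msrs() and sw_save_msrs come from
the patch:

/* Sketch of the vmx_vcpu_run() shape after this patch; illustrative only. */
static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
        /* Heavyweight prologue, paid once per run-loop entry:
         * switch the software-saved MSRs to guest values. */
        save_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
        load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
again:
        enter_guest(vcpu);              /* VMLAUNCH/VMRESUME; HW swaps only MSR_EFER */
        if (handle_exit_in_kernel(vcpu))
                goto again;             /* lightweight VM Exit: no MSR work */

        /* Heavyweight epilogue, only when returning to host context:
         * switch the software-saved MSRs back to host values. */
        save_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
        load_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
        return 0;
}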
thx,eddie

Signed-off-by: Yaozu (Eddie) Dong <[EMAIL PROTECTED]>

against 5cf48c367dec74ba8553c53ed332cd075fa38b88


commit a7294eae555b7d42f7e44b8d7955becad2feebf8
Author: root <[EMAIL PROTECTED](none)>
Date:   Tue May 8 17:32:24 2007 +0800

    Avoid MSR save/restore for lightweight VM Exit
    
    Signed-off-by: Yaozu (Eddie) Dong <[EMAIL PROTECTED]>

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 11eb25e..86abf2d 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -285,6 +285,7 @@ struct kvm_vcpu {
        u64 apic_base;
        u64 ia32_misc_enable_msr;
        int nmsrs;
+       int sw_save_msrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;
 
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4e04b85..c2d06b5 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -80,23 +80,11 @@ static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
        MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
 #endif
-       MSR_EFER, MSR_K6_STAR,
+       MSR_K6_STAR, MSR_EFER,
 };
+#define NR_HW_SAVE_MSRS        1       /* HW save MSR_EFER */
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-#ifdef CONFIG_X86_64
-static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
-#endif
-
 static inline int is_page_fault(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -339,23 +327,19 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-       int nr_skip, nr_good_msrs;
+       int nr_skip;
 
-       if (is_long_mode(vcpu))
-               nr_skip = NR_BAD_MSRS;
-       else
-               nr_skip = NR_64BIT_MSRS;
-       nr_good_msrs = vcpu->nmsrs - nr_skip;
+       vcpu->sw_save_msrs = nr_skip = vcpu->nmsrs - NR_HW_SAVE_MSRS;
 
        /*
         * MSR_K6_STAR is only needed on long mode guests, and only
         * if efer.sce is enabled.
         */
        if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-               --nr_good_msrs;
+               --vcpu->sw_save_msrs;
 #ifdef CONFIG_X86_64
                if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-                       ++nr_good_msrs;
+                       ++vcpu->sw_save_msrs;
 #endif
        }
 
@@ -365,9 +349,9 @@ static void setup_msrs(struct kvm_vcpu *vcpu)
                    virt_to_phys(vcpu->guest_msrs + nr_skip));
        vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
                    virt_to_phys(vcpu->host_msrs + nr_skip));
-       vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+       vmcs_write32(VM_EXIT_MSR_STORE_COUNT, NR_HW_SAVE_MSRS);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, NR_HW_SAVE_MSRS);
 }
 
 /*
@@ -486,7 +470,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                msr = find_msr_entry(vcpu, msr_index);
                if (msr)
                        msr->data = data;
-               load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+               load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
                break;
 #endif
        case MSR_IA32_SYSENTER_CS:
@@ -1218,10 +1202,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
                vcpu->host_msrs[j].reserved = 0;
                vcpu->host_msrs[j].data = data;
                vcpu->guest_msrs[j] = vcpu->host_msrs[j];
-#ifdef CONFIG_X86_64
-               if (index == MSR_KERNEL_GS_BASE)
-                       msr_offset_kernel_gs_base = j;
-#endif
                ++vcpu->nmsrs;
        }
 
@@ -1861,12 +1841,8 @@ preempted:
                fx_restore(vcpu->guest_fx_image);
        }
 
-#ifdef CONFIG_X86_64
-       if (is_long_mode(vcpu)) {
-               save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-               load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-       }
-#endif
+       save_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
+       load_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
 
 again:
        asm (
@@ -2052,12 +2028,8 @@ out:
 
                reload_tss();
        }
-#ifdef CONFIG_X86_64
-       if (is_long_mode(vcpu)) {
-               save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-               load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-       }
-#endif
+       save_msrs(vcpu->guest_msrs, vcpu->sw_save_msrs);
+       load_msrs(vcpu->host_msrs, vcpu->sw_save_msrs);
 
        if (vcpu->fpu_active) {
                fx_save(vcpu->guest_fx_image);

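For reference, the reordering of vmx_msr_index[] (MSR_EFER moved to
the last slot) is what lets a single hardware list entry suffice: the
first sw_save_msrs entries of guest_msrs/host_msrs are managed purely
in software, and the VMCS store/load addresses point at the tail of
the arrays (msrs + nr_skip). A standalone toy program showing that
split; the names mirror the patch, but this is an illustration under
those assumptions, not kernel code:

/* Toy userspace illustration of the MSR array split; not kernel code. */
#include <stdio.h>

#define NR_HW_SAVE_MSRS 1       /* hardware handles only the last entry, MSR_EFER */

int main(void)
{
        /* Same order as the patched vmx_msr_index[] on x86_64: EFER last. */
        const char *msrs[] = {
                "MSR_SYSCALL_MASK", "MSR_LSTAR", "MSR_CSTAR",
                "MSR_KERNEL_GS_BASE", "MSR_K6_STAR", "MSR_EFER",
        };
        int nmsrs = sizeof(msrs) / sizeof(msrs[0]);
        int nr_skip = nmsrs - NR_HW_SAVE_MSRS;  /* == vcpu->sw_save_msrs */
        int i;

        for (i = 0; i < nr_skip; i++)
                printf("software save/restore: %s\n", msrs[i]);
        for (; i < nmsrs; i++)
                printf("VMCS autoload lists:   %s\n", msrs[i]);
        return 0;
}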
