Avi Kivity wrote:
> Some exit handlers (even the #PF handler) can sleep sometimes.  They
> call kvm_arch_ops->vcpu_put(), do some sleepy thing, then call
> kvm_arch_ops->vcpu_load().  The changes in the commit make sure that
> if vcpu_put() is called, the lightweight exit is converted to a
> heavyweight exit.  Since such sleeps are rare, this is not expected
> to impact performance.
>
> See for example mmu_topup_memory_caches().

OK, how about this patch, which further reduces the MSR save/restore
cost of a lightweight VM exit?
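For readers of the archive: the flow Avi describes is roughly the
following sketch (handler name made up; not the actual kvm code):

	/*
	 * Sketch only: an exit handler that may sleep gives up the
	 * physical cpu first, which converts the lightweight exit
	 * into a heavyweight one.
	 */
	static int sleepy_exit_handler(struct kvm_vcpu *vcpu)
	{
		int r;

		kvm_arch_ops->vcpu_put(vcpu);		/* heavyweight from here on */
		r = mmu_topup_memory_caches(vcpu);	/* may sleep */
		kvm_arch_ops->vcpu_load(vcpu);		/* retake the physical cpu */
		return r;
	}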
thx, eddie
Signed-off-by: Yaozu(Eddie) Dong [EMAIL PROTECTED]
against ca76d209b88c344fc6a8eac17057c0088a3d6940.
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 1bbafba..e61a7e6 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -287,6 +287,7 @@ struct kvm_vcpu {
 	u64 apic_base;
 	u64 ia32_misc_enable_msr;
 	int nmsrs;
+	unsigned long smsrs_bitmap;
 	struct vmx_msr_entry *guest_msrs;
 	struct vmx_msr_entry *host_msrs;
@@ -513,6 +514,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

 void fx_init(struct kvm_vcpu *vcpu);

+void load_msrs_select(struct vmx_msr_entry *e, unsigned long bitmap);
+void save_msrs_select(struct vmx_msr_entry *e, unsigned long bitmap);
 void load_msrs(struct vmx_msr_entry *e, int n);
 void save_msrs(struct vmx_msr_entry *e, int n);
 void kvm_resched(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 1288cff..ef96fae 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1596,6 +1596,32 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
+/* Load guest MSRs whose slot bits are set in @bitmap, lowest bit first. */
+void load_msrs_select(struct vmx_msr_entry *e, unsigned long bitmap)
+{
+	unsigned long nr;
+
+	while (bitmap) {
+		nr = __ffs(bitmap);
+		__clear_bit(nr, &bitmap);
+		wrmsrl(e[nr].index, e[nr].data);
+	}
+}
+EXPORT_SYMBOL_GPL(load_msrs_select);
+
+/* Save MSRs whose slot bits are set in @bitmap into @e. */
+void save_msrs_select(struct vmx_msr_entry *e, unsigned long bitmap)
+{
+	unsigned long nr;
+
+	while (bitmap) {
+		nr = __ffs(bitmap);
+		__clear_bit(nr, &bitmap);
+		rdmsrl(e[nr].index, e[nr].data);
+	}
+}
+EXPORT_SYMBOL_GPL(save_msrs_select);
+
 void load_msrs(struct vmx_msr_entry *e, int n)
 {
 	int i;
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 804a623..67d076c 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -86,15 +86,6 @@ static const u32 vmx_msr_index[] = {

 #ifdef CONFIG_X86_64
 static unsigned msr_offset_kernel_gs_base;
-#define NR_64BIT_MSRS 4
-/*
- * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt
- * mechanism (cpu bug AA24)
- */
-#define NR_BAD_MSRS 2
-#else
-#define NR_64BIT_MSRS 0
-#define NR_BAD_MSRS 0
 #endif
 
 static inline int is_page_fault(u32 intr_info)
@@ -117,13 +108,23 @@ static inline int is_external_interrupt(u32 intr_info)
 		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
 {
 	int i;
 
 	for (i = 0; i < vcpu->nmsrs; ++i)
 		if (vcpu->guest_msrs[i].index == msr)
-			return &vcpu->guest_msrs[i];
+			return i;
+	return -1;
+}
+
+static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
+{
+	int i;
+
+	i = __find_msr_index(vcpu, msr);
+	if (i >= 0)
+		return &vcpu->guest_msrs[i];
 	return NULL;
 }
@@ -307,9 +308,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
 	if (is_long_mode(vcpu)) {
 		save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
-		load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
 	}
 #endif
+	load_msrs_select(vcpu->guest_msrs, vcpu->smsrs_bitmap);
 }
 
 static void vmx_load_host_state(struct kvm_vcpu *vcpu)
@@ -336,12 +337,8 @@ static void vmx_load_host_state(struct kvm_vcpu *vcpu)
 		reload_tss();
 	}
-#ifdef CONFIG_X86_64
-	if (is_long_mode(vcpu)) {
-		save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
-	}
-#endif
+	save_msrs_select(vcpu->guest_msrs, vcpu->smsrs_bitmap);
+	load_msrs_select(vcpu->host_msrs, vcpu->smsrs_bitmap);
 }
 
 /*
@@ -469,35 +466,51 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
  */
 static void setup_msrs(struct kvm_vcpu *vcpu)
 {
-	int nr_skip, nr_good_msrs;
+	int index, save_msrs;
 
-	if (is_long_mode(vcpu))
-		nr_skip = NR_BAD_MSRS;
-	else
-		nr_skip = NR_64BIT_MSRS;
-	nr_good_msrs = vcpu->nmsrs - nr_skip;
-
-	/*
-	 * MSR_K6_STAR is only needed on long mode guests, and only
-	 * if efer.sce is enabled.
-	 */
-	if (find_msr_entry(vcpu, MSR_K6_STAR)) {
-		--nr_good_msrs;
-#ifdef CONFIG_X86_64
-		if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE))
-			++nr_good_msrs;
+	vcpu->smsrs_bitmap = 0;
+	if (is_long_mode(vcpu)) {
+		if ((index = __find_msr_index(vcpu, MSR_SYSCALL_MASK)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_LSTAR)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_CSTAR)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		if ((index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE)) >= 0)
+			set_bit(index, &vcpu->smsrs_bitmap);
+		/*
+		 * MSR_K6_STAR is only needed on long mode guests, and only
+		 * if efer.sce is enabled.
+		 */
+		if ((index = __find_msr_index(vcpu, MSR_K6_STAR)) >= 0
+#ifdef CONFIG_X86_64
+		    && (vcpu->shadow_efer & EFER_SCE)
 #endif
+		    )
+			set_bit(index, &vcpu->smsrs_bitmap);
 	}
+	if ((index = __find_msr_index(vcpu, MSR_EFER)) >= 0) {
+		save_msrs = 1;
+	} else {
+		save_msrs = 0;
+		index = 0;
+	}
 
 	vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_STORE_ADDR,
-		    virt_to_phys(vcpu->guest_msrs + nr_skip));
+		    virt_to_phys(vcpu->guest_msrs + index));
 	vmcs_writel(VM_EXIT_MSR_LOAD_ADDR,
-		    virt_to_phys(vcpu->host_msrs + nr_skip));
-	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs);  /* 22.2.2 */
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
+		    virt_to_phys(vcpu->host_msrs + index));
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, save_msrs);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, save_msrs);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, save_msrs);
 }
 
 /*
@@ -594,14 +607,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
-	case MSR_LSTAR:
-	case MSR_SYSCALL_MASK:
-		msr = find_msr_entry(vcpu, msr_index);
-		if (msr)
-			msr->data = data;
-		if (vcpu->vmx_host_state.loaded)
-			load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
-		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -619,6 +624,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vcpu, msr_index);
 		if (msr) {
 			msr->data = data;
+			if (vcpu->vmx_host_state.loaded)
+				load_msrs_select(vcpu->guest_msrs,
+						 vcpu->smsrs_bitmap);
 			break;
 		}
 		return kvm_set_msr_common(vcpu, msr_index, data);
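
P.S.: the bit walk the new helpers do is easy to sanity-check in
isolation.  A minimal userspace sketch of the same loop (walk_selected
is a made-up name; __builtin_ctzl stands in for the kernel's __ffs, a
plain and-not for __clear_bit, and the MSR slot layout is illustrative):

	#include <stdio.h>

	struct msr_entry {
		unsigned int index;
		unsigned long long data;
	};

	/* Visit only the slots whose bits are set in @bitmap, lowest first. */
	static void walk_selected(struct msr_entry *e, unsigned long bitmap)
	{
		while (bitmap) {
			unsigned long nr = __builtin_ctzl(bitmap);

			bitmap &= ~(1UL << nr);
			printf("slot %lu: msr 0x%x\n", nr, e[nr].index);
		}
	}

	int main(void)
	{
		struct msr_entry msrs[4] = {
			{ 0xc0000084, 0 },	/* MSR_SYSCALL_MASK */
			{ 0xc0000082, 0 },	/* MSR_LSTAR */
			{ 0xc0000102, 0 },	/* MSR_KERNEL_GS_BASE */
			{ 0xc0000080, 0 },	/* MSR_EFER */
		};

		walk_selected(msrs, 0x7);	/* touches slots 0, 1 and 2 only */
		return 0;
	}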