Re: [PATCH v3 2/2] KVM: nVMX: nested VPID emulation

2015-09-16 Thread Jan Kiszka

[PATCH v3 2/2] KVM: nVMX: nested VPID emulation

2015-09-15 Thread Wanpeng Li
VPID is used to tag an address space and avoid a TLB flush. Currently L0 uses
the same VPID to run L1 and all its guests, and KVM flushes the TLB when
switching between L1 and L2.

This patch advertises VPID to the L1 hypervisor so that the address spaces of
L1 and L2 can be tagged separately and the TLB flush can be avoided when
switching between L1 and L2. On each nested vmentry the shadow vpid (vpid02)
is reused, and if vpid12 has changed an invvpid is issued.
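
As a condensed restatement of the prepare_vmcs02() hunk below (comments added
here only for explanation; this is not additional code):

    if (enable_vpid) {
        if (nested_cpu_has_vpid(vmcs12)) {
            /* L2 always runs under L0's shadow vpid (vpid02). */
            vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
            /* Flush only when L1 has moved L2 to a different vpid12. */
            if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
                vmx->nested.last_vpid = vmcs12->virtual_processor_id;
                vmx_flush_tlb(vcpu);
            }
        } else {
            /* L1 did not enable VPID for L2: keep sharing vpid01 and flush. */
            vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
            vmx_flush_tlb(vcpu);
        }
    }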

Performance: 

lmbench was run on L2 with a 3.5 kernel.

Context switching - times in microseconds - smaller is better
-------------------------------------------------------------------------------
Host      OS             2p/0K  2p/16K 2p/64K 8p/16K 8p/64K 16p/16K 16p/64K
                         ctxsw  ctxsw  ctxsw  ctxsw  ctxsw  ctxsw   ctxsw
--------- -------------- ------ ------ ------ ------ ------ ------- -------
kernel    Linux 3.5.0-1  1.2200 1.3700 1.4500 4.7800 2.3300     5.6 2.88000  nested VPID
kernel    Linux 3.5.0-1  1.2600 1.4300 1.5600   12.7   12.9 3.49000 7.46000  vanilla

Suggested-by: Wincy Van 
Signed-off-by: Wanpeng Li 
---
 arch/x86/kvm/vmx.c | 37 +++++++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4956081..2fd5b5e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -424,6 +424,9 @@ struct nested_vmx {
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
 
+   u16 vpid02;
+   u16 last_vpid;
+
u32 nested_vmx_procbased_ctls_low;
u32 nested_vmx_procbased_ctls_high;
u32 nested_vmx_true_procbased_ctls_low;
@@ -1155,6 +1158,11 @@ static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
 }
 
+static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
+{
+   return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
+}
+
 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
 {
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
@@ -2469,6 +2477,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+   SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
@@ -6662,6 +6671,7 @@ static void free_nested(struct vcpu_vmx *vmx)
return;
 
vmx->nested.vmxon = false;
+   free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
@@ -8547,8 +8557,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
 
-   if (nested)
+   if (nested) {
nested_vmx_setup_ctls_msrs(vmx);
+   vmx->nested.vpid02 = allocate_vpid();
+   }
 
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@@ -8569,6 +8581,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
	return &vmx->vcpu;
 
 free_vmcs:
+   free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
kfree(vmx->guest_msrs);
@@ -9444,12 +9457,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
if (enable_vpid) {
/*
-* Trivially support vpid by letting L2s share their parent
-* L1's vpid. TODO: move to a more elaborate solution, giving
-* each L2 its own vpid and exposing the vpid feature to L1.
+* There is no direct mapping between vpid02 and vpid12; vpid02 is
+* per-vCPU for L0 and is reused, while one invvpid is issued when
+* the value of vpid12 changes during nested vmentry. vpid12 is
+* allocated by L1 for L2, so it will not influence the global
+* bitmap (used for vpid01 and vpid02 allocation) even if L1
+* spawns a lot of nested vCPUs.
 */
-   vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
-   vmx_flush_tlb(vcpu);
+   if (nested_cpu_has_vpid(vmcs12)) {
+   vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+   if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+   vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+   vmx_flush_tlb(vcpu);
+   }
+   } else {
+   vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+   vmx_flush_tlb(vcpu);
+