On Wed, Apr 17, 2013 at 02:55:10PM +0300, Abel Gordon wrote:
> Synchronize between the VMCS12 software-controlled structure and the
> processor-specific shadow vmcs
>
> Signed-off-by: Abel Gordon <[email protected]>
> ---
> arch/x86/kvm/vmx.c | 24 ++++++++++++++++++++++++
> 1 file changed, 24 insertions(+)
>
> --- .before/arch/x86/kvm/vmx.c 2013-04-17 14:20:51.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c 2013-04-17 14:20:51.000000000 +0300
> @@ -356,6 +356,11 @@ struct nested_vmx {
> struct page *current_vmcs12_page;
> struct vmcs12 *current_vmcs12;
> struct vmcs *current_shadow_vmcs;
> + /*
> + * Indicates if the shadow vmcs must be updated with the
> + * data held by vmcs12
> + */
> + bool sync_shadow_vmcs;
>
> /* vmcs02_list cache of VMCSs recently used to run L2 guests */
> struct list_head vmcs02_pool;
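Just to spell out the protocol this flag implements: every path that writes
vmcs12 fields behind the shadow vmcs's back raises it, and it is consumed
exactly once, right before the next hardware VM-entry. Roughly (sketch, names
as in this patch):

        /* producers: vmptrld, nested vmexit, nested entry failure, ... */
        vmx->nested.sync_shadow_vmcs = true;

        /* consumer: vmx_vcpu_run(), before entering the guest */
        if (vmx->nested.sync_shadow_vmcs) {
                copy_vmcs12_to_shadow(vmx);
                vmx->nested.sync_shadow_vmcs = false;
        }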
> @@ -5587,6 +5592,10 @@ static inline void nested_release_vmcs12
> {
> if (enable_shadow_vmcs) {
> if (vmx->nested.current_vmcs12 != NULL) {
> + /* copy to memory all shadowed fields in case
> + they were modified */
> + copy_shadow_to_vmcs12(vmx);
> + vmx->nested.sync_shadow_vmcs = false;
> free_vmcs(vmx->nested.current_shadow_vmcs);
> }
> }
> @@ -5716,6 +5725,10 @@ static void nested_vmx_failValid(struct
> X86_EFLAGS_SF | X86_EFLAGS_OF))
> | X86_EFLAGS_ZF);
> get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
> + /*
> + * We don't need to force a shadow sync because
> + * VM_INSTRUCTION_ERROR is not shdowed
---------------------------------------^ shadowed.
But let's just request a sync; this is a slow path anyway. Sketch below.
> + */
> }
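I.e. drop the second half of the comment and request a deferred sync here,
something like (untested sketch; to_vmx() as used elsewhere in the file):

        get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
        /*
         * VM_INSTRUCTION_ERROR is not shadowed, so a sync is not strictly
         * required, but this is a slow path anyway: just request one.
         */
        if (enable_shadow_vmcs)
                to_vmx(vcpu)->nested.sync_shadow_vmcs = true;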
>
> /* Emulate the VMCLEAR instruction */
> @@ -6127,6 +6140,7 @@ static int handle_vmptrld(struct kvm_vcp
> /* init shadow vmcs */
> vmcs_clear(shadow_vmcs);
> vmx->nested.current_shadow_vmcs = shadow_vmcs;
> + vmx->nested.sync_shadow_vmcs = true;
> }
> }
>
> @@ -6876,6 +6890,10 @@ static void __noclone vmx_vcpu_run(struc
> {
> struct vcpu_vmx *vmx = to_vmx(vcpu);
> unsigned long debugctlmsr;
Leave a blank line here, and move this block to after the
if (vmx->emulation_required) check (see the sketch below this hunk).
> + if (vmx->nested.sync_shadow_vmcs) {
> + copy_vmcs12_to_shadow(vmx);
> + vmx->nested.sync_shadow_vmcs = false;
> + }
>
> /* Record the guest's net vcpu time for enforced NMI injections. */
> if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
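I.e. the start of vmx_vcpu_run() would look something like this (sketch,
using the surrounding code as it is in the current tree):

        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long debugctlmsr;

        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();

        /* Don't enter VMX if guest state is invalid, let the exit handler
           start emulation until we arrive back to a valid state */
        if (vmx->emulation_required)
                return;

        /* no point syncing the shadow vmcs if we will not enter the guest */
        if (vmx->nested.sync_shadow_vmcs) {
                copy_vmcs12_to_shadow(vmx);
                vmx->nested.sync_shadow_vmcs = false;
        }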
> @@ -7496,6 +7514,8 @@ static int nested_vmx_run(struct kvm_vcp
> skip_emulated_instruction(vcpu);
> vmcs12 = get_vmcs12(vcpu);
>
> + if (enable_shadow_vmcs)
> + copy_shadow_to_vmcs12(vmx);
And add a blank line here.
> /*
> * The nested entry process starts with enforcing various prerequisites
> * on vmcs12 as required by the Intel SDM, and act appropriately when
> @@ -7938,6 +7958,8 @@ static void nested_vmx_vmexit(struct kvm
> nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
> } else
> nested_vmx_succeed(vcpu);
> + if (enable_shadow_vmcs)
> + vmx->nested.sync_shadow_vmcs = true;
> }
>
> /*
> @@ -7955,6 +7977,8 @@ static void nested_vmx_entry_failure(str
> vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
> vmcs12->exit_qualification = qualification;
> nested_vmx_succeed(vcpu);
> + if (enable_shadow_vmcs)
> + to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
> }
>
> static int vmx_check_intercept(struct kvm_vcpu *vcpu,
>
--
Gleb.