This patch implements the VMCLEAR instruction of the nested VMX feature.

VMCLEAR takes the guest-physical address of a vmcs12 region as its memory
operand. It marks that VMCS inactive, resets its launch state to "clear"
(making a later VMLAUNCH of it legal), and, when the operand is the current
VMCS, also makes the current-VMCS pointer invalid. A guest hypervisor is
expected to VMCLEAR a VMCS region before using it for the first time.
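
For illustration, the instruction this handler emulates is issued by a
guest hypervisor roughly as in the sketch below. The vmclear() helper is
hypothetical, not code from this patch (KVM's own wrapper is vmcs_clear()
in vmx.c); VMCLEAR takes the physical address of the VMCS region as an m64
memory operand and reports failure through CF (VMfailInvalid) or ZF
(VMfailValid):

	/* Sketch only, assuming kernel types from <linux/types.h>. */
	static inline int vmclear(u64 vmcs_pa)
	{
		u8 error;

		/* "setna" sets error when CF=1 or ZF=1, i.e. on VMfail */
		asm volatile ("vmclear %1; setna %0"
			      : "=qm" (error)
			      : "m" (vmcs_pa)
			      : "cc", "memory");
		return error;	/* 0 on success, 1 on VMfail */
	}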

Signed-off-by: Nadav Har'El <n...@il.ibm.com>
---
 arch/x86/kvm/vmx.c |   61 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 1 deletion(-)

--- .before/arch/x86/kvm/vmx.c  2010-12-08 18:56:50.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c   2010-12-08 18:56:50.000000000 +0200
@@ -279,6 +279,8 @@ struct __packed vmcs12 {
        u32 abort;
 
        struct vmcs_fields fields;
+
+       bool launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
 };
 
 /*
@@ -4413,6 +4415,63 @@ static void nested_vmx_failValid(struct 
        get_vmcs12_fields(vcpu)->vm_instruction_error = vm_instruction_error;
 }
 
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       gva_t gva;
+       gpa_t vmcs12_addr;
+       struct vmcs12 *vmcs12;
+       struct page *page;
+
+       if (!nested_vmx_check_permission(vcpu))
+               return 1;
+
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+               return 1;
+
+       if (kvm_read_guest_virt(gva, &vmcs12_addr, sizeof(vmcs12_addr),
+                               vcpu, NULL)) {
+               kvm_queue_exception(vcpu, PF_VECTOR);
+               return 1;
+       }
+
+       if (!IS_ALIGNED(vmcs12_addr, PAGE_SIZE)) {
+               nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+               skip_emulated_instruction(vcpu);
+               return 1;
+       }
+
+       if (vmcs12_addr == vmx->nested.current_vmptr) {
+               nested_release_page(vmx->nested.current_vmcs12_page);
+               vmx->nested.current_vmptr = -1ull;
+       }
+
+       page = nested_get_page(vcpu, vmcs12_addr);
+       if (page == NULL) {
+               /*
+                * For accurate processor emulation, VMCLEAR of an address
+                * beyond available physical memory should do nothing at all.
+                * However, this case may also be the result of a bug in
+                * nested vmx itself, rather than in the guest hypervisor, so
+                * shut the guest down before doing any more damage:
+                */
+               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               return 1;
+       }
+       vmcs12 = kmap(page);
+       vmcs12->launch_state = 0;
+       kunmap(page);
+       nested_release_page(page);
+
+       nested_free_vmcs(vcpu, vmcs12_addr);
+
+       skip_emulated_instruction(vcpu);
+       nested_vmx_succeed(vcpu);
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -4434,7 +4493,7 @@ static int (*kvm_vmx_exit_handlers[])(st
        [EXIT_REASON_INVD]                    = handle_invd,
        [EXIT_REASON_INVLPG]                  = handle_invlpg,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
-       [EXIT_REASON_VMCLEAR]                 = handle_vmx_insn,
+       [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
        [EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
        [EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
--