Re: [PATCH 11/29] nVMX: Implement VMCLEAR

2011-01-30 Thread Avi Kivity

On 01/27/2011 10:35 AM, Nadav Har'El wrote:

This patch implements the VMCLEAR instruction.



+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_vmx *vmx = to_vmx(vcpu);
+   gva_t gva;
+   gpa_t vmcs12_addr;
+   struct vmcs12 *vmcs12;
+   struct page *page;
+
+   if (!nested_vmx_check_permission(vcpu))
+   return 1;
+
+   if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+   vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+   return 1;
+
+   if (kvm_read_guest_virt(gva, &vmcs12_addr, sizeof(vmcs12_addr),
+   vcpu, NULL)) {
+   kvm_queue_exception(vcpu, PF_VECTOR);


This generates an exception without an error code.  Use the 'struct 
x86_exception' parameter to kvm_read_guest_virt() to obtain the correct 
exception/error code pair.
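
Something along these lines (an untested sketch, assuming the existing
struct x86_exception / kvm_inject_page_fault() plumbing) would reinject
the fault with the vector and error code the guest expects:

	struct x86_exception e;

	if (kvm_read_guest_virt(gva, &vmcs12_addr, sizeof(vmcs12_addr),
				vcpu, &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}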



+   return 1;
+   }
+
+   if (!IS_ALIGNED(vmcs12_addr, PAGE_SIZE)) {
+   nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+   skip_emulated_instruction(vcpu);
+   return 1;
+   }
+
+   if (vmcs12_addr == vmx->nested.current_vmptr) {
+   kunmap(vmx->nested.current_vmcs12_page);
+   nested_release_page(vmx->nested.current_vmcs12_page);
+   vmx->nested.current_vmptr = -1ull;
+   }
+
+   page = nested_get_page(vcpu, vmcs12_addr);
+   if (page == NULL) {
+   /*
+* For accurate processor emulation, VMCLEAR beyond available
+* physical memory should do nothing at all. However, it is
+* possible that a nested vmx bug, not a guest hypervisor bug,
+* resulted in this case, so let's shut down before doing any
+* more damage:
+*/
+   kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+   nested_release_page(page);


nested_release_page(NULL) unneeded.
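
I.e. the error path can simply be:

	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	return 1;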


+   return 1;
+   }
+   vmcs12 = kmap(page);
+   vmcs12->launch_state = 0;
+   kunmap(page);
+   nested_release_page(page);
+
+   nested_free_vmcs(vmx, vmcs12_addr);
+
+   skip_emulated_instruction(vcpu);
+   nested_vmx_succeed(vcpu);
+   return 1;
+}
+


--
error compiling committee.c: too many arguments to function



[PATCH 11/29] nVMX: Implement VMCLEAR

2011-01-27 Thread Nadav Har'El
This patch implements the VMCLEAR instruction.

Signed-off-by: Nadav Har'El n...@il.ibm.com
---
 arch/x86/kvm/vmx.c |   63 ++-
 1 file changed, 62 insertions(+), 1 deletion(-)

--- .before/arch/x86/kvm/vmx.c  2011-01-26 18:06:04.0 +0200
+++ .after/arch/x86/kvm/vmx.c   2011-01-26 18:06:04.0 +0200
@@ -283,6 +283,8 @@ struct __packed vmcs12 {
u32 abort;
 
struct vmcs_fields fields;
+
+   bool launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
 };
 
 /*
@@ -4582,6 +4584,65 @@ static void nested_vmx_failValid(struct 
get_vmcs12_fields(vcpu)->vm_instruction_error = vm_instruction_error;
 }
 
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+   struct vcpu_vmx *vmx = to_vmx(vcpu);
+   gva_t gva;
+   gpa_t vmcs12_addr;
+   struct vmcs12 *vmcs12;
+   struct page *page;
+
+   if (!nested_vmx_check_permission(vcpu))
+   return 1;
+
+   if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+   vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+   return 1;
+
+   if (kvm_read_guest_virt(gva, &vmcs12_addr, sizeof(vmcs12_addr),
+   vcpu, NULL)) {
+   kvm_queue_exception(vcpu, PF_VECTOR);
+   return 1;
+   }
+
+   if (!IS_ALIGNED(vmcs12_addr, PAGE_SIZE)) {
+   nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+   skip_emulated_instruction(vcpu);
+   return 1;
+   }
+
+   if (vmcs12_addr == vmx->nested.current_vmptr) {
+   kunmap(vmx->nested.current_vmcs12_page);
+   nested_release_page(vmx->nested.current_vmcs12_page);
+   vmx->nested.current_vmptr = -1ull;
+   }
+
+   page = nested_get_page(vcpu, vmcs12_addr);
+   if (page == NULL) {
+   /*
+* For accurate processor emulation, VMCLEAR beyond available
+* physical memory should do nothing at all. However, it is
+* possible that a nested vmx bug, not a guest hypervisor bug,
+* resulted in this case, so let's shut down before doing any
+* more damage:
+*/
+   kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+   nested_release_page(page);
+   return 1;
+   }
+   vmcs12 = kmap(page);
+   vmcs12->launch_state = 0;
+   kunmap(page);
+   nested_release_page(page);
+
+   nested_free_vmcs(vmx, vmcs12_addr);
+
+   skip_emulated_instruction(vcpu);
+   nested_vmx_succeed(vcpu);
+   return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -4603,7 +4664,7 @@ static int (*kvm_vmx_exit_handlers[])(st
[EXIT_REASON_INVD]= handle_invd,
[EXIT_REASON_INVLPG]  = handle_invlpg,
[EXIT_REASON_VMCALL]  = handle_vmcall,
-   [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
+   [EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH]= handle_vmx_insn,
[EXIT_REASON_VMPTRLD] = handle_vmx_insn,
[EXIT_REASON_VMPTRST] = handle_vmx_insn,