On 09/02/2009 06:38 PM, [email protected] wrote:
From: Orit Wasserman <[email protected]>

---
  arch/x86/kvm/svm.c |    3 -
  arch/x86/kvm/vmx.c |  187 +++++++++++++++++++++++++++++++++++++++++++++++++++-
  arch/x86/kvm/x86.c |    6 ++-
  arch/x86/kvm/x86.h |    2 +
  4 files changed, 192 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2df9b45..3c1f22a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -124,9 +124,6 @@ static int npt = 1;

  module_param(npt, int, S_IRUGO);

-static int nested = 1;
-module_param(nested, int, S_IRUGO);
-
  static void svm_flush_tlb(struct kvm_vcpu *vcpu);
  static void svm_complete_interrupts(struct vcpu_svm *svm);

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 78101dd..abba325 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -67,6 +67,11 @@ struct vmcs {
        char data[0];
  };

+struct nested_vmx {
+       /* Has the level1 guest done vmon? */
+       bool vmon;
+};

The instruction is called vmxon, not vmon; better to name the flag (and its comment) accordingly.

@@ -967,6 +975,69 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
  }

  /*
+ * Handles msr read for nested virtualization
+ */
+static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
+                             u64 *pdata)
+{
+       u32 vmx_msr_low = 0, vmx_msr_high = 0;
+
+       switch (msr_index) {
+       case MSR_IA32_FEATURE_CONTROL:
+               *pdata = 0;
+               break;
+       case MSR_IA32_VMX_BASIC:
+               rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

Use rdmsrl, it's easier.

I think we need to mask it with the capabilities we support. Otherwise the guest can try to use some new feature which we don't support yet, and crash.
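
Something along these lines (a rough sketch; nested_vmx_basic_mask is a made-up name for whatever set of VMX_BASIC bits the nested code actually emulates):

	case MSR_IA32_VMX_BASIC: {
		u64 host_msr;

		/* rdmsrl reads the full 64-bit MSR in one call */
		rdmsrl(MSR_IA32_VMX_BASIC, host_msr);
		/* advertise only the bits we know how to emulate */
		*pdata = host_msr & nested_vmx_basic_mask;
		break;
	}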

+               *pdata = vmx_msr_low | ((u64)vmx_msr_high << 32);
+               break;
+       case MSR_IA32_VMX_PINBASED_CTLS:
+               *pdata = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+                       PIN_BASED_VIRTUAL_NMIS;

Need to mask with actual cpu capabilities in case we run on an older cpu.
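
In the VMX capability MSRs the high dword holds the allowed-1 settings, so anding with it drops any control this cpu cannot set. A sketch:

	case MSR_IA32_VMX_PINBASED_CTLS:
		rdmsr(MSR_IA32_VMX_PINBASED_CTLS, vmx_msr_low, vmx_msr_high);
		/* expose only controls the host cpu allows to be 1 */
		*pdata = (PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
			  PIN_BASED_VIRTUAL_NMIS) & vmx_msr_high;
		break;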

+               break;
+       case MSR_IA32_VMX_PROCBASED_CTLS:
+       *pdata = CPU_BASED_HLT_EXITING |
+#ifdef CONFIG_X86_64
+                       CPU_BASED_CR8_LOAD_EXITING |
+                       CPU_BASED_CR8_STORE_EXITING |
+#endif
+                       CPU_BASED_CR3_LOAD_EXITING |
+                       CPU_BASED_CR3_STORE_EXITING |
+                       CPU_BASED_USE_IO_BITMAPS |
+                       CPU_BASED_MOV_DR_EXITING |
+                       CPU_BASED_USE_TSC_OFFSETING |
+                       CPU_BASED_INVLPG_EXITING;

Same here... or are all these guaranteed to be present?
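
If not, the same masking would work here (sketch):

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	/* drop any control the host cpu doesn't allow to be 1 */
	*pdata &= vmx_msr_high;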

+
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+       struct kvm_segment cs;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!nested) {
+               printk(KERN_DEBUG "%s: nested vmx not enabled\n", __func__);
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 1;
+       }
+
+       vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+
+       if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
+           !(vcpu->arch.cr0 & X86_CR0_PE) ||
+           (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
+           ((find_msr_entry(to_vmx(vcpu),
+                            MSR_EFER)->data & EFER_LMA) && !cs.l)) {

Not everyone has EFER.  Better to wrap this in an #ifdef CONFIG_X86_64.
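
Something like this (a sketch; only the condition changes, the body stays as in the patch):

	if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
	    !(vcpu->arch.cr0 & X86_CR0_PE) ||
	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)
#ifdef CONFIG_X86_64
	    /* the EFER.LMA vs. CS.L check only makes sense where
	     * EFER is guaranteed to exist */
	    || ((find_msr_entry(to_vmx(vcpu), MSR_EFER)->data & EFER_LMA) &&
		!cs.l)
#endif
	    ) {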

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

