[PATCH] KVM: nVMX: nested MSR auto load/restore emulation.

2014-11-22 Thread Wincy Van
Some hypervisors need the MSR auto load/restore feature.

We read MSRs from the VM-entry MSR load area specified by L1
and load them via kvm_set_msr on nested entry.
When a nested exit occurs, we get the MSRs via kvm_get_msr and write
them to L1's MSR store area. After this, we read MSRs from the VM-exit
MSR load area and load them via kvm_set_msr.

VirtualBox works fine with this patch.

Signed-off-by: Wincy Van <fanwenyi0...@gmail.com>
---
 arch/x86/include/uapi/asm/vmx.h |5 ++
 arch/x86/kvm/vmx.c  |  123 --
 arch/x86/kvm/x86.c  |1 +
 virt/kvm/kvm_main.c |1 +
 4 files changed, 123 insertions(+), 7 deletions(-)
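
A note for reviewers who have not looked at the MSR-area format before:
L1 publishes up to three areas through VM_ENTRY_MSR_LOAD_ADDR/COUNT,
VM_EXIT_MSR_STORE_ADDR/COUNT and VM_EXIT_MSR_LOAD_ADDR/COUNT, and each
area is an array of 16-byte entries. Below is a minimal sketch of the
layout the new helpers walk; the struct and function names here are
illustrative only and are not part of the diff:

/*
 * Illustrative only: mirrors the 16-byte MSR-area entry format that
 * struct vmx_msr_entry in arch/x86/include/asm/vmx.h describes.
 */
#include <stdint.h>

struct msr_area_entry {
	uint32_t index;                 /* MSR number to load/store */
	uint32_t reserved;              /* must be zero, or the entry is rejected */
	uint64_t value;                 /* value to load, or slot the stored value lands in */
} __attribute__((aligned(16)));

/*
 * Guest-physical address of entry i in an area starting at 'base';
 * this is the "gpa + i * sizeof(struct vmx_msr_entry)" computation
 * the loops below repeat.
 */
static inline uint64_t msr_entry_gpa(uint64_t base, uint32_t i)
{
	return base + i * sizeof(struct msr_area_entry);
}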

diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 990a2fe..986af3f 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE   32
 #define EXIT_REASON_INVALID_STATE   33
+#define EXIT_REASON_MSR_LOAD_FAIL   34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
@@ -114,8 +115,12 @@
{ EXIT_REASON_APIC_WRITE,"APIC_WRITE" }, \
{ EXIT_REASON_EOI_INDUCED,   "EOI_INDUCED" }, \
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+   { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
{ EXIT_REASON_INVD,  "INVD" }, \
{ EXIT_REASON_INVVPID,   "INVVPID" }, \
{ EXIT_REASON_INVPCID,   "INVPCID" }
 
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
 #endif /* _UAPIVMX_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 330a08a..03daefc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6088,6 +6088,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 */
 }
 
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+   /* TODO: do not simply reset the guest here. */
+   kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+   pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 {
struct vcpu_vmx *vmx =
@@ -8215,6 +8222,92 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
  ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static inline int nested_msr_check_common(struct vmx_msr_entry *e)
+{
+   if (e->index >> 8 == 0x8 || e->reserved != 0)
+   return -EINVAL;
+   return 0;
+}
+
+static inline int nested_load_msr_check(struct vmx_msr_entry *e)
+{
+   if (e->index == MSR_FS_BASE ||
+   e->index == MSR_GS_BASE ||
+   nested_msr_check_common(e))
+   return -EINVAL;
+   return 0;
+}
+
+/* Load guest MSRs at nested entry.
+ * Return 0 for success, or the 1-based index of the failed entry.
+ */
+static u32 nested_entry_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+   u32 i = 0;
+   struct vmx_msr_entry e;
+   struct msr_data msr;
+
+   msr.host_initiated = false;
+   while (i < count) {
+   kvm_read_guest(vcpu->kvm,
+   gpa + i * sizeof(struct vmx_msr_entry),
+   &e, sizeof(struct vmx_msr_entry));
+   if (nested_load_msr_check(&e))
+   goto fail;
+   msr.index = e.index;
+   msr.data = e.value;
+   if (kvm_set_msr(vcpu, &msr))
+   goto fail;
+   ++i;
+   }
+   return 0;
+fail:
+   return i + 1;
+}
+
+static int nested_exit_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+   u32 i = 0;
+   struct vmx_msr_entry e;
+
+   while (i < count) {
+   kvm_read_guest(vcpu->kvm,
+   gpa + i * sizeof(struct vmx_msr_entry),
+   &e, sizeof(struct vmx_msr_entry));
+   if (nested_msr_check_common(&e))
+   return -EINVAL;
+   if (kvm_get_msr(vcpu, e.index, &e.value))
+   return -EINVAL;
+   kvm_write_guest(vcpu->kvm,
+   gpa + i * sizeof(struct vmx_msr_entry),
+   &e, sizeof(struct vmx_msr_entry));
+   ++i;
+   }
+   return 0;
+}
+
+static int nested_exit_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+   u32 i = 0;
+   struct vmx_msr_entry e;
+   struct msr_data msr;
+
+   msr.host_initiated = false;
+   while (i < count) {
+   kvm_read_guest(vcpu->kvm,
+   gpa + i * sizeof(struct vmx_msr_entry),
+   &e, sizeof(struct vmx_msr_entry));
+   if (nested_load_msr_check(&e))
+   return -EINVAL;
+ 

Re: [PATCH] KVM: nVMX: nested MSR auto load/restore emulation.

2014-11-21 Thread Jan Kiszka
On 2014-11-22 05:24, Wincy Van wrote:
> Some hypervisors need the MSR auto load/restore feature.
> 
> We read MSRs from the VM-entry MSR load area specified by L1
> and load them via kvm_set_msr on nested entry.
> When a nested exit occurs, we get the MSRs via kvm_get_msr and write
> them to L1's MSR store area. After this, we read MSRs from the VM-exit
> MSR load area and load them via kvm_set_msr.
> 
> VirtualBox works fine with this patch.

Cool! This feature is long overdue.

The patch is unfortunately misformatted, which makes it very hard to read.
Please check via linux/scripts/checkpatch.pl for the proper style.

Could you also write a corresponding kvm-unit-test (see x86/vmx_tests.c)?

Jan
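
For reference, a test along these lines would exercise all three paths
(entry load, exit store, exit load): one-entry areas around a scratch
MSR, with checks on both sides of the entry/exit. It is only a rough
outline against the existing x86/vmx_tests.c conventions; the test
hooks, vmcs_write()/report()/vmcall(), VMX_TEST_VMEXIT and the VMCS
field constants (the kernel's names are used as placeholders here) are
assumptions that would need to be matched to x86/vmx.h:

/*
 * Rough sketch only.  Assumes MSR accesses in L2 do not themselves
 * cause exits (e.g. the MSR bitmap is enabled), and that guest virtual
 * addresses are identity-mapped so they can be passed as physical
 * addresses for the MSR areas.
 */
struct msr_area_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __attribute__((aligned(16)));

static struct msr_area_entry entry_load[1];
static struct msr_area_entry exit_store[1];
static struct msr_area_entry exit_load[1];

static void msr_switch_init(struct vmcs *vmcs)
{
	entry_load[0].index = MSR_IA32_SYSENTER_CS;   /* scratch MSR, loaded on VM entry */
	entry_load[0].value = 0x11;
	exit_store[0].index = MSR_IA32_SYSENTER_CS;   /* L2's value is stored here on exit */
	exit_load[0].index  = MSR_IA32_SYSENTER_CS;   /* loaded back for L1 on exit */
	exit_load[0].value  = 0x22;

	vmcs_write(VM_ENTRY_MSR_LOAD_COUNT, 1);       /* placeholder field names */
	vmcs_write(VM_ENTRY_MSR_LOAD_ADDR, (u64)entry_load);
	vmcs_write(VM_EXIT_MSR_STORE_COUNT, 1);
	vmcs_write(VM_EXIT_MSR_STORE_ADDR, (u64)exit_store);
	vmcs_write(VM_EXIT_MSR_LOAD_COUNT, 1);
	vmcs_write(VM_EXIT_MSR_LOAD_ADDR, (u64)exit_load);
}

static void msr_switch_main(void)
{
	/* Runs in L2: VM entry should have auto-loaded 0x11. */
	report("entry MSR load", rdmsr(MSR_IA32_SYSENTER_CS) == 0x11);
	wrmsr(MSR_IA32_SYSENTER_CS, 0x33);
	vmcall();                                     /* force a VM exit to L1 */
}

static int msr_switch_exit_handler(void)
{
	/* Back in L1: the exit should have stored L2's value and loaded ours. */
	report("exit MSR store", exit_store[0].value == 0x33);
	report("exit MSR load", rdmsr(MSR_IA32_SYSENTER_CS) == 0x22);
	return VMX_TEST_VMEXIT;
}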






[PATCH] KVM: nVMX: nested MSR auto load/restore emulation.

2014-11-21 Thread Wincy Van
Some hypervisors need the MSR auto load/restore feature.

We read MSRs from the VM-entry MSR load area specified by L1
and load them via kvm_set_msr on nested entry.
When a nested exit occurs, we get the MSRs via kvm_get_msr and write
them to L1's MSR store area. After this, we read MSRs from the VM-exit
MSR load area and load them via kvm_set_msr.

VirtualBox works fine with this patch.

Signed-off-by: Wincy Van <fanwenyi0...@gmail.com>

diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 990a2fe..986af3f 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -56,6 +56,7 @@
 #define EXIT_REASON_MSR_READ            31
 #define EXIT_REASON_MSR_WRITE   32
 #define EXIT_REASON_INVALID_STATE   33
+#define EXIT_REASON_MSR_LOAD_FAIL   34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
@@ -114,8 +115,12 @@
  { EXIT_REASON_APIC_WRITE,"APIC_WRITE" }, \
  { EXIT_REASON_EOI_INDUCED,   "EOI_INDUCED" }, \
  { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
+ { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
  { EXIT_REASON_INVD,  "INVD" }, \
  { EXIT_REASON_INVVPID,   "INVVPID" }, \
  { EXIT_REASON_INVPCID,   "INVPCID" }

+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
 #endif /* _UAPIVMX_H */
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6a951d8..377e405 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6088,6 +6088,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
  */
 }

+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+ /* TODO: do not simply reset the guest here. */
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ printk(KERN_WARNING"kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 {
  struct vcpu_vmx *vmx =
@@ -8215,6 +8222,88 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
   ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }

+static inline int nested_msr_check_common(struct vmx_msr_entry *e)
+{
+ if (e->index >> 8 == 0x8 || e->reserved != 0)
+ return -EINVAL;
+return 0;
+}
+
+static inline int nested_load_msr_check(struct vmx_msr_entry *e)
+{
+ if (e->index == MSR_FS_BASE ||
+e->index == MSR_GS_BASE ||
+nested_msr_check_common(e))
+ return -EINVAL;
+ return 0;
+}
+
+/* Load guest MSRs at nested entry.
+ * Return 0 for success, or the 1-based index of the failed entry.
+ */
+static u32 nested_entry_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i = 0;
+ struct vmx_msr_entry e;
+ struct msr_data msr;
+
+ msr.host_initiated = false;
+ while (i < count) {
+ kvm_read_guest(vcpu->kvm, gpa + i * sizeof(struct vmx_msr_entry),
+ &e, sizeof(struct vmx_msr_entry));
+ if (nested_load_msr_check(&e))
+ goto fail;
+ msr.index = e.index;
+ msr.data = e.value;
+ if (kvm_set_msr(vcpu, &msr))
+ goto fail;
+ ++i;
+}
+ return 0;
+fail:
+ return i + 1;
+}
+
+static int nested_exit_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i = 0;
+ struct vmx_msr_entry e;
+
+while (i < count) {
+ kvm_read_guest(vcpu->kvm, gpa + i * sizeof(struct vmx_msr_entry),
+ &e, sizeof(struct vmx_msr_entry));
+ if (nested_msr_check_common(&e))
+ return -EINVAL;
+ if (kvm_get_msr(vcpu, e.index, &e.value))
+ return -EINVAL;
+ kvm_write_guest(vcpu->kvm, gpa + i * sizeof(struct vmx_msr_entry),
+ &e, sizeof(struct vmx_msr_entry));
+ ++i;
+ }
+ return 0;
+}
+
+static int nested_exit_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i = 0;
+ struct vmx_msr_entry e;
+ struct msr_data msr;
+
+ msr.host_initiated = false;
+ while (i < count) {
+ kvm_read_guest(vcpu->kvm, gpa + i * sizeof(struct vmx_msr_entry),
+ &e, sizeof(struct vmx_msr_entry));
+ if (nested_load_msr_check(&e))
+ return -EINVAL;
+ msr.index = e.index;
+ msr.data = e.value;
+ if (kvm_set_msr(vcpu, &msr))
+ return -EINVAL;
+ ++i;
+ }
+ return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -8509,6 +8598,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
  int cpu;
  struct loaded_vmcs *vmcs02;
  bool ia32e;
+ u32 msr_entry_idx;

  if (!nested_vmx_check_permission(vcpu) ||
 !nested_vmx_check_vmcs12(vcpu))
@@ -8556,11 +8646,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
  return 1;
  }

- if (vmcs12->vm_entry_msr_load_count > 0 ||
-vmcs12->vm_exit_msr_load_count > 0 ||
-vmcs12->vm_exit_msr_store_count > 0) {
- pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
-__func__);
+ if ((vmcs12->vm_entry_msr_load_count > 0 &&
+ !IS_ALIGNED(vmcs12->vm_entry_msr_load_addr, 16)) ||
+(vmcs12->vm_exit_msr_load_count > 0 &&
+ !IS_ALIGNED(vmcs12->vm_exit_msr_load_addr, 16)) ||
+
