This implements the VMLOAD and VMSAVE instructions, which usually surround
the VMRUN instruction. Both instructions operate on the same set of state
(FS, GS, TR, LDTR, KERNEL_GS_BASE and the SYSCALL/SYSENTER MSRs), so the
copy logic only needs to be implemented once.

Signed-off-by: Alexander Graf <[EMAIL PROTECTED]>
---
arch/x86/kvm/svm.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 67 insertions(+), 2 deletions(-)
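For context, a hypervisor conventionally brackets VMRUN with this pair:
VMSAVE/VMLOAD handle the segment and MSR state that VMRUN itself does not
switch. A heavily simplified sketch of such a world switch follows; the
function name and the host_vmcb_pa/guest_vmcb_pa parameters are hypothetical,
the .byte encodings stand in for assemblers that lack the mnemonics, and real
code must additionally cope with everything VMRUN clobbers.

/* Each opcode implicitly takes the VMCB physical address in rAX. */
#define SVM_VMRUN	".byte 0x0f, 0x01, 0xd8"
#define SVM_VMLOAD	".byte 0x0f, 0x01, 0xda"
#define SVM_VMSAVE	".byte 0x0f, 0x01, 0xdb"

static void world_switch(unsigned long host_vmcb_pa,
			 unsigned long guest_vmcb_pa)
{
	/* Stash the host segment/MSR state that VMRUN does not save. */
	asm volatile(SVM_VMSAVE : : "a"(host_vmcb_pa) : "memory");
	/* Load the guest's copy of that state, then enter the guest. */
	asm volatile(SVM_VMLOAD : : "a"(guest_vmcb_pa) : "memory");
	asm volatile(SVM_VMRUN  : : "a"(guest_vmcb_pa) : "memory");
	/* Back from #VMEXIT: save guest state, restore the host's. */
	asm volatile(SVM_VMSAVE : : "a"(guest_vmcb_pa) : "memory");
	asm volatile(SVM_VMLOAD : : "a"(host_vmcb_pa) : "memory");
}

Intercepting VMLOAD/VMSAVE and copying the fields between the nested VMCB
and our own is enough to emulate both halves of that bracket.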
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b440731..f857642 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1219,6 +1219,71 @@ static int nested_svm_do(struct vcpu_svm *svm,
 	return retval;
 }
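+
+/*
+ * Copy the state touched by VMLOAD/VMSAVE between two VMCBs: FS, GS,
+ * TR, LDTR plus KERNEL_GS_BASE and the SYSCALL/SYSENTER MSR state.
+ */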
+static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+{
+	memcpy(&to_vmcb->save.fs, &from_vmcb->save.fs, sizeof(struct vmcb_seg));
+	memcpy(&to_vmcb->save.gs, &from_vmcb->save.gs, sizeof(struct vmcb_seg));
+	memcpy(&to_vmcb->save.tr, &from_vmcb->save.tr, sizeof(struct vmcb_seg));
+	memcpy(&to_vmcb->save.ldtr, &from_vmcb->save.ldtr,
+	       sizeof(struct vmcb_seg));
+	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
+	to_vmcb->save.star = from_vmcb->save.star;
+	to_vmcb->save.lstar = from_vmcb->save.lstar;
+	to_vmcb->save.cstar = from_vmcb->save.cstar;
+	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
+	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
+	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
+	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
+
+	return 1;
+}
+
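+/* Callbacks for nested_svm_do(): nested_vmcb points at the guest's VMCB. */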
+static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
+			     void *arg2, void *opaque)
+{
+	return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
+}
+
+static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
+			     void *arg2, void *opaque)
+{
+	return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
+}
+
+static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
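+	/*
+	 * VMLOAD is 3 bytes long (0f 01 da), only legal at CPL 0, and takes
+	 * the guest physical address of the source VMCB in rAX.
+	 */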
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	if (svm->vmcb->save.cpl) {
+		printk(KERN_ERR "%s: invalid cpl 0x%x at ip 0x%lx\n",
+		       __func__, svm->vmcb->save.cpl,
+		       kvm_rip_read(&svm->vcpu));
+		kvm_queue_exception(&svm->vcpu, GP_VECTOR);
+		return 1;
+	}
+
+	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);
+
+	return 1;
+}
+
+static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
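+	/*
+	 * VMSAVE is likewise 3 bytes (0f 01 db), CPL-0 only, and takes the
+	 * guest physical address of the destination VMCB in rAX.
+	 */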
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	if (svm->vmcb->save.cpl) {
+		printk(KERN_ERR "%s: invalid cpl 0x%x at ip 0x%lx\n",
+		       __func__, svm->vmcb->save.cpl, kvm_rip_read(&svm->vcpu));
+		kvm_queue_exception(&svm->vcpu, GP_VECTOR);
+		return 1;
+	}
+
+	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);
+
+	return 1;
+}
+
 static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
@@ -1555,8 +1620,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_SHUTDOWN] = shutdown_interception,
 	[SVM_EXIT_VMRUN] = invalid_op_interception,
 	[SVM_EXIT_VMMCALL] = vmmcall_interception,
-	[SVM_EXIT_VMLOAD] = invalid_op_interception,
-	[SVM_EXIT_VMSAVE] = invalid_op_interception,
+	[SVM_EXIT_VMLOAD] = vmload_interception,
+	[SVM_EXIT_VMSAVE] = vmsave_interception,
 	[SVM_EXIT_STGI] = stgi_interception,
 	[SVM_EXIT_CLGI] = clgi_interception,
 	[SVM_EXIT_SKINIT] = invalid_op_interception,
--
1.5.6