On 10/15/2009 11:41 PM, [email protected] wrote:
+/*
+ * Handles msr read for nested virtualization
+ */
+static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
+			      u64 *pdata)
+{
+	u64 vmx_msr = 0;
+
+	switch (msr_index) {
+	case MSR_IA32_FEATURE_CONTROL:
+		*pdata = 0;
+		break;
+	case MSR_IA32_VMX_BASIC:
+		*pdata = 0;
+		rdmsrl(MSR_IA32_VMX_BASIC, vmx_msr);
+		*pdata = (vmx_msr & 0x00ffffcfffffffff);
+		break;
+
+
This (and the rest of the msrs) must be controllable from userspace.
Otherwise a live migration from a newer host to an older host would break.
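One way to do that (untested sketch; nested_vmx_setup_msrs() and the
msr_vmx_basic field are invented names, for illustration only): snapshot
the host msr once at vcpu setup into a per-vcpu shadow, serve reads from
the shadow, and accept writes so userspace can restore the source host's
value after migration:

static void nested_vmx_setup_msrs(struct vcpu_vmx *vmx)
{
	u64 vmx_msr;

	/* snapshot the host value; userspace may overwrite it later */
	rdmsrl(MSR_IA32_VMX_BASIC, vmx_msr);
	vmx->nested.msr_vmx_basic = vmx_msr & 0x00ffffcfffffffffULL;
}

static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = to_vmx(vcpu)->nested.msr_vmx_basic;
		return 0;
	}
	return 1;
}

static int nested_vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		/* live migration: accept the old host's value as-is */
		to_vmx(vcpu)->nested.msr_vmx_basic = data;
		return 0;
	}
	return 1;
}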
+/*
+ * Writes msr value for nested virtualization
+ * Returns 0 on success, non-0 otherwise.
+ */
+static int nested_vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+	switch (msr_index) {
+	case MSR_IA32_FEATURE_CONTROL:
+		if ((data & (FEATURE_CONTROL_LOCKED |
+			     FEATURE_CONTROL_VMXON_ENABLED))
+		    != (FEATURE_CONTROL_LOCKED |
+			FEATURE_CONTROL_VMXON_ENABLED))
+			return 1;
+		break;
+	default:
+		return 1;
+	}
+
+	return 0;
+}
+
Need to export this msr to userspace for live migration. See
msrs_to_save[].
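Something along these lines in arch/x86/kvm/x86.c (sketch; the existing
entries are elided):

static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	/* ... existing entries ... */
	MSR_IA32_FEATURE_CONTROL,
};

KVM_GET_MSR_INDEX_LIST reports this array to userspace, so qemu will then
save and restore the msr across a migration.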
+/*
+ * Check to see if vcpu can execute vmx command
+ * Inject the corresponding exception
+ */
+static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment cs;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_msr_entry *msr;
+
+	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+
+	if (!vmx->nested.vmxon) {
+		printk(KERN_DEBUG "%s: vmx not on\n", __func__);
Use pr_debug() here, i.e.:
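	pr_debug("%s: vmx not on\n", __func__);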
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 0;
+	}
+
+	msr = find_msr_entry(vmx, MSR_EFER);
+
+	if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
+	    ((msr->data & EFER_LMA) && !cs.l)) {
is_long_mode(), instead of digging EFER out of the guest msr array by
hand. That is, roughly:
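	if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
	    (is_long_mode(vcpu) && !cs.l)) {

and then the msr local and the find_msr_entry() call can go away.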
static int handle_vmx_insn(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}
+static int handle_vmoff(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	vmx->nested.vmxon = 0;
+
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+	struct kvm_segment cs;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!nested) {
+		printk(KERN_DEBUG "%s: nested vmx not enabled\n", __func__);
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+
+	if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
+	    !(vcpu->arch.cr0 & X86_CR0_PE) ||
+	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		printk(KERN_INFO "%s invalid register state\n", __func__);
+		return 1;
+	}
+#ifdef CONFIG_X86_64
+	if (((find_msr_entry(to_vmx(vcpu),
+			     MSR_EFER)->data & EFER_LMA) && !cs.l)) {
is_long_mode(), and you can avoid the #ifdef.
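i.e. something like:

	if (is_long_mode(vcpu) && !cs.l) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

is_long_mode() already evaluates to 0 when CONFIG_X86_64 is not set, so
the #ifdef/#endif pair becomes unnecessary.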
VMXON is supposed to block INIT, please add that (in a separate patch).
--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.