On Sun, Jun 13, 2010 at 03:30:46PM +0300, Nadav Har'El wrote:
> Implement the VMLAUNCH and VMRESUME instructions, allowing a guest
> hypervisor to run its own guests.
>
> Signed-off-by: Nadav Har'El <[email protected]>
> ---
> --- .before/arch/x86/kvm/vmx.c 2010-06-13 15:01:29.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c 2010-06-13 15:01:29.000000000 +0300
> @@ -272,6 +272,9 @@ struct __attribute__ ((__packed__)) vmcs
> struct shadow_vmcs shadow_vmcs;
>
> bool launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
> +
> + int cpu;
> + int launched;
> };
>
> struct vmcs_list {
> @@ -297,6 +300,24 @@ struct nested_vmx {
> /* list of real (hardware) VMCS, one for each L2 guest of L1 */
> struct list_head l2_vmcs_list; /* a vmcs_list */
> int l2_vmcs_num;
> +
> + /* Are we running a nested guest now? */
> + bool nested_mode;
> + /* Level 1 state for switching to level 2 and back */
> + struct {
> + u64 efer;
> + unsigned long cr3;
> + unsigned long cr4;
> + u64 io_bitmap_a;
> + u64 io_bitmap_b;
> + u64 msr_bitmap;
> + int cpu;
> + int launched;
> + } l1_state;
> + /* Level 1 shadow vmcs for switching to level 2 and back */
> + struct shadow_vmcs *l1_shadow_vmcs;
> + /* Level 1 vmcs loaded into the processor */
> + struct vmcs *l1_vmcs;
> };
>
> enum vmcs_field_type {
> @@ -1407,6 +1428,19 @@ static void vmx_vcpu_load(struct kvm_vcp
> new_offset = vmcs_read64(TSC_OFFSET) + delta;
> vmcs_write64(TSC_OFFSET, new_offset);
> }
> +
> + if (vmx->nested.l1_shadow_vmcs != NULL) {
> + struct shadow_vmcs *l1svmcs =
> + vmx->nested.l1_shadow_vmcs;
> + l1svmcs->host_tr_base = vmcs_readl(HOST_TR_BASE);
> + l1svmcs->host_gdtr_base = vmcs_readl(HOST_GDTR_BASE);
> + l1svmcs->host_ia32_sysenter_esp =
> + vmcs_readl(HOST_IA32_SYSENTER_ESP);
> + if (tsc_this < vcpu->arch.host_tsc)
> + l1svmcs->tsc_offset = vmcs_read64(TSC_OFFSET);
> + if (vmx->nested.nested_mode)
> + load_vmcs_host_state(l1svmcs);
> + }
> }
> }
>
> @@ -2301,6 +2335,9 @@ static void free_l1_state(struct kvm_vcp
> kfree(list_item);
> }
> vmx->nested.l2_vmcs_num = 0;
> +
> + kfree(vmx->nested.l1_shadow_vmcs);
> + vmx->nested.l1_shadow_vmcs = NULL;
> }
>
> static void free_kvm_area(void)
> @@ -4158,6 +4195,13 @@ static int handle_vmon(struct kvm_vcpu *
> INIT_LIST_HEAD(&(vmx->nested.l2_vmcs_list));
> vmx->nested.l2_vmcs_num = 0;
>
> + vmx->nested.l1_shadow_vmcs = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!vmx->nested.l1_shadow_vmcs) {
> + printk(KERN_INFO
> + "couldn't allocate memory for l1_shadow_vmcs\n");
> + return -ENOMEM;
> + }
> +
> vmx->nested.vmxon = 1;
>
> skip_emulated_instruction(vcpu);
> @@ -4348,6 +4392,42 @@ static int handle_vmclear(struct kvm_vcp
> return 1;
> }
>
> +static int nested_vmx_run(struct kvm_vcpu *vcpu);
> +
> +static int handle_launch_or_resume(struct kvm_vcpu *vcpu, bool launch)
> +{
> + if (!nested_vmx_check_permission(vcpu))
> + return 1;
> +
> + if (!nested_map_current(vcpu))
> + return 1;
> + if (to_vmx(vcpu)->nested.current_l2_page->launch_state == launch) {
> + /* VMLAUNCH needs a clear launch_state; VMRESUME needs it set */
> + set_rflags_to_vmx_fail_valid(vcpu);
> + nested_unmap_current(vcpu);
> + return 1;
> + }
The handler should also check for blocking by MOV SS here. Why did Intel
decide that VM entry should fail in this case? Who knows, but the spec says
so.
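Something along these lines would do (an untested sketch; it reuses the
patch's set_rflags_to_vmx_fail_valid() helper and assumes L1's VMCS is
still the current one at this point):

	/* Per the SDM, VM entry fails if there is blocking by MOV SS */
	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
	    GUEST_INTR_STATE_MOV_SS) {
		set_rflags_to_vmx_fail_valid(vcpu);
		nested_unmap_current(vcpu);
		return 1;
	}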
> + nested_unmap_current(vcpu);
> +
> + skip_emulated_instruction(vcpu);
> +
> + nested_vmx_run(vcpu);
> + return 1;
> +}
> +
> +/* Emulate the VMLAUNCH instruction */
> +static int handle_vmlaunch(struct kvm_vcpu *vcpu)
> +{
> + return handle_launch_or_resume(vcpu, true);
> +}
> +
> +/* Emulate the VMRESUME instruction */
> +static int handle_vmresume(struct kvm_vcpu *vcpu)
> +{
> + return handle_launch_or_resume(vcpu, false);
> +}
> +
> static inline bool nested_vmcs_read_any(struct kvm_vcpu *vcpu,
> unsigned long field, u64 *ret)
> {
> @@ -4892,11 +4972,11 @@ static int (*kvm_vmx_exit_handlers[])(st
> [EXIT_REASON_INVLPG] = handle_invlpg,
> [EXIT_REASON_VMCALL] = handle_vmcall,
> [EXIT_REASON_VMCLEAR] = handle_vmclear,
> - [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
> + [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
> [EXIT_REASON_VMPTRLD] = handle_vmptrld,
> [EXIT_REASON_VMPTRST] = handle_vmptrst,
> [EXIT_REASON_VMREAD] = handle_vmread,
> - [EXIT_REASON_VMRESUME] = handle_vmx_insn,
> + [EXIT_REASON_VMRESUME] = handle_vmresume,
> [EXIT_REASON_VMWRITE] = handle_vmwrite,
> [EXIT_REASON_VMOFF] = handle_vmoff,
> [EXIT_REASON_VMON] = handle_vmon,
> @@ -4958,7 +5038,8 @@ static int vmx_handle_exit(struct kvm_vc
> "(0x%x) and exit reason is 0x%x\n",
> __func__, vectoring_info, exit_reason);
>
> - if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
> + if (!vmx->nested.nested_mode &&
> + unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
> if (vmx_interrupt_allowed(vcpu)) {
> vmx->soft_vnmi_blocked = 0;
> } else if (vmx->vnmi_blocked_time > 1000000000LL &&
> @@ -5771,6 +5852,138 @@ int prepare_vmcs_02(struct kvm_vcpu *vcp
> return 0;
> }
>
> +static int nested_vmx_run(struct kvm_vcpu *vcpu)
> +{
> + struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> + vmx->nested.nested_mode = 1;
> + sync_cached_regs_to_vmcs(vcpu);
> + save_vmcs(vmx->nested.l1_shadow_vmcs);
> +
> + vmx->nested.l1_state.efer = vcpu->arch.efer;
> + if (!enable_ept)
> + vmx->nested.l1_state.cr3 = vcpu->arch.cr3;
> + vmx->nested.l1_state.cr4 = vcpu->arch.cr4;
> +
> + if (!nested_map_current(vcpu)) {
> + set_rflags_to_vmx_fail_valid(vcpu);
> + return 1;
> + }
> +
> + if (cpu_has_vmx_msr_bitmap())
> + vmx->nested.l1_state.msr_bitmap = vmcs_read64(MSR_BITMAP);
> + else
> + vmx->nested.l1_state.msr_bitmap = 0;
> +
> + vmx->nested.l1_state.io_bitmap_a = vmcs_read64(IO_BITMAP_A);
> + vmx->nested.l1_state.io_bitmap_b = vmcs_read64(IO_BITMAP_B);
> + vmx->nested.l1_vmcs = vmx->vmcs;
> + vmx->nested.l1_state.cpu = vcpu->cpu;
> + vmx->nested.l1_state.launched = vmx->launched;
> +
> + vmx->vmcs = nested_get_current_vmcs(vcpu);
> + if (!vmx->vmcs) {
> + printk(KERN_ERR "Missing VMCS\n");
> + set_rflags_to_vmx_fail_valid(vcpu);
> + return 1;
> + }
> +
> + vcpu->cpu = vmx->nested.current_l2_page->cpu;
> + vmx->launched = vmx->nested.current_l2_page->launched;
> +
> + if (!vmx->nested.current_l2_page->launch_state || !vmx->launched) {
> + vmcs_clear(vmx->vmcs);
> + vmx->launched = 0;
> + vmx->nested.current_l2_page->launch_state = 1;
> + }
> +
> + vmx_vcpu_load(vcpu, get_cpu());
> + put_cpu();
> +
> + prepare_vmcs_02(vcpu,
> + get_shadow_vmcs(vcpu), vmx->nested.l1_shadow_vmcs);
> +
> + if (get_shadow_vmcs(vcpu)->vm_entry_controls &
> + VM_ENTRY_IA32E_MODE) {
> + if (!((vcpu->arch.efer & EFER_LMA) &&
> + (vcpu->arch.efer & EFER_LME)))
> + vcpu->arch.efer |= (EFER_LMA | EFER_LME);
> + } else {
> + if ((vcpu->arch.efer & EFER_LMA) ||
> + (vcpu->arch.efer & EFER_LME))
> + vcpu->arch.efer = 0;
> + }
> +
> + /* vmx_set_cr0() sets the cr0 that L2 will read, to be the one that L1
> + * dictated, and takes appropriate actions for special cr0 bits (like
> + * real mode, etc.).
> + */
> + vmx_set_cr0(vcpu,
> + (get_shadow_vmcs(vcpu)->guest_cr0 &
> + ~get_shadow_vmcs(vcpu)->cr0_guest_host_mask) |
> + (get_shadow_vmcs(vcpu)->cr0_read_shadow &
> + get_shadow_vmcs(vcpu)->cr0_guest_host_mask));
> +
> + /* However, vmx_set_cr0 incorrectly enforces KVM's relationship between
> + * GUEST_CR0 and CR0_READ_SHADOW, e.g., that the former is the same as
> + * the latter with TS added if !fpu_active. We need to take the
> + * actual GUEST_CR0 that L1 wanted, just with added TS if !fpu_active
> + * like KVM wants (for the "lazy fpu" feature, to avoid the costly
> + * restoration of fpu registers until the FPU is really used).
> + */
> + vmcs_writel(GUEST_CR0, get_shadow_vmcs(vcpu)->guest_cr0 |
> + (vcpu->fpu_active ? 0 : X86_CR0_TS));
> +
> + vmx_set_cr4(vcpu, get_shadow_vmcs(vcpu)->guest_cr4);
> + vmcs_writel(CR4_READ_SHADOW,
> + get_shadow_vmcs(vcpu)->cr4_read_shadow);
> +
> + /* we have to set the X86_CR0_PG bit of the cached cr0, because
> + * kvm_mmu_reset_context enables paging only if X86_CR0_PG is set in
> + * CR0 (we need paging enabled so that KVM treats this guest as a paging
> + * guest, letting us easily forward page faults to L1).
> + */
> + vcpu->arch.cr0 |= X86_CR0_PG;
> +
> + if (enable_ept && !nested_cpu_has_vmx_ept(vcpu)) {
> + vmcs_write32(GUEST_CR3, get_shadow_vmcs(vcpu)->guest_cr3);
> + vmx->vcpu.arch.cr3 = get_shadow_vmcs(vcpu)->guest_cr3;
> + } else {
> + int r;
> + kvm_set_cr3(vcpu, get_shadow_vmcs(vcpu)->guest_cr3);
> + kvm_mmu_reset_context(vcpu);
> +
> + nested_unmap_current(vcpu);
> +
> + r = kvm_mmu_load(vcpu);
> + if (unlikely(r)) {
> + printk(KERN_ERR "Error in kvm_mmu_load r %d\n", r);
> + set_rflags_to_vmx_fail_valid(vcpu);
> + /* switch back to L1 */
> + vmx->nested.nested_mode = 0;
> + vmx->vmcs = vmx->nested.l1_vmcs;
> + vcpu->cpu = vmx->nested.l1_state.cpu;
> + vmx->launched = vmx->nested.l1_state.launched;
> +
> + vmx_vcpu_load(vcpu, get_cpu());
> + put_cpu();
> +
> + return 1;
> + }
> +
> + nested_map_current(vcpu);
> + }
> +
> + kvm_register_write(vcpu, VCPU_REGS_RSP,
> + get_shadow_vmcs(vcpu)->guest_rsp);
> + kvm_register_write(vcpu, VCPU_REGS_RIP,
> + get_shadow_vmcs(vcpu)->guest_rip);
> +
> + nested_unmap_current(vcpu);
> +
> + return 1;
> +}
> +
> static struct kvm_x86_ops vmx_x86_ops = {
> .cpu_has_kvm_support = cpu_has_kvm_support,
> .disabled_by_bios = vmx_disabled_by_bios,
--
Gleb.