This patch adds correct handling of IDT_VECTORING_INFO_FIELD for the nested
case.

When a guest exits while delivering an interrupt or exception, the CPU reports
this in IDT_VECTORING_INFO_FIELD in the VMCS. When L2 exits to L1, there is
nothing we need to do, because L1 will see this field in vmcs12 and handle it
itself. However, when L2 exits and L0 handles the exit itself and plans to
return to L2, L0 must re-inject this event into L2.
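
For reference, the IDT-vectoring information field packs the event's vector,
its type, and an error-code-valid flag into a single 32-bit value; the
VECTORING_INFO_*_MASK constants used in the patch below select exactly these
bits. The following stand-alone snippet only illustrates that layout (per the
Intel SDM); it is not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative decode of an IDT-vectoring information value.
 * Bit layout: 7:0 vector, 10:8 type, 11 error code delivered, 31 valid.
 */
static void decode_idt_vectoring_info(uint32_t info)
{
	if (!(info & (1u << 31))) {	/* valid bit */
		printf("no event was being delivered\n");
		return;
	}
	printf("vector=%u type=%u error_code_delivered=%u\n",
	       info & 0xffu,		/* bits 7:0  */
	       (info >> 8) & 0x7u,	/* bits 10:8 */
	       (info >> 11) & 0x1u);	/* bit 11    */
}

int main(void)
{
	/* e.g. a page fault (#PF, vector 14, type 3) that pushed an error code */
	decode_idt_vectoring_info((1u << 31) | (1u << 11) | (3u << 8) | 14);
	return 0;
}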

In the normal non-nested case, the idt_vectoring_info is examined after the
exit, and the decision to inject (though not the injection itself) is made at
that point. However, in the nested case the decision whether to return to L2
or to exit to L1 is also made during the injection phase (see the previous
patches), so in the nested case we can only decide what to do about the
idt_vectoring_info right after the injection, i.e., at the beginning of
vmx_vcpu_run, which is the first time we know for sure whether we are staying
in L2 (i.e., is_guest_mode(vcpu) is true).
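
Schematically, the ordering in the nested case is as below. This is only a
self-contained sketch of the control flow described above; none of the names
in it are actual KVM symbols:

#include <stdbool.h>
#include <stdio.h>

/* Decided during the injection phase (nested case). */
static bool staying_in_l2;
/* Saved when the previous L2 exit carried a valid IDT_VECTORING_INFO_FIELD. */
static bool valid_idt_vectoring_info;

static void injection_phase(void)
{
	/* Nested case: choose here whether to return to L2 or exit to L1. */
	staying_in_l2 = true;
}

static void vcpu_run(void)
{
	/* First point at which both facts are known, hence the check goes here. */
	if (staying_in_l2 && valid_idt_vectoring_info)
		printf("re-inject the saved event into L2\n");
	/* ... enter the guest, then save the new IDT-vectoring info on exit ... */
}

int main(void)
{
	valid_idt_vectoring_info = true;	/* as if the last L2 exit had one */
	injection_phase();
	vcpu_run();
	return 0;
}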

Signed-off-by: Nadav Har'El <n...@il.ibm.com>
---
 arch/x86/kvm/vmx.c |   32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

--- .before/arch/x86/kvm/vmx.c  2010-12-08 18:56:51.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c   2010-12-08 18:56:51.000000000 +0200
@@ -335,6 +335,10 @@ struct nested_vmx {
        unsigned long l1_arch_cr3;
        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;
+       /* true if last exit was of L2, and had a valid idt_vectoring_info */
+       bool valid_idt_vectoring_info;
+       /* These are saved if valid_idt_vectoring_info */
+       u32 vm_exit_instruction_len, idt_vectoring_error_code;
 };
 
 struct vcpu_vmx {
@@ -5384,6 +5388,22 @@ static void vmx_cancel_injection(struct 
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void nested_handle_valid_idt_vectoring_info(struct vcpu_vmx *vmx)
+{
+       int irq  = vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
+       int type = vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+       int err_code_valid = vmx->idt_vectoring_info &
+               VECTORING_INFO_DELIVER_CODE_MASK;
+       vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+               irq | type | INTR_INFO_VALID_MASK | err_code_valid);
+
+       vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+               vmx->nested.vm_exit_instruction_len);
+       if (err_code_valid)
+               vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+                       vmx->nested.idt_vectoring_error_code);
+}
+
 static inline void sync_cached_regs_to_vmcs(struct kvm_vcpu *vcpu)
 {
        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -5405,6 +5425,9 @@ static void vmx_vcpu_run(struct kvm_vcpu
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (is_guest_mode(vcpu) && vmx->nested.valid_idt_vectoring_info)
+               nested_handle_valid_idt_vectoring_info(vmx);
+
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();
@@ -5525,6 +5548,15 @@ static void vmx_vcpu_run(struct kvm_vcpu
 
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
+       vmx->nested.valid_idt_vectoring_info = is_guest_mode(vcpu) &&
+               (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+       if (vmx->nested.valid_idt_vectoring_info) {
+               vmx->nested.vm_exit_instruction_len =
+                       vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+               vmx->nested.idt_vectoring_error_code =
+                       vmcs_read32(IDT_VECTORING_ERROR_CODE);
+       }
+
        asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->launched = 1;
 