When a fault triggers a task switch, the error code, if present, has to
be pushed onto the new task's stack. Implement the missing bits.

Signed-off-by: Jan Kiszka <[email protected]>
---

This is a backport of the proposed fix for master. It applies at least
down to 2.6.32 and should be considered for the stable queues.
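
For reference, the core of the change is the new push in kvm_task_switch():
once the switch to the new task has completed, the pending error code is
written to the new task's stack and RSP is adjusted. A minimal sketch of
that step, using the helpers visible in the x86.c hunk below (the function
name push_error_code is hypothetical, not part of the patch):

	/*
	 * Push the pending error code onto the new task's stack after the
	 * task switch has completed. A 32-bit TSS (descriptor type bit 3
	 * set) takes a 4-byte push, a 16-bit TSS a 2-byte push.
	 */
	static int push_error_code(struct kvm_vcpu *vcpu,
				   struct desc_struct *nseg_desc, u32 error_code)
	{
		unsigned size = (nseg_desc->type & 8) ? 4 : 2;
		unsigned long rsp = kvm_register_read(vcpu, VCPU_REGS_RSP) - size;

		/* Write the error code at SS.base + new RSP in guest memory. */
		if (kvm_write_guest(vcpu->kvm,
				    get_segment_base(vcpu, VCPU_SREG_SS) + rsp,
				    &error_code, size))
			return -EFAULT;	/* caller reports emulation failure */

		kvm_register_write(vcpu, VCPU_REGS_RSP, rsp);
		return 0;
	}

Whether an error code is pending at all is derived from the intercept
information: on SVM from bit SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE of
exit_info_2 (with the code itself in the low 32 bits), on VMX from
VECTORING_INFO_DELIVER_CODE_MASK in the IDT-vectoring info plus
IDT_VECTORING_ERROR_CODE.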

 arch/x86/include/asm/kvm_host.h |    3 ++-
 arch/x86/include/asm/svm.h      |    1 +
 arch/x86/kvm/svm.c              |   11 ++++++++++-
 arch/x86/kvm/vmx.c              |   12 +++++++++++-
 arch/x86/kvm/x86.c              |   17 ++++++++++++++++-
 5 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79..94bb19b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -616,7 +616,8 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+                   bool has_error_code, u32 error_code);
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 38638cd..7d13531 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -238,6 +238,7 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
 #define        SVM_EXIT_READ_CR0       0x000
 #define        SVM_EXIT_READ_CR3       0x003
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 52f78dd..17668db 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2012,6 +2012,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
                svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
        uint32_t idt_v =
                svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
+       bool has_error_code = false;
+       u32 error_code = 0;
 
        tss_selector = (u16)svm->vmcb->control.exit_info_1;
 
@@ -2032,6 +2034,12 @@ static int task_switch_interception(struct vcpu_svm *svm)
                        svm->vcpu.arch.nmi_injected = false;
                        break;
                case SVM_EXITINTINFO_TYPE_EXEPT:
+                       if (svm->vmcb->control.exit_info_2 &
+                           (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
+                               has_error_code = true;
+                               error_code =
+                                       (u32)svm->vmcb->control.exit_info_2;
+                       }
                        kvm_clear_exception_queue(&svm->vcpu);
                        break;
                case SVM_EXITINTINFO_TYPE_INTR:
@@ -2048,7 +2056,8 @@ static int task_switch_interception(struct vcpu_svm *svm)
             (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
                skip_emulated_instruction(&svm->vcpu);
 
-       return kvm_task_switch(&svm->vcpu, tss_selector, reason);
+       return kvm_task_switch(&svm->vcpu, tss_selector, reason,
+                              has_error_code, error_code);
 }
 
 static int cpuid_interception(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14873b9..5ba2d35 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3286,6 +3286,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
+       bool has_error_code = false;
+       u32 error_code = 0;
        u16 tss_selector;
        int reason, type, idt_v;
 
@@ -3308,6 +3310,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                        kvm_clear_interrupt_queue(vcpu);
                        break;
                case INTR_TYPE_HARD_EXCEPTION:
+                       if (vmx->idt_vectoring_info &
+                           VECTORING_INFO_DELIVER_CODE_MASK) {
+                               has_error_code = true;
+                               error_code =
+                                       vmcs_read32(IDT_VECTORING_ERROR_CODE);
+                       }
+                       /* fall through */
                case INTR_TYPE_SOFT_EXCEPTION:
                        kvm_clear_exception_queue(vcpu);
                        break;
@@ -3322,7 +3331,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);
 
-       if (!kvm_task_switch(vcpu, tss_selector, reason))
+       if (!kvm_task_switch(vcpu, tss_selector, reason, has_error_code,
+                            error_code))
                return 0;
 
        /* clear all local breakpoint enable flags */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 35eabd8..fa90e4e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5137,7 +5137,8 @@ out:
        return ret;
 }
 
-int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
+                   bool has_error_code, u32 error_code)
 {
        struct kvm_segment tr_seg;
        struct desc_struct cseg_desc;
@@ -5213,6 +5214,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
        tr_seg.type = 11;
        kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+
+       if (ret && has_error_code) {
+               unsigned size = (nseg_desc.type & 8) ? 4 : 2;
+               unsigned long rsp =
+                       kvm_register_read(vcpu, VCPU_REGS_RSP) - size;
+
+               if (kvm_write_guest(vcpu->kvm,
+                                   get_segment_base(vcpu, VCPU_SREG_SS) + rsp,
+                                   &error_code, size))
+                       ret = 0;
+               else
+                       kvm_register_write(vcpu, VCPU_REGS_RSP, rsp);
+       }
+
 out:
        return ret;
 }
