This reverts commit 5045b468037dfe1c848827ce10e99d87f5669160.

Although the cs.dpl=cs.rpl check is mentioned in Table 7-1 of the SDM
as a condition that causes a #TSS exception, it does not appear in
Table 6-6, which lists the "invalid TSS conditions" that cause #TSS
exceptions. Since the check makes tests fail under emulation that pass
on bare metal, revert it.
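
For reference, the reverted check (removed in the diff below) made
__load_segment_descriptor() reject a code segment whose RPL differs
from its DPL when the segment is loaded as part of a task switch:

	case VCPU_SREG_CS:
		if (in_task_switch && rpl != dpl)
			goto exception;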

Signed-off-by: Nadav Amit <na...@cs.technion.ac.il>
---
 arch/x86/kvm/emulate.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56657b0..b73c9e8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1415,7 +1415,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 /* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-                                    u16 selector, int seg, u8 cpl, bool in_task_switch)
+                                    u16 selector, int seg, u8 cpl)
 {
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                        goto exception;
                break;
        case VCPU_SREG_CS:
-               if (in_task_switch && rpl != dpl)
-                       goto exception;
-
                if (!(seg_desc.type & 8))
                        goto exception;
 
@@ -1560,7 +1557,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
 {
        u8 cpl = ctxt->ops->cpl(ctxt);
-       return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
+       return __load_segment_descriptor(ctxt, selector, seg, cpl);
 }
 
 static void write_register_operand(struct operand *op)
@@ -2460,19 +2457,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
         * Now load segment descriptors. If fault happens at this stage
         * it is handled in a context of new task
         */
-       ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
@@ -2597,25 +2594,26 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
         * Now load segment descriptors. If fault happenes at this stage
         * it is handled in a context of new task
         */
-       ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
+                                       cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
-       ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
+       ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-- 
1.9.1
