Signed-off-by: Jan Beulich
---
v3: Ignore VEX.l. Add fic.exn_raised constraint to invoke_stub() use.
v2: Add missing RET to stub. Generate #UD (instead of simply failing)
when VEX.l is disallowed.
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -249,7 +249,7 @@ static const struct {
[0x2a] = { DstImplicit|SrcMem|ModRM|Mov, simd_other },
[0x2b] = { DstMem|SrcImplicit|ModRM|Mov, simd_any_fp },
[0x2c ... 0x2d] = { DstImplicit|SrcMem|ModRM|Mov, simd_other },
-[0x2e ... 0x2f] = { ImplicitOps|ModRM },
+[0x2e ... 0x2f] = { ImplicitOps|ModRM|TwoOp },
[0x30 ... 0x35] = { ImplicitOps },
[0x37] = { ImplicitOps },
[0x38] = { DstReg|SrcMem|ModRM },
@@ -5462,6 +5462,54 @@ x86_emulate(
state->simd_size = simd_none;
break;
+CASE_SIMD_PACKED_FP(, 0x0f, 0x2e): /* ucomis{s,d} xmm/mem,xmm */
+CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2e): /* vucomis{s,d} xmm/mem,xmm */
+CASE_SIMD_PACKED_FP(, 0x0f, 0x2f): /* comis{s,d} xmm/mem,xmm */
+CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x2f): /* vcomis{s,d} xmm/mem,xmm */
+if ( vex.opcx == vex_none )
+{
+if ( vex.pfx )
+vcpu_must_have(sse2);
+else
+vcpu_must_have(sse);
+get_fpu(X86EMUL_FPU_xmm, );
+}
+else
+{
+host_and_vcpu_must_have(avx);
+get_fpu(X86EMUL_FPU_ymm, );
+}
+
+opc = init_prefixes(stub);
+opc[0] = b;
+opc[1] = modrm;
+if ( ea.type == OP_MEM )
+{
+rc = ops->read(ea.mem.seg, ea.mem.off, mmvalp, vex.pfx ? 8 : 4,
+ ctxt);
+if ( rc != X86EMUL_OKAY )
+goto done;
+
+/* Convert memory operand to (%rAX). */
+rex_prefix &= ~REX_B;
+vex.b = 1;
+opc[1] &= 0x38;
+}
+fic.insn_bytes = PFX_BYTES + 2;
+opc[2] = 0xc3;
+
+invoke_stub(_PRE_EFLAGS("[eflags]", "[mask]", "[tmp]"),
+_POST_EFLAGS("[eflags]", "[mask]", "[tmp]"),
+[eflags] "+g" (_regs._eflags),
+[tmp] "=&r" (cr4 /* dummy */), "+m" (*mmvalp),
+"+m" (fic.exn_raised)
+: [func] "rm" (stub.func), "a" (mmvalp),
+  [mask] "i" (EFLAGS_MASK));
+
+put_stub(stub);
+put_fpu();
+break;
+
case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->write_msr == NULL);
x86emul: support {,V}{,U}COMIS{S,D}