These are a mixed batch, including the first two horizontal (66 and F2
prefixes only) operations, more moves, and the SSE4a extract/insert
instructions.

Because SSE4a is pretty rare, I chose to leave the helpers as they are,
but it is possible to unify them by loading the index and length from
the source XMM register.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
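As an aside on the unification mentioned above, here is an untested
sketch of what the EXTRQ half could look like, reusing the *_i helper
and the xmm_offset() idiom from gen_MOVQ below (the function name is
made up and nothing like it is part of this patch; the field positions
come from the AMD manual):

static void gen_EXTRQ_R_unified(DisasContext *s, CPUX86State *env,
                                X86DecodedInsn *decode)
{
    TCGv_i32 length = tcg_temp_new_i32();
    TCGv_i32 index = tcg_temp_new_i32();
    int lo_ofs = decode->op[2].offset
        - xmm_offset(decode->op[2].ot)
        + xmm_offset(MO_64);

    /* length is in bits 5:0 and index in bits 13:8 of the low quadword */
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, lo_ofs);
    tcg_gen_extrl_i64_i32(length, s->tmp1_i64);
    tcg_gen_extract_i32(index, length, 8, 6);
    tcg_gen_andi_i32(length, length, 63);
    gen_helper_extrq_i(cpu_env, s->ptr0, index, length);

    tcg_temp_free_i32(length);
    tcg_temp_free_i32(index);
}

INSERTQ keeps the same two fields in bits 77:72 and 69:64 of the source,
i.e. in the second quadword, so it would need a separate load.
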
 target/i386/tcg/decode-new.c.inc | 23 +++++++++
 target/i386/tcg/emit.c.inc       | 81 ++++++++++++++++++++++++++++++++
 target/i386/tcg/translate.c      |  1 +
 3 files changed, 105 insertions(+)

diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 5a94e05d71..6aa8bac74f 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -159,6 +159,22 @@ static void decode_0F6F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
     }
 }
 
+static void decode_0F7E(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
+{
+    static const X86OpEntry movd_from_vec =
+        X86_OP_ENTRY3(MOVD_from, E,y, None,None, V,y, vex5 mmx);
+    static const X86OpEntry movq =
+        X86_OP_ENTRY3(MOVQ, V,x, None,None, W,q, vex5);  /* wrong dest Vy on SDM! */
+
+    if (s->prefix & PREFIX_REPNZ) {
+        entry->gen = NULL;
+    } else if (s->prefix & PREFIX_REPZ) {
+        *entry = movq;
+    } else {
+        *entry = movd_from_vec;
+    }
+}
+
 static const X86OpEntry opcodes_0F38_00toEF[240] = {
 };
 
@@ -297,6 +313,13 @@ static const X86OpEntry opcodes_0F[256] = {
     [0x6e] = X86_OP_ENTRY3(MOVD_to, V,x, None,None, E,y, vex5 mmx p_00_66),  /* wrong dest Vy on SDM! */
     [0x6f] = X86_OP_GROUP3(0F6F, V,x, None,None, W,x, vex5 mmx p_00_66_f3),
 
+    [0x78] = X86_OP_ENTRY2(SSE4a_I, V,x, I,w, cpuid(SSE4A) p_66_f2),
+    [0x79] = X86_OP_ENTRY2(SSE4a_R, V,x, W,x, cpuid(SSE4A) p_66_f2),
+    [0x7c] = X86_OP_ENTRY3(VHADD, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2),
+    [0x7d] = X86_OP_ENTRY3(VHSUB, V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2),
+    [0x7e] = X86_OP_GROUP0(0F7E),
+    [0x7f] = X86_OP_GROUP3(0F6F, W,x, None,None, V,x, vex5 mmx p_00_66_f3),
+
     /* Incorrectly missing from 2-17 */
     [0xd8] = X86_OP_ENTRY3(PSUBUSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
     [0xd9] = X86_OP_ENTRY3(PSUBUSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 5feb50efdb..2053c9d8fb 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -380,6 +380,30 @@ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod
 UNARY_FP32_SSE(VRSQRT, rsqrt)
 UNARY_FP32_SSE(VRCP, rcp)
 
+/*
+ * 66 = v*pd Vpd, Hpd, Wpd
+ * f2 = v*ps Vps, Hps, Wps
+ */
+static inline void gen_horizontal_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
+                                         SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
+                                         SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm)
+{
+    SSEFunc_0_eppp ps, pd, fn;
+    ps = s->vex_l ? ps_ymm : ps_xmm;
+    pd = s->vex_l ? pd_ymm : pd_xmm;
+    fn = s->prefix & PREFIX_DATA ? pd : ps;
+    fn(cpu_env, s->ptr0, s->ptr1, s->ptr2);
+}
+#define HORIZONTAL_FP_SSE(uname, lname) \
+static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
+{ \
+    gen_horizontal_fp_sse(s, env, decode, \
+                          gen_helper_##lname##pd_xmm, gen_helper_##lname##ps_xmm, \
+                          gen_helper_##lname##pd_ymm, gen_helper_##lname##ps_ymm); \
+}
+HORIZONTAL_FP_SSE(VHADD, hadd)
+HORIZONTAL_FP_SSE(VHSUB, hsub)
+
 /*
  * 00 = p* Pq, Qq (if mmx not NULL; no VEX)
  * 66 = vp* Vx, Hx, Wx
@@ -621,6 +645,28 @@ static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
     }
 }
 
+static void gen_MOVD_from(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    MemOp ot = decode->op[2].ot;
+    int lo_ofs = decode->op[2].offset
+        - xmm_offset(decode->op[2].ot)
+        + xmm_offset(ot);
+
+    switch (ot) {
+    case MO_32:
+#ifdef TARGET_X86_64
+        tcg_gen_ld_i32(s->tmp2_i32, cpu_env, lo_ofs);
+        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
+        break;
+    case MO_64:
+#endif
+        tcg_gen_ld_tl(s->T0, cpu_env, lo_ofs);
+        break;
+    default:
+        abort();
+    }
+}
+
 static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
 {
     MemOp ot = decode->op[2].ot;
@@ -661,6 +707,18 @@ static void gen_MOVMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode
     tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
 }
 
+static void gen_MOVQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    int vec_len = sse_vec_len(s, decode);
+    int lo_ofs = decode->op[0].offset
+        - xmm_offset(decode->op[0].ot)
+        + xmm_offset(MO_64);
+
+    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, decode->op[2].offset);
+    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
+    tcg_gen_st_i64(s->tmp1_i64, cpu_env, lo_ofs);
+}
+
 static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
 {
     MemOp ot = decode->op[0].ot;
@@ -878,6 +936,29 @@ static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
     tcg_gen_shr_tl(s->T0, s->T0, s->T1);
 }
 
+static void gen_SSE4a_I(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    TCGv_i32 length = tcg_const_i32(decode->immediate & 255);
+    TCGv_i32 index = tcg_const_i32(decode->immediate >> 8);
+
+    if (s->prefix & PREFIX_DATA) {
+        gen_helper_extrq_i(cpu_env, s->ptr0, index, length);
+    } else {
+        gen_helper_insertq_i(cpu_env, s->ptr0, index, length);
+    }
+    tcg_temp_free_i32(length);
+    tcg_temp_free_i32(index);
+}
+
+static void gen_SSE4a_R(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
+{
+    if (s->prefix & PREFIX_DATA) {
+        gen_helper_extrq_r(cpu_env, s->ptr0, s->ptr2);
+    } else {
+        gen_helper_insertq_r(cpu_env, s->ptr0, s->ptr2);
+    }
+}
+
 #define gen_VAND gen_PAND
 #define gen_VANDN gen_PANDN
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 8ef419dd59..53d693279a 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -4668,6 +4668,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
 #endif
     if (use_new &&
         ((b >= 0x150 && b <= 0x16f) ||
+         (b >= 0x178 && b <= 0x17f) ||
          (b >= 0x1d8 && b <= 0x1ff && (b & 8)))) {
         return disas_insn_new(s, cpu, b + 0x100);
     }
-- 
2.37.2