ldq takes a pointer to the first byte to load the 64-bit word into; ldo takes a pointer to the first byte of the whole ZMMReg. Make them consistent, which will be useful in the new SSE decoder's load/writeback routines.
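For illustration only (not part of the patch), and assuming the helper
signatures shown in the diff below, a caller can now pass the same
XMMReg offset to both helpers:

    /* sketch: "s" and "reg" come from the translator context */
    int xmm_ofs = offsetof(CPUX86State, xmm_regs[reg].ZMM_X(0));

    gen_ldo_env_A0(s, xmm_ofs); /* 16-byte load from [A0] into the reg */
    gen_sto_env_A0(s, xmm_ofs); /* 16-byte store of the reg to [A0] */

Since ZMM_X(0) is the low half of the ZMMReg, the value is numerically
unchanged for the low 128 bits; what changes is that the helpers treat
it as the offset of a 128-bit XMMReg, the same convention ldq uses for
64-bit words.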
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 target/i386/tcg/translate.c | 43 ++++++++++++++++++++++---------------------
 1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index 001af76663..9a85010dcd 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2761,28 +2761,28 @@ static inline void gen_ldo_env_A0(DisasContext *s, int offset)
 {
     int mem_index = s->mem_index;
     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
-    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
-    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
 }
 
 static inline void gen_sto_env_A0(DisasContext *s, int offset)
 {
     int mem_index = s->mem_index;
-    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
-    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
 }
 
 static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
 {
-    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
-    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
+    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
+    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
+    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
+    tcg_gen_st_i64(s->tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
 }
 
 static inline void gen_op_movq(DisasContext *s, int d_offset, int s_offset)
@@ -2804,6 +2804,7 @@ static inline void gen_op_movq_env_0(DisasContext *s, int d_offset)
 }
 
 #define ZMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg])
+#define XMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg].ZMM_X(0))
 
 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
@@ -3317,13 +3318,13 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_sto_env_A0(s, ZMM_OFFSET(reg));
+        gen_sto_env_A0(s, XMM_OFFSET(reg));
         break;
     case 0x3f0: /* lddqu */
         if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+        gen_ldo_env_A0(s, XMM_OFFSET(reg));
         break;
     case 0x22b: /* movntss */
     case 0x32b: /* movntsd */
@@ -3392,10 +3393,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x26f: /* movdqu xmm, ea */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, XMM_OFFSET(reg));
         } else {
             rm = (modrm & 7) | REX_B(s);
-            gen_op_movo(s, ZMM_OFFSET(reg), ZMM_OFFSET(rm));
+            gen_op_movo(s, XMM_OFFSET(reg), XMM_OFFSET(rm));
         }
         break;
     case 0x210: /* movss xmm, ea */
@@ -3451,7 +3452,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x212: /* movsldup */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, XMM_OFFSET(reg));
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
@@ -3493,7 +3494,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x216: /* movshdup */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+            gen_ldo_env_A0(s, XMM_OFFSET(reg));
         } else {
             rm = (modrm & 7) | REX_B(s);
             gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
@@ -3587,10 +3588,10 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
     case 0x27f: /* movdqu ea, xmm */
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            gen_sto_env_A0(s, ZMM_OFFSET(reg));
+            gen_sto_env_A0(s, XMM_OFFSET(reg));
         } else {
             rm = (modrm & 7) | REX_B(s);
-            gen_op_movo(s, ZMM_OFFSET(rm), ZMM_OFFSET(reg));
+            gen_op_movo(s, XMM_OFFSET(rm), XMM_OFFSET(reg));
         }
         break;
     case 0x211: /* movss ea, xmm */
@@ -3742,7 +3743,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         gen_helper_enter_mmx(cpu_env);
         if (mod != 3) {
             gen_lea_modrm(env, s, modrm);
-            op2_offset = offsetof(CPUX86State,xmm_t0);
+            op2_offset = offsetof(CPUX86State,xmm_t0.ZMM_X(0));
             gen_ldo_env_A0(s, op2_offset);
         } else {
             rm = (modrm & 7) | REX_B(s);
@@ -3906,9 +3907,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         }
 
         if (b1) {
-            op1_offset = ZMM_OFFSET(reg);
+            op1_offset = XMM_OFFSET(reg);
             if (mod == 3) {
-                op2_offset = ZMM_OFFSET(rm | REX_B(s));
+                op2_offset = XMM_OFFSET(rm | REX_B(s));
             } else {
                 op2_offset = offsetof(CPUX86State,xmm_t0);
                 gen_lea_modrm(env, s, modrm);
@@ -4516,7 +4517,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
             if (mod == 3) {
                 op2_offset = ZMM_OFFSET(rm | REX_B(s));
             } else {
-                op2_offset = offsetof(CPUX86State, xmm_t0);
+                op2_offset = offsetof(CPUX86State, xmm_t0.ZMM_X(0));
                 gen_lea_modrm(env, s, modrm);
                 gen_ldo_env_A0(s, op2_offset);
             }
@@ -4625,7 +4626,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                     break;
                 default:
                     /* 128 bit access */
-                    gen_ldo_env_A0(s, op2_offset);
+                    gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_X(0)));
                     break;
                 }
             } else {
-- 
2.37.2