Reviewed-by: Peter Maydell <peter.mayd...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/tcg/translate-a64.c | 176 ++++++++++++++++++++++++++++++---
 target/arm/tcg/a64.decode     |  24 +++++
 2 files changed, 186 insertions(+), 14 deletions(-)
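Note (not part of the commit message): the per-element operation behind the
new narrowing helpers is "shift right, optionally with rounding, then
saturate into the narrow type". As a rough standalone C sketch of the
SQRSHRN halfword-to-byte case — the scalar function below is illustrative
only, not QEMU code; the in-tree helpers operate on packed elements and
record saturation (QC) via tcg_env:

#include <stdint.h>
#include <stdio.h>

/*
 * Rounding shift right by 'shift' (1..16), then saturate to int8_t.
 * Scalar shape of SQRSHRN on one 16-bit source element; the real
 * helpers additionally set the QC flag when saturation occurs.
 */
static int8_t sqrshrn_h_to_b(int16_t x, int shift)
{
    /* Widen so the rounding addend cannot overflow. */
    int32_t v = ((int32_t)x + (1 << (shift - 1))) >> shift;
    if (v > INT8_MAX) {
        v = INT8_MAX;           /* saturate high */
    } else if (v < INT8_MIN) {
        v = INT8_MIN;           /* saturate low */
    }
    return (int8_t)v;
}

int main(void)
{
    printf("%d\n", sqrshrn_h_to_b(INT16_MAX, 4)); /* 2048 saturates to 127 */
    printf("%d\n", sqrshrn_h_to_b(-100, 3));      /* (-100+4)>>3 = -12 */
    return 0;
}

The generated code below follows the same two-step structure: a plain or
rounding shift primitive (tcg_gen_sari/shri_i64 or the gen_*rshr_* helpers)
followed by one of the neon narrow/unarrow saturating helpers.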
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 77324e0145..e6290e1145 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -7163,6 +7163,122 @@ static bool do_vec_shift_imm_narrow(DisasContext *s, arg_qrri_e *a,
     return true;
 }
 
+static void gen_sqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_sari_i64(d, s, i);
+    tcg_gen_ext16u_i64(d, d);
+    gen_helper_neon_narrow_sat_s8(d, tcg_env, d);
+}
+
+static void gen_sqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_sari_i64(d, s, i);
+    tcg_gen_ext32u_i64(d, d);
+    gen_helper_neon_narrow_sat_s16(d, tcg_env, d);
+}
+
+static void gen_sqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_sshr_d(d, s, i);
+    gen_helper_neon_narrow_sat_s32(d, tcg_env, d);
+}
+
+static void gen_uqshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_shri_i64(d, s, i);
+    gen_helper_neon_narrow_sat_u8(d, tcg_env, d);
+}
+
+static void gen_uqshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_shri_i64(d, s, i);
+    gen_helper_neon_narrow_sat_u16(d, tcg_env, d);
+}
+
+static void gen_uqshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_ushr_d(d, s, i);
+    gen_helper_neon_narrow_sat_u32(d, tcg_env, d);
+}
+
+static void gen_sqshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_sari_i64(d, s, i);
+    tcg_gen_ext16u_i64(d, d);
+    gen_helper_neon_unarrow_sat8(d, tcg_env, d);
+}
+
+static void gen_sqshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    tcg_gen_sari_i64(d, s, i);
+    tcg_gen_ext32u_i64(d, d);
+    gen_helper_neon_unarrow_sat16(d, tcg_env, d);
+}
+
+static void gen_sqshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_sshr_d(d, s, i);
+    gen_helper_neon_unarrow_sat32(d, tcg_env, d);
+}
+
+static void gen_sqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_bhs(d, s, i);
+    tcg_gen_ext16u_i64(d, d);
+    gen_helper_neon_narrow_sat_s8(d, tcg_env, d);
+}
+
+static void gen_sqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_bhs(d, s, i);
+    tcg_gen_ext32u_i64(d, d);
+    gen_helper_neon_narrow_sat_s16(d, tcg_env, d);
+}
+
+static void gen_sqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_d(d, s, i);
+    gen_helper_neon_narrow_sat_s32(d, tcg_env, d);
+}
+
+static void gen_uqrshrn_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_urshr_bhs(d, s, i);
+    gen_helper_neon_narrow_sat_u8(d, tcg_env, d);
+}
+
+static void gen_uqrshrn_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_urshr_bhs(d, s, i);
+    gen_helper_neon_narrow_sat_u16(d, tcg_env, d);
+}
+
+static void gen_uqrshrn_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_urshr_d(d, s, i);
+    gen_helper_neon_narrow_sat_u32(d, tcg_env, d);
+}
+
+static void gen_sqrshrun_b(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_bhs(d, s, i);
+    tcg_gen_ext16u_i64(d, d);
+    gen_helper_neon_unarrow_sat8(d, tcg_env, d);
+}
+
+static void gen_sqrshrun_h(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_bhs(d, s, i);
+    tcg_gen_ext32u_i64(d, d);
+    gen_helper_neon_unarrow_sat16(d, tcg_env, d);
+}
+
+static void gen_sqrshrun_s(TCGv_i64 d, TCGv_i64 s, int64_t i)
+{
+    gen_srshr_d(d, s, i);
+    gen_helper_neon_unarrow_sat32(d, tcg_env, d);
+}
+
 static WideShiftImmFn * const shrn_fns[] = {
     tcg_gen_shri_i64,
     tcg_gen_shri_i64,
@@ -7177,6 +7293,48 @@ static WideShiftImmFn * const rshrn_fns[] = {
 };
 TRANS(RSHRN_v, do_vec_shift_imm_narrow, a, rshrn_fns, 0)
 
+static WideShiftImmFn * const sqshrn_fns[] = {
+    gen_sqshrn_b,
+    gen_sqshrn_h,
+    gen_sqshrn_s,
+};
+TRANS(SQSHRN_v, do_vec_shift_imm_narrow, a, sqshrn_fns, MO_SIGN)
+
+static WideShiftImmFn * const uqshrn_fns[] = {
+    gen_uqshrn_b,
+    gen_uqshrn_h,
+    gen_uqshrn_s,
+};
+TRANS(UQSHRN_v, do_vec_shift_imm_narrow, a, uqshrn_fns, 0)
+
+static WideShiftImmFn * const sqshrun_fns[] = {
+    gen_sqshrun_b,
+    gen_sqshrun_h,
+    gen_sqshrun_s,
+};
+TRANS(SQSHRUN_v, do_vec_shift_imm_narrow, a, sqshrun_fns, MO_SIGN)
+
+static WideShiftImmFn * const sqrshrn_fns[] = {
+    gen_sqrshrn_b,
+    gen_sqrshrn_h,
+    gen_sqrshrn_s,
+};
+TRANS(SQRSHRN_v, do_vec_shift_imm_narrow, a, sqrshrn_fns, MO_SIGN)
+
+static WideShiftImmFn * const uqrshrn_fns[] = {
+    gen_uqrshrn_b,
+    gen_uqrshrn_h,
+    gen_uqrshrn_s,
+};
+TRANS(UQRSHRN_v, do_vec_shift_imm_narrow, a, uqrshrn_fns, 0)
+
+static WideShiftImmFn * const sqrshrun_fns[] = {
+    gen_sqrshrun_b,
+    gen_sqrshrun_h,
+    gen_sqrshrun_s,
+};
+TRANS(SQRSHRUN_v, do_vec_shift_imm_narrow, a, sqrshrun_fns, MO_SIGN)
+
 /*
  * Advanced SIMD Scalar Shift by Immediate
  */
@@ -10514,20 +10672,6 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
     }
 
     switch (opcode) {
-    case 0x10: /* SHRN / SQSHRUN */
-    case 0x11: /* RSHRN / SQRSHRUN */
-        if (is_u) {
-            handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
-                                   opcode, rn, rd);
-        } else {
-            unallocated_encoding(s);
-        }
-        break;
-    case 0x12: /* SQSHRN / UQSHRN */
-    case 0x13: /* SQRSHRN / UQRSHRN */
-        handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
-                               opcode, rn, rd);
-        break;
     case 0x1c: /* SCVTF / UCVTF */
         handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
                                      opcode, rn, rd);
@@ -10544,6 +10688,10 @@ static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
     case 0x0a: /* SHL / SLI */
     case 0x0c: /* SQSHLU */
     case 0x0e: /* SQSHL, UQSHL */
+    case 0x10: /* SHRN / SQSHRUN */
+    case 0x11: /* RSHRN / SQRSHRUN */
+    case 0x12: /* SQSHRN / UQSHRN */
+    case 0x13: /* SQRSHRN / UQRSHRN */
     case 0x14: /* SSHLL / USHLL */
         unallocated_encoding(s);
         return;
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index 63e04ddfcd..042dc79d88 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -1302,6 +1302,30 @@ SQSHLU_vi       0.10 11110 .... ... 01100 1 ..... .....   @q_shli_h
 SQSHLU_vi       0.10 11110 .... ... 01100 1 ..... .....   @q_shli_s
 SQSHLU_vi       0.10 11110 .... ... 01100 1 ..... .....   @q_shli_d
 
+SQSHRN_v        0.00 11110 .... ... 10010 1 ..... .....   @q_shri_b
+SQSHRN_v        0.00 11110 .... ... 10010 1 ..... .....   @q_shri_h
+SQSHRN_v        0.00 11110 .... ... 10010 1 ..... .....   @q_shri_s
+
+UQSHRN_v        0.10 11110 .... ... 10010 1 ..... .....   @q_shri_b
+UQSHRN_v        0.10 11110 .... ... 10010 1 ..... .....   @q_shri_h
+UQSHRN_v        0.10 11110 .... ... 10010 1 ..... .....   @q_shri_s
+
+SQSHRUN_v       0.10 11110 .... ... 10000 1 ..... .....   @q_shri_b
+SQSHRUN_v       0.10 11110 .... ... 10000 1 ..... .....   @q_shri_h
+SQSHRUN_v       0.10 11110 .... ... 10000 1 ..... .....   @q_shri_s
+
+SQRSHRN_v       0.00 11110 .... ... 10011 1 ..... .....   @q_shri_b
+SQRSHRN_v       0.00 11110 .... ... 10011 1 ..... .....   @q_shri_h
+SQRSHRN_v       0.00 11110 .... ... 10011 1 ..... .....   @q_shri_s
+
+UQRSHRN_v       0.10 11110 .... ... 10011 1 ..... .....   @q_shri_b
+UQRSHRN_v       0.10 11110 .... ... 10011 1 ..... .....   @q_shri_h
+UQRSHRN_v       0.10 11110 .... ... 10011 1 ..... .....   @q_shri_s
+
+SQRSHRUN_v      0.10 11110 .... ... 10001 1 ..... .....   @q_shri_b
+SQRSHRUN_v      0.10 11110 .... ... 10001 1 ..... .....   @q_shri_h
+SQRSHRUN_v      0.10 11110 .... ... 10001 1 ..... .....   @q_shri_s
+
 # Advanced SIMD scalar shift by immediate
 
 @shri_d         .... ..... 1 ...... ..... . rn:5 rd:5 \
-- 
2.43.0