We always pass the same value for round; compute it within common code.

Reviewed-by: Philippe Mathieu-Daudé <phi...@linaro.org>
Reviewed-by: Peter Maydell <peter.mayd...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/arm/tcg/translate-a64.c | 32 ++++++--------------------------
 1 file changed, 6 insertions(+), 26 deletions(-)
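For context, the constant being pushed into the helper is the usual rounding
bias for a right shift: adding 1 << (shift - 1) before shifting by 'shift'
rounds the result to nearest. Below is a minimal standalone C sketch of that
arithmetic (not part of this patch; 'rshr_round_u64' is a made-up name, and
the signed/overflow handling the real helper does via a widened intermediate
is ignored here):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Rounded unsigned right shift: add the rounding bias, then shift.
 * The real handle_shri_with_rndacc() also copes with signed inputs and
 * with the bias addition overflowing 64 bits (its extended_result path).
 */
static uint64_t rshr_round_u64(uint64_t src, int shift, bool round)
{
    assert(shift >= 1 && shift <= 63);
    if (round) {
        src += 1ULL << (shift - 1);   /* same constant as tcg_constant_i64() below */
    }
    return src >> shift;
}

For example, rshr_round_u64(7, 2, true) yields 2, whereas a plain 7 >> 2
truncates to 1.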
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index e00d7fbf48..e59236330a 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -9205,11 +9205,10 @@ static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
  * the vector and scalar code.
  */
 static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
-                                    TCGv_i64 tcg_rnd, bool accumulate,
+                                    bool round, bool accumulate,
                                     bool is_u, int size, int shift)
 {
     bool extended_result = false;
-    bool round = tcg_rnd != NULL;
     int ext_lshift = 0;
     TCGv_i64 tcg_src_hi;
 
@@ -9227,6 +9226,7 @@ static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
 
     /* Deal with the rounding step */
     if (round) {
+        TCGv_i64 tcg_rnd = tcg_constant_i64(1ull << (shift - 1));
         if (extended_result) {
             TCGv_i64 tcg_zero = tcg_constant_i64(0);
             if (!is_u) {
@@ -9294,7 +9294,6 @@ static void handle_scalar_simd_shri(DisasContext *s,
     bool insert = false;
     TCGv_i64 tcg_rn;
     TCGv_i64 tcg_rd;
-    TCGv_i64 tcg_round;
 
     if (!extract32(immh, 3, 1)) {
         unallocated_encoding(s);
@@ -9320,12 +9319,6 @@ static void handle_scalar_simd_shri(DisasContext *s,
         break;
     }
 
-    if (round) {
-        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
-    } else {
-        tcg_round = NULL;
-    }
-
     tcg_rn = read_fp_dreg(s, rn);
     tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
 
@@ -9339,7 +9332,7 @@ static void handle_scalar_simd_shri(DisasContext *s,
             tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, 0, esize - shift);
         }
     } else {
-        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+        handle_shri_with_rndacc(tcg_rd, tcg_rn, round,
                                 accumulate, is_u, size, shift);
     }
 
@@ -9392,7 +9385,7 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     int elements = is_scalar ? 1 : (64 / esize);
     bool round = extract32(opcode, 0, 1);
     MemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
-    TCGv_i64 tcg_rn, tcg_rd, tcg_round;
+    TCGv_i64 tcg_rn, tcg_rd;
     TCGv_i32 tcg_rd_narrowed;
     TCGv_i64 tcg_final;
 
@@ -9437,15 +9430,9 @@ static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
     tcg_rd_narrowed = tcg_temp_new_i32();
     tcg_final = tcg_temp_new_i64();
 
-    if (round) {
-        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
-    } else {
-        tcg_round = NULL;
-    }
-
     for (i = 0; i < elements; i++) {
         read_vec_element(s, tcg_rn, rn, i, ldop);
-        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+        handle_shri_with_rndacc(tcg_rd, tcg_rn, round,
                                 false, is_u_shift, size+1, shift);
         narrowfn(tcg_rd_narrowed, tcg_env, tcg_rd);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
@@ -10495,7 +10482,6 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
     int shift = (2 * esize) - immhb;
     bool round = extract32(opcode, 0, 1);
     TCGv_i64 tcg_rn, tcg_rd, tcg_final;
-    TCGv_i64 tcg_round;
     int i;
 
     if (extract32(immh, 3, 1)) {
@@ -10512,15 +10498,9 @@ static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
     tcg_final = tcg_temp_new_i64();
     read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
 
-    if (round) {
-        tcg_round = tcg_constant_i64(1ULL << (shift - 1));
-    } else {
-        tcg_round = NULL;
-    }
-
     for (i = 0; i < elements; i++) {
         read_vec_element(s, tcg_rn, rn, i, size+1);
-        handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+        handle_shri_with_rndacc(tcg_rd, tcg_rn, round,
                                 false, true, size+1, shift);
 
         tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
-- 
2.43.0