We can implement this in two instructions, using SLI.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 tcg/aarch64/tcg-target.opc.h |  1 +
 tcg/aarch64/tcg-target.inc.c | 20 +++++++++++++++++++-
 2 files changed, 20 insertions(+), 1 deletion(-)
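Note (not part of the patch): per lane, SLI shifts the source left by the
immediate and inserts the result into the destination, preserving the
destination bits below the shift amount.  Combined with an unsigned right
shift of the original value, that yields a rotate.  A rough scalar model of
the two-instruction expansion for an 8-bit lane follows; rotl8_model is an
illustrative helper, not something in the tree:

    #include <stdint.h>

    /* Model of the expansion: t1 = x >> (bits - r), then SLI inserts
     * (x << r) above the low r bits of a destination preloaded with t1.
     */
    static uint8_t rotl8_model(uint8_t x, unsigned r)
    {
        unsigned bits = 8;                    /* lane width, i.e. 8 << vece for vece = 0 */
        uint8_t t1 = x >> (-r & (bits - 1));  /* matches tcg_gen_shri_vec(..., -a2 & mask) */
        uint8_t d = t1;                       /* destination starts as the shifted-right copy */
        /* SLI: keep the low r bits of d, insert (x << r) above them. */
        return (uint8_t)((x << r) | (d & ((1u << r) - 1)));
    }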
diff --git a/tcg/aarch64/tcg-target.opc.h b/tcg/aarch64/tcg-target.opc.h
index 26bfd9c460..bce30accd9 100644
--- a/tcg/aarch64/tcg-target.opc.h
+++ b/tcg/aarch64/tcg-target.opc.h
@@ -12,3 +12,4 @@
  */
 
 DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC)
+DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC)
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 3b5a5d78c7..4bc9b30254 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -557,6 +557,7 @@ typedef enum {
     I3614_SSHR      = 0x0f000400,
     I3614_SSRA      = 0x0f001400,
     I3614_SHL       = 0x0f005400,
+    I3614_SLI       = 0x2f005400,
     I3614_USHR      = 0x2f000400,
     I3614_USRA      = 0x2f001400,
 
@@ -2402,6 +2403,9 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_sari_vec:
         tcg_out_insn(s, 3614, SSHR, is_q, a0, a1, (16 << vece) - a2);
         break;
+    case INDEX_op_aa64_sli_vec:
+        tcg_out_insn(s, 3614, SLI, is_q, a0, a2, args[3] + (8 << vece));
+        break;
     case INDEX_op_shlv_vec:
         tcg_out_insn(s, 3616, USHL, is_q, vece, a0, a1, a2);
         break;
@@ -2488,6 +2492,7 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_shlv_vec:
     case INDEX_op_bitsel_vec:
         return 1;
+    case INDEX_op_rotli_vec:
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
         return -1;
@@ -2508,13 +2513,23 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
 {
     va_list va;
     TCGv_vec v0, v1, v2, t1;
+    TCGArg a2;
 
     va_start(va, a0);
     v0 = temp_tcgv_vec(arg_temp(a0));
     v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
-    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
+    a2 = va_arg(va, TCGArg);
+    v2 = temp_tcgv_vec(arg_temp(a2));
 
     switch (opc) {
+    case INDEX_op_rotli_vec:
+        t1 = tcg_temp_new_vec(type);
+        tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
+        vec_gen_4(INDEX_op_aa64_sli_vec, type, vece,
+                  tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
+        tcg_temp_free_vec(t1);
+        break;
+
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
         /* Right shifts are negative left shifts for AArch64.  */
@@ -2547,6 +2562,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
     static const TCGTargetOpDef lZ_l = { .args_ct_str = { "lZ", "l" } };
     static const TCGTargetOpDef r_r_r = { .args_ct_str = { "r", "r", "r" } };
     static const TCGTargetOpDef w_w_w = { .args_ct_str = { "w", "w", "w" } };
+    static const TCGTargetOpDef w_0_w = { .args_ct_str = { "w", "0", "w" } };
     static const TCGTargetOpDef w_w_wO = { .args_ct_str = { "w", "w", "wO" } };
     static const TCGTargetOpDef w_w_wN = { .args_ct_str = { "w", "w", "wN" } };
     static const TCGTargetOpDef w_w_wZ = { .args_ct_str = { "w", "w", "wZ" } };
@@ -2741,6 +2757,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
         return &w_w_wZ;
     case INDEX_op_bitsel_vec:
         return &w_w_w_w;
+    case INDEX_op_aa64_sli_vec:
+        return &w_0_w;
 
     default:
         return NULL;
-- 
2.20.1
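Follow-up note, also not part of the patch: the tcg_expand_vec_op hunk above
matches this model.  The shifted-right copy lands in a temp, the "w_0_w"
constraint for aa64_sli_vec ties the output register to that temp (SLI reads
and writes its destination), and SLI then inserts the left-shifted original
on top of it.  A throwaway self-check of the scalar sketch from the earlier
note, purely illustrative and assuming rotl8_model() is defined in the same
file:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Compare the model against a direct rotate for every 8-bit input. */
        for (unsigned x = 0; x < 256; x++) {
            for (unsigned r = 0; r < 8; r++) {
                uint8_t expect = (uint8_t)((x << r) | (x >> ((8 - r) & 7)));
                assert(rotl8_model((uint8_t)x, r) == expect);
            }
        }
        printf("rotl8_model matches a direct rotate for all 8-bit inputs\n");
        return 0;
    }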