On 4/15/25 12:24, Richard Henderson wrote:
Reviewed-by: Philippe Mathieu-Daudé <phi...@linaro.org>
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
  include/tcg/tcg-opc.h      |  5 +----
  tcg/optimize.c             | 10 +++++-----
  tcg/tcg-op.c               | 16 ++++++++--------
  tcg/tcg.c                  |  6 ++----
  docs/devel/tcg-ops.rst     |  4 ++--
  target/i386/tcg/emit.c.inc | 12 +-----------
  6 files changed, 19 insertions(+), 34 deletions(-)

diff --git a/include/tcg/tcg-opc.h b/include/tcg/tcg-opc.h
index c6848b3c63..1f995c54be 100644
--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -56,6 +56,7 @@ DEF(divu, 1, 2, 0, TCG_OPF_INT)
  DEF(divu2, 2, 3, 0, TCG_OPF_INT)
  DEF(eqv, 1, 2, 0, TCG_OPF_INT)
  DEF(extract, 1, 1, 2, TCG_OPF_INT)
+DEF(extract2, 1, 2, 1, TCG_OPF_INT)
  DEF(movcond, 1, 4, 1, TCG_OPF_INT)
  DEF(mul, 1, 2, 0, TCG_OPF_INT)
  DEF(muls2, 2, 2, 0, TCG_OPF_INT)
@@ -90,8 +91,6 @@ DEF(ld_i32, 1, 1, 1, 0)
  DEF(st8_i32, 0, 2, 1, 0)
  DEF(st16_i32, 0, 2, 1, 0)
  DEF(st_i32, 0, 2, 1, 0)
-/* shifts/rotates */
-DEF(extract2_i32, 1, 2, 1, 0)
  DEF(add2_i32, 2, 4, 0, 0)
  DEF(sub2_i32, 2, 4, 0, 0)
@@ -110,8 +109,6 @@ DEF(st8_i64, 0, 2, 1, 0)
  DEF(st16_i64, 0, 2, 1, 0)
  DEF(st32_i64, 0, 2, 1, 0)
  DEF(st_i64, 0, 2, 1, 0)
-/* shifts/rotates */
-DEF(extract2_i64, 1, 2, 1, 0)
  /* size changing ops */
  DEF(ext_i32_i64, 1, 1, 0, 0)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f1adea829b..9595b32d54 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1861,12 +1861,12 @@ static bool fold_extract2(OptContext *ctx, TCGOp *op)
          uint64_t v2 = arg_info(op->args[2])->val;
          int shr = op->args[3];

-        if (op->opc == INDEX_op_extract2_i64) {
-            v1 >>= shr;
-            v2 <<= 64 - shr;
-        } else {
+        if (ctx->type == TCG_TYPE_I32) {
              v1 = (uint32_t)v1 >> shr;
              v2 = (uint64_t)((int32_t)v2 << (32 - shr));
+        } else {
+            v1 >>= shr;
+            v2 <<= 64 - shr;
          }
          return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
      }
@@ -2866,7 +2866,7 @@ void tcg_optimize(TCGContext *s)
          case INDEX_op_extract:
              done = fold_extract(&ctx, op);
              break;
-        CASE_OP_32_64(extract2):
+        case INDEX_op_extract2:
              done = fold_extract2(&ctx, op);
              break;
          case INDEX_op_ext_i32_i64:
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index 5f95350d5d..edbb214f7c 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -921,7 +921,7 @@ void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
      t1 = tcg_temp_ebb_new_i32();

-    if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+    if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
          if (ofs + len == 32) {
              tcg_gen_shli_i32(t1, arg1, len);
              tcg_gen_extract2_i32(ret, t1, arg2, len);
@@ -1077,8 +1077,8 @@ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah,
          tcg_gen_mov_i32(ret, ah);
      } else if (al == ah) {
          tcg_gen_rotri_i32(ret, al, ofs);
-    } else if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
-        tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs);
+    } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
+        tcg_gen_op4i_i32(INDEX_op_extract2, ret, al, ah, ofs);
      } else {
          TCGv_i32 t0 = tcg_temp_ebb_new_i32();
          tcg_gen_shri_i32(t0, al, ofs);
@@ -1799,7 +1799,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
              tcg_gen_movi_i32(TCGV_LOW(ret), 0);
          }
      } else if (right) {
-        if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+        if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
              tcg_gen_extract2_i32(TCGV_LOW(ret),
                                   TCGV_LOW(arg1), TCGV_HIGH(arg1), c);
          } else {
@@ -1813,7 +1813,7 @@ static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
              tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
          }
      } else {
-        if (tcg_op_supported(INDEX_op_extract2_i32, TCG_TYPE_I32, 0)) {
+        if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I32, 0)) {
              tcg_gen_extract2_i32(TCGV_HIGH(ret),
                                   TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c);
          } else {
@@ -2553,7 +2553,7 @@ void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
      t1 = tcg_temp_ebb_new_i64();

-    if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
+    if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
          if (ofs + len == 64) {
              tcg_gen_shli_i64(t1, arg1, len);
              tcg_gen_extract2_i64(ret, t1, arg2, len);
@@ -2781,8 +2781,8 @@ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah,
          tcg_gen_mov_i64(ret, ah);
      } else if (al == ah) {
          tcg_gen_rotri_i64(ret, al, ofs);
-    } else if (tcg_op_supported(INDEX_op_extract2_i64, TCG_TYPE_I64, 0)) {
-        tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs);
+    } else if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_I64, 0)) {
+        tcg_gen_op4i_i64(INDEX_op_extract2, ret, al, ah, ofs);
      } else {
          TCGv_i64 t0 = tcg_temp_ebb_new_i64();
          tcg_gen_shri_i64(t0, al, ofs);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index f34f9cdd13..ed03840988 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1146,8 +1146,7 @@ static const TCGOutOp * const all_outop[NB_OPS] = {
      OUTOP(INDEX_op_divu2, TCGOutOpDivRem, outop_divu2),
      OUTOP(INDEX_op_eqv, TCGOutOpBinary, outop_eqv),
      OUTOP(INDEX_op_extract, TCGOutOpExtract, outop_extract),
-    OUTOP(INDEX_op_extract2_i32, TCGOutOpExtract2, outop_extract2),
-    OUTOP(INDEX_op_extract2_i64, TCGOutOpExtract2, outop_extract2),
+    OUTOP(INDEX_op_extract2, TCGOutOpExtract2, outop_extract2),
      OUTOP(INDEX_op_movcond, TCGOutOpMovcond, outop_movcond),
      OUTOP(INDEX_op_mul, TCGOutOpBinary, outop_mul),
      OUTOP(INDEX_op_muls2, TCGOutOpMul2, outop_muls2),
@@ -5594,8 +5593,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
          }
          break;

-    case INDEX_op_extract2_i32:
-    case INDEX_op_extract2_i64:
+    case INDEX_op_extract2:
          {
              const TCGOutOpExtract2 *out = &outop_extract2;
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index aea8a897bd..9392d88069 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -476,9 +476,9 @@ Misc
         |
         | (using an arithmetic right shift) on TCG_TYPE_I32.

-   * - extract2_i32/i64 *dest*, *t1*, *t2*, *pos*
+   * - extract2 *dest*, *t1*, *t2*, *pos*

-     - | For N = {32,64}, extract an N-bit quantity from the concatenation
+     - | For TCG_TYPE_I{N}, extract an N-bit quantity from the concatenation
          of *t2*:*t1*, beginning at *pos*. The tcg_gen_extract2_{i32,i64} expander
           accepts 0 <= *pos* <= N as inputs. The backend code generator will
           not see either 0 or N as inputs for these opcodes.
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 0fa1664a24..a3cba8d791 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -19,16 +19,6 @@
   * License along with this library; if not, see <http://www.gnu.org/licenses/>.
   */
-/*
- * Sometimes, knowing what the backend has can produce better code.
- * The exact opcode to check depends on 32- vs. 64-bit.
- */
-#ifdef TARGET_X86_64
-#define INDEX_op_extract2_tl            INDEX_op_extract2_i64
-#else
-#define INDEX_op_extract2_tl            INDEX_op_extract2_i32
-#endif
-
  #define MMX_OFFSET(reg)                        \
    ({ assert((reg) >= 0 && (reg) <= 7);         \
       offsetof(CPUX86State, fpregs[reg].mmx); })
@@ -2997,7 +2987,7 @@ static void gen_PMOVMSKB(DisasContext *s, X86DecodedInsn *decode)
      tcg_gen_ld8u_tl(s->T0, tcg_env, offsetof(CPUX86State, xmm_t0.ZMM_B(vec_len - 1)));
      while (vec_len > 8) {
          vec_len -= 8;
-        if (tcg_op_supported(INDEX_op_extract2_tl, TCG_TYPE_TL, 0)) {
+        if (tcg_op_supported(INDEX_op_extract2, TCG_TYPE_TL, 0)) {
              /*
               * Load the next byte of the result into the high byte of T.
               * TCG does a similar expansion of deposit to shl+extract2; by

Reviewed-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>

Reply via email to