Re: [PATCH 20/42] tcg/i386: Use TCGType not bool is_64 in tcg_out_qemu_{ld, st}

2023-04-12 Thread Richard Henderson

On 4/11/23 14:12, Philippe Mathieu-Daudé wrote:

On 8/4/23 04:42, Richard Henderson wrote:

There are several places where we already convert back from
bool to type.  Clean things up by using type throughout.

Signed-off-by: Richard Henderson 
---
  tcg/i386/tcg-target.c.inc | 35 +--
  1 file changed, 17 insertions(+), 18 deletions(-)


Reviewed-by: Philippe Mathieu-Daudé 



Thanks, though there is now a v2 patch set which does not contain this patch, nor some 
others you have reviewed in the last few days.



r~



Re: [PATCH 20/42] tcg/i386: Use TCGType not bool is_64 in tcg_out_qemu_{ld, st}

2023-04-11 Thread Philippe Mathieu-Daudé

On 8/4/23 04:42, Richard Henderson wrote:

There are several places where we already convert back from
bool to type.  Clean things up by using type throughout.

Signed-off-by: Richard Henderson 
---
  tcg/i386/tcg-target.c.inc | 35 +--
  1 file changed, 17 insertions(+), 18 deletions(-)


Reviewed-by: Philippe Mathieu-Daudé 




[PATCH 20/42] tcg/i386: Use TCGType not bool is_64 in tcg_out_qemu_{ld, st}

2023-04-07 Thread Richard Henderson
There are several places where we already convert back from
bool to type.  Clean things up by using type throughout.

Signed-off-by: Richard Henderson 
---
 tcg/i386/tcg-target.c.inc | 35 +--
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 54465c7f46..ff4062ef54 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1886,8 +1886,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg 
addrlo, TCGReg addrhi,
  * Record the context of a call to the out of line helper code for the slow 
path
  * for a load or store, so that we can later generate the correct helper code
  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
-MemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+TCGType type, MemOpIdx oi,
 TCGReg datalo, TCGReg datahi,
 TCGReg addrlo, TCGReg addrhi,
 tcg_insn_unit *raddr,
@@ -1897,7 +1897,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool 
is_ld, bool is_64,
 
 label->is_ld = is_ld;
 label->oi = oi;
-label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+label->type = type;
 label->datalo_reg = datalo;
 label->datahi_reg = datahi;
 label->addrlo_reg = addrlo;
@@ -2151,11 +2151,10 @@ static inline int setup_guest_base_seg(void)
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
-   int seg, bool is64, MemOp memop)
+   int seg, TCGType type, MemOp memop)
 {
-TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
 bool use_movbe = false;
-int rexw = is64 * P_REXW;
+int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
 int movop = OPC_MOVL_GvEv;
 
 /* Do big-endian loads with movbe.  */
@@ -2248,7 +2247,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg 
datalo, TCGReg datahi,
 /* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
EAX. It will be useful once fixed registers globals are less
common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType d_type)
 {
 TCGReg datalo, datahi, addrlo;
 TCGReg addrhi __attribute__((unused));
@@ -2262,7 +2261,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg 
*args, bool is64)
 #endif
 
 datalo = *args++;
-datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+datahi = TCG_TARGET_REG_BITS == 64 || d_type == TCG_TYPE_I32 ? 0 : *args++;
 addrlo = *args++;
 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
 oi = *args++;
@@ -2275,10 +2274,10 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg 
*args, bool is64)
  label_ptr, offsetof(CPUTLBEntry, addr_read));
 
 /* TLB Hit.  */
-tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, d_type, 
opc);
 
 /* Record the current context of a load into ldst label */
-add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
+add_qemu_ldst_label(s, true, d_type, oi, datalo, datahi, addrlo, addrhi,
 s->code_ptr, label_ptr);
 #else
 a_bits = get_alignment_bits(opc);
@@ -2288,7 +2287,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg 
*args, bool is64)
 
 tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
-   is64, opc);
+   d_type, opc);
 #endif
 }
 
@@ -2344,7 +2343,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg 
datalo, TCGReg datahi,
 }
 }
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType d_type)
 {
 TCGReg datalo, datahi, addrlo;
 TCGReg addrhi __attribute__((unused));
@@ -2358,7 +2357,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg 
*args, bool is64)
 #endif
 
 datalo = *args++;
-datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+datahi = TCG_TARGET_REG_BITS == 64 || d_type == TCG_TYPE_I32 ? 0 : *args++;
 addrlo = *args++;
 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
 oi = *args++;
@@ -2374,7 +2373,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg 
*args, bool is64)
 tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
 
 /* Record the current context of a store into ldst label */
-add_qemu_ldst_label(s, false, is64, oi,