Re: [Qemu-devel] [PATCH 12/35] tcg-s390: Define TCG_TMP0.

2010-06-10 Thread Aurelien Jarno
On Fri, Jun 04, 2010 at 12:14:20PM -0700, Richard Henderson wrote:
> Use a define for the temp register instead of hard-coding it.
> 
> Signed-off-by: Richard Henderson 
> ---
>  tcg/s390/tcg-target.c |   54 ++--
>  1 files changed, 29 insertions(+), 25 deletions(-)

This patch looks ok.

> diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
> index 5b2134b..2b80c02 100644
> --- a/tcg/s390/tcg-target.c
> +++ b/tcg/s390/tcg-target.c
> @@ -40,6 +40,10 @@
> rather than TCG_REG_R0.  */
>  #define TCG_REG_NONE0
>  
> +/* A scratch register that may be used throughout the backend.  */
> +#define TCG_TMP0TCG_REG_R13
> +
> +
>  /* All of the following instructions are prefixed with their instruction
> format, and are defined as 8- or 16-bit quantities, even when the two
> halves of the 16-bit quantity may appear 32 bits apart in the insn.
> @@ -376,12 +380,12 @@ static inline void tcg_out_movi(TCGContext *s, TCGType 
> type,
>  tcg_out_insn(s, RI, IILH, ret, arg >> 16);
>  } else {
>  /* branch over constant and store its address in R13 */
> -tcg_out_insn(s, RIL, BRASL, TCG_REG_R13, (6 + 8) >> 1);
> +tcg_out_insn(s, RIL, BRASL, TCG_TMP0, (6 + 8) >> 1);
>  /* 64-bit constant */
>  tcg_out32(s, arg >> 32);
>  tcg_out32(s, arg);
>  /* load constant to ret */
> -tcg_out_insn(s, RXY, LG, ret, TCG_REG_R13, 0, 0);
> +tcg_out_insn(s, RXY, LG, ret, TCG_TMP0, 0, 0);
>  }
>  }
>  
> @@ -399,14 +403,14 @@ static void tcg_out_mem(TCGContext *s, S390Opcode 
> opc_rx, S390Opcode opc_rxy,
>  if (ofs < -0x80000 || ofs >= 0x80000) {
>  /* Combine the low 16 bits of the offset with the actual load insn;
> the high 48 bits must come from an immediate load.  */
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, ofs & ~0xffff);
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
>  ofs &= 0xffff;
>  
>  /* If we were already given an index register, add it in.  */
>  if (index != TCG_REG_NONE) {
> -tcg_out_insn(s, RRE, AGR, TCG_REG_R13, index);
> +tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
>  }
> -index = TCG_REG_R13;
> +index = TCG_TMP0;
>  }
>  
>  if (opc_rx && ofs >= 0 && ofs < 0x1000) {
> @@ -482,8 +486,8 @@ static void tgen_gotoi(TCGContext *s, int cc, 
> tcg_target_long dest)
>  } else if (off == (int32_t)off) {
>  tcg_out_insn(s, RIL, BRCL, cc, off);
>  } else {
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
> -tcg_out_insn(s, RR, BCR, cc, TCG_REG_R13);
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
> +tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
>  }
>  }
>  
> @@ -505,8 +509,8 @@ static void tgen_calli(TCGContext *s, tcg_target_long 
> dest)
>  if (off == (int32_t)off) {
>  tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
>  } else {
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
> -tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_REG_R13);
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
> +tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
>  }
>  }
>  
> @@ -538,22 +542,22 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int 
> data_reg, int addr_reg,
>  tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
>   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
>  
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>   TARGET_PAGE_MASK | ((1 << s_bits) - 1));
> -tcg_out_insn(s, RRE, NGR, arg0, TCG_REG_R13);
> +tcg_out_insn(s, RRE, NGR, arg0, TCG_TMP0);
>  
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>   (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
> -tcg_out_insn(s, RRE, NGR, arg1, TCG_REG_R13);
> +tcg_out_insn(s, RRE, NGR, arg1, TCG_TMP0);
>  
>  if (is_store) {
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>   offsetof(CPUState, tlb_table[mem_index][0].addr_write));
>  } else {
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
> +tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
>   offsetof(CPUState, tlb_table[mem_index][0].addr_read));
>  }
> -tcg_out_insn(s, RRE, AGR, arg1, TCG_REG_R13);
> +tcg_out_insn(s, RRE, AGR, arg1, TCG_TMP0);
>  
>  tcg_out_insn(s, RRE, AGR, arg1, TCG_AREG0);
>  
> @@ -688,8 +692,8 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* 
> args, int opc)
>  #else
>  /* swapped unsigned halfword load with upper bits zeroed */
>  tcg_out_insn(s, RXY, LRVH, data_reg, arg0, 0, 0);
> -tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, 0xffffL);
> -tcg_out_insn(s, RRE, NGR, data_reg, 13);
> +tcg_out

[Qemu-devel] [PATCH 12/35] tcg-s390: Define TCG_TMP0.

2010-06-04 Thread Richard Henderson
Use a define for the temp register instead of hard-coding it.

Signed-off-by: Richard Henderson 
---
 tcg/s390/tcg-target.c |   54 ++--
 1 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 5b2134b..2b80c02 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -40,6 +40,10 @@
rather than TCG_REG_R0.  */
 #define TCG_REG_NONE0
 
+/* A scratch register that may be used throughout the backend.  */
+#define TCG_TMP0TCG_REG_R13
+
+
 /* All of the following instructions are prefixed with their instruction
format, and are defined as 8- or 16-bit quantities, even when the two
halves of the 16-bit quantity may appear 32 bits apart in the insn.
@@ -376,12 +380,12 @@ static inline void tcg_out_movi(TCGContext *s, TCGType 
type,
 tcg_out_insn(s, RI, IILH, ret, arg >> 16);
 } else {
 /* branch over constant and store its address in R13 */
-tcg_out_insn(s, RIL, BRASL, TCG_REG_R13, (6 + 8) >> 1);
+tcg_out_insn(s, RIL, BRASL, TCG_TMP0, (6 + 8) >> 1);
 /* 64-bit constant */
 tcg_out32(s, arg >> 32);
 tcg_out32(s, arg);
 /* load constant to ret */
-tcg_out_insn(s, RXY, LG, ret, TCG_REG_R13, 0, 0);
+tcg_out_insn(s, RXY, LG, ret, TCG_TMP0, 0, 0);
 }
 }
 
@@ -399,14 +403,14 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, 
S390Opcode opc_rxy,
 if (ofs < -0x80000 || ofs >= 0x80000) {
 /* Combine the low 16 bits of the offset with the actual load insn;
the high 48 bits must come from an immediate load.  */
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, ofs & ~0xffff);
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs & ~0xffff);
 ofs &= 0xffff;
 
 /* If we were already given an index register, add it in.  */
 if (index != TCG_REG_NONE) {
-tcg_out_insn(s, RRE, AGR, TCG_REG_R13, index);
+tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
 }
-index = TCG_REG_R13;
+index = TCG_TMP0;
 }
 
 if (opc_rx && ofs >= 0 && ofs < 0x1000) {
@@ -482,8 +486,8 @@ static void tgen_gotoi(TCGContext *s, int cc, 
tcg_target_long dest)
 } else if (off == (int32_t)off) {
 tcg_out_insn(s, RIL, BRCL, cc, off);
 } else {
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
-tcg_out_insn(s, RR, BCR, cc, TCG_REG_R13);
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
 }
 }
 
@@ -505,8 +509,8 @@ static void tgen_calli(TCGContext *s, tcg_target_long dest)
 if (off == (int32_t)off) {
 tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
 } else {
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, dest);
-tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_REG_R13);
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, dest);
+tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
 }
 }
 
@@ -538,22 +542,22 @@ static void tcg_prepare_qemu_ldst(TCGContext* s, int 
data_reg, int addr_reg,
 tcg_out_sh64(s, RSY_SRLG, arg1, addr_reg, TCG_REG_NONE,
  TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
 
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
  TARGET_PAGE_MASK | ((1 << s_bits) - 1));
-tcg_out_insn(s, RRE, NGR, arg0, TCG_REG_R13);
+tcg_out_insn(s, RRE, NGR, arg0, TCG_TMP0);
 
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
  (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
-tcg_out_insn(s, RRE, NGR, arg1, TCG_REG_R13);
+tcg_out_insn(s, RRE, NGR, arg1, TCG_TMP0);
 
 if (is_store) {
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
  offsetof(CPUState, tlb_table[mem_index][0].addr_write));
 } else {
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13,
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0,
  offsetof(CPUState, tlb_table[mem_index][0].addr_read));
 }
-tcg_out_insn(s, RRE, AGR, arg1, TCG_REG_R13);
+tcg_out_insn(s, RRE, AGR, arg1, TCG_TMP0);
 
 tcg_out_insn(s, RRE, AGR, arg1, TCG_AREG0);
 
@@ -688,8 +692,8 @@ static void tcg_out_qemu_ld(TCGContext* s, const TCGArg* 
args, int opc)
 #else
 /* swapped unsigned halfword load with upper bits zeroed */
 tcg_out_insn(s, RXY, LRVH, data_reg, arg0, 0, 0);
-tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R13, 0xffffL);
-tcg_out_insn(s, RRE, NGR, data_reg, 13);
+tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, 0xffffL);
+tcg_out_insn(s, RRE, NGR, data_reg, TCG_TMP0);
 #endif
 break;
 case LD_INT16:
@@ -802,16 +806,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode 
opc,
(tcg_target_long)s->code_ptr) >> 1;
 if (off == (int32