Re: [PATCH 6/8] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32

2021-10-13 Thread Richard Henderson

On 10/11/21 3:28 AM, Alex Bennée wrote:

+if (TARGET_LONG_BITS == 64) {
+return 3; /* LSL #0 */
+} else if (signed_addr32) {
+return 6; /* SXTW */
+} else {
+return 2; /* UXTW */
+}
+}


If this is going to be a magic number we pass into our code
generation we could at least wrap it in a confined enum rather than a
bare int we chuck around.


Given that it's used exactly once, and commented, and matches the ARM, do we really need an 
enum?



r~



Re: [PATCH 6/8] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32

2021-10-11 Thread Richard Henderson

On 10/11/21 3:28 AM, Alex Bennée wrote:


Richard Henderson  writes:


AArch64 has both sign and zero-extending addressing modes, which
means that either treatment of guest addresses is equally efficient.
Enabling this for AArch64 gives us testing of the feature in CI.


So which guest front ends will exercise this backend?


All 32-bit guests.


Is this something we can exercise in 32 bit user mode tests?


Yes.

Which is why I enabled this for aarch64, so that we'd have a major platform 
testing it daily.


r~



Re: [PATCH 6/8] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32

2021-10-11 Thread Alex Bennée


Richard Henderson  writes:

> AArch64 has both sign and zero-extending addressing modes, which
> means that either treatment of guest addresses is equally efficient.
> Enabling this for AArch64 gives us testing of the feature in CI.

So which guest front ends will exercise this backend? I realise you
never mentioned it in the cover letter. Is this something we can
exercise in 32 bit user mode tests?

> Signed-off-by: Richard Henderson 
> ---
>  tcg/aarch64/tcg-target-sa32.h |  8 -
>  tcg/aarch64/tcg-target.c.inc  | 68 ++-
>  2 files changed, 51 insertions(+), 25 deletions(-)
>
> diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
> index cb185b1526..c99e502e4c 100644
> --- a/tcg/aarch64/tcg-target-sa32.h
> +++ b/tcg/aarch64/tcg-target-sa32.h
> @@ -1 +1,7 @@
> -#define TCG_TARGET_SIGNED_ADDR32 0
> +/*
> + * AArch64 has both SXTW and UXTW addressing modes, which means that
> + * it is agnostic to how guest addresses should be represented.
> + * Because aarch64 is more common than the other hosts that will
> + * want to use this feature, enable it for continuous testing.
> + */
> +#define TCG_TARGET_SIGNED_ADDR32 1
> diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
> index 5edca8d44d..88b2963f9d 100644
> --- a/tcg/aarch64/tcg-target.c.inc
> +++ b/tcg/aarch64/tcg-target.c.inc
> @@ -12,6 +12,7 @@
>  
>  #include "../tcg-pool.c.inc"
>  #include "qemu/bitops.h"
> +#include "tcg-target-sa32.h"
>  
>  /* We're going to re-use TCGType in setting of the SF bit, which controls
> the size of the operation performed.  If we know the values match, it
> @@ -804,12 +805,12 @@ static void tcg_out_insn_3617(TCGContext *s, 
> AArch64Insn insn, bool q,
>  }
>  
>  static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
> -  TCGReg rd, TCGReg base, TCGType ext,
> +  TCGReg rd, TCGReg base, int option,
>TCGReg regoff)
>  {
>  /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
>  tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
> -  0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
> +  option << 13 | base << 5 | (rd & 0x1f));
>  }
>  
>  static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
> @@ -1124,7 +1125,7 @@ static void tcg_out_ldst(TCGContext *s, AArch64Insn 
> insn, TCGReg rd,
>  
>  /* Worst-case scenario, move offset to temp register, use reg offset.  */
>  tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
> -tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
> +tcg_out_ldst_r(s, insn, rd, rn, 3 /* LSL #0 */, TCG_REG_TMP);
>  }
>  
>  static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
> @@ -1718,34 +1719,34 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg 
> addr_reg, MemOp opc,
>  
>  static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
> TCGReg data_r, TCGReg addr_r,
> -   TCGType otype, TCGReg off_r)
> +   int option, TCGReg off_r)
>  {
>  /* Byte swapping is left to middle-end expansion. */
>  tcg_debug_assert((memop & MO_BSWAP) == 0);
>  
>  switch (memop & MO_SSIZE) {
>  case MO_UB:
> -tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
> +tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, option, off_r);
>  break;
>  case MO_SB:
>  tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
> -   data_r, addr_r, otype, off_r);
> +   data_r, addr_r, option, off_r);
>  break;
>  case MO_UW:
> -tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
> +tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, option, off_r);
>  break;
>  case MO_SW:
>  tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
> -   data_r, addr_r, otype, off_r);
> +   data_r, addr_r, option, off_r);
>  break;
>  case MO_UL:
> -tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
> +tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, option, off_r);
>  break;
>  case MO_SL:
> -tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
> +tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, option, off_r);
>  break;
>  case MO_Q:
> -tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
> +tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, option, off_r);
>  break;
>  default:
>  tcg_abort();
> @@ -1754,50 +1755,68 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, 
> MemOp memop, TCGType ext,
>  
>  static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
> TCGReg data_r, TCGReg addr_r,
> -