Re: [PATCH 13/16] target/sparc: Use MO_ALIGN where required

2023-05-03 Thread Mark Cave-Ayland

On 02/05/2023 17:08, Richard Henderson wrote:


Signed-off-by: Richard Henderson 
---
  target/sparc/translate.c | 66 +---
  1 file changed, 34 insertions(+), 32 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index bc71e44e66..414e014b11 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -1899,7 +1899,7 @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
   TCGv addr, int mmu_idx, MemOp memop)
  {
  gen_address_mask(dc, addr);
-tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
  }
  
  static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)

@@ -2155,12 +2155,12 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv 
addr,
  break;
  case GET_ASI_DIRECT:
  gen_address_mask(dc, addr);
-tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
  break;
  default:
  {
  TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(memop);
+TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
  
  save_state(dc);

  #ifdef TARGET_SPARC64
@@ -2201,7 +2201,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv 
addr,
  /* fall through */
  case GET_ASI_DIRECT:
  gen_address_mask(dc, addr);
-tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
  break;
  #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
  case GET_ASI_BCOPY:
@@ -2233,7 +2233,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv 
addr,
  default:
  {
  TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE);
+TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
  
  save_state(dc);

  #ifdef TARGET_SPARC64
@@ -2283,7 +2283,7 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv 
cmpv,
  case GET_ASI_DIRECT:
  oldv = tcg_temp_new();
  tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-  da.mem_idx, da.memop);
+  da.mem_idx, da.memop | MO_ALIGN);
  gen_store_gpr(dc, rd, oldv);
  break;
  default:
@@ -2347,7 +2347,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
  switch (size) {
  case 4:
  d32 = gen_dest_fpr_F(dc);
-tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
  gen_store_fpr_F(dc, rd, d32);
  break;
  case 8:
@@ -2397,7 +2397,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
  /* Valid for lddfa only.  */
  if (size == 8) {
  gen_address_mask(dc, addr);
-tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+da.memop | MO_ALIGN);
  } else {
  gen_exception(dc, TT_ILL_INSN);
  }
@@ -2406,7 +2407,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
  default:
  {
  TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
  
  save_state(dc);

  /* According to the table in the UA2011 manual, the only
@@ -2454,7 +2455,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
  switch (size) {
  case 4:
  d32 = gen_load_fpr_F(dc, rd);
-tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
  break;
  case 8:
  tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
@@ -2506,7 +2507,8 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
  /* Valid for stdfa only.  */
  if (size == 8) {
  gen_address_mask(dc, addr);
-tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+da.memop | MO_ALIGN);
  } else {
  gen_exception(dc, TT_ILL_INSN);
  }
@@ -2543,7 +2545,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int 
insn, int rd)
  TCGv_i64 tmp = tcg_temp_new_i64();
  
  gen_address_mask(dc, addr);

-tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
[PATCH 13/16] target/sparc: Use MO_ALIGN where required

2023-05-02 Thread Richard Henderson
Signed-off-by: Richard Henderson 
---
 target/sparc/translate.c | 66 +---
 1 file changed, 34 insertions(+), 32 deletions(-)

diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index bc71e44e66..414e014b11 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -1899,7 +1899,7 @@ static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
  TCGv addr, int mmu_idx, MemOp memop)
 {
 gen_address_mask(dc, addr);
-tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);
 }
 
 static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
@@ -2155,12 +2155,12 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv 
addr,
 break;
 case GET_ASI_DIRECT:
 gen_address_mask(dc, addr);
-tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);
 break;
 default:
 {
 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(memop);
+TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
 save_state(dc);
 #ifdef TARGET_SPARC64
@@ -2201,7 +2201,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv 
addr,
 /* fall through */
 case GET_ASI_DIRECT:
 gen_address_mask(dc, addr);
-tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
 break;
 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
 case GET_ASI_BCOPY:
@@ -2233,7 +2233,7 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv 
addr,
 default:
 {
 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(memop & MO_SIZE);
+TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
 
 save_state(dc);
 #ifdef TARGET_SPARC64
@@ -2283,7 +2283,7 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv 
cmpv,
 case GET_ASI_DIRECT:
 oldv = tcg_temp_new();
 tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
-  da.mem_idx, da.memop);
+  da.mem_idx, da.memop | MO_ALIGN);
 gen_store_gpr(dc, rd, oldv);
 break;
 default:
@@ -2347,7 +2347,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
 switch (size) {
 case 4:
 d32 = gen_dest_fpr_F(dc);
-tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
 gen_store_fpr_F(dc, rd, d32);
 break;
 case 8:
@@ -2397,7 +2397,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
 /* Valid for lddfa only.  */
 if (size == 8) {
 gen_address_mask(dc, addr);
-tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+da.memop | MO_ALIGN);
 } else {
 gen_exception(dc, TT_ILL_INSN);
 }
@@ -2406,7 +2407,7 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
 default:
 {
 TCGv_i32 r_asi = tcg_constant_i32(da.asi);
-TCGv_i32 r_mop = tcg_constant_i32(da.memop);
+TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
 
 save_state(dc);
 /* According to the table in the UA2011 manual, the only
@@ -2454,7 +2455,7 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
 switch (size) {
 case 4:
 d32 = gen_load_fpr_F(dc, rd);
-tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
 break;
 case 8:
 tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
@@ -2506,7 +2507,8 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
 /* Valid for stdfa only.  */
 if (size == 8) {
 gen_address_mask(dc, addr);
-tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+da.memop | MO_ALIGN);
 } else {
 gen_exception(dc, TT_ILL_INSN);
 }
@@ -2543,7 +2545,7 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int 
insn, int rd)
 TCGv_i64 tmp = tcg_temp_new_i64();
 
 gen_address_mask(dc, addr);
-tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop | MO_ALIGN);
 
 /* Note that LE ldda acts as if each 32-bit register
result is byte swapped.