gcc/ChangeLog:

        * config/loongarch/constraints.md: Disable "k" on LA32.
        * config/loongarch/larchintrin.h (__movgr2fcsr): Disable on soft float.
        (__cacop_w): New.
        (__lddir_w): New.
        (__ldpte_w): New.
        (__crc_w_w_w): Disable on LA32.
        (__crc_w_d_w): Likewise.
        (__crcc_w_w_w): Likewise.
        * config/loongarch/loongarch.cc (loongarch_valid_offset_p): Disable
        const_imm16_operand on LA32.
        (loongarch_valid_index_p): Disable ADDRESS_REG_REG on LA32.
        (loongarch_legitimize_address): Disable mem_shadd_or_shadd_rtx_p on
        LA32R.
        (loongarch_output_move_index): Assert TARGET_64BIT for ldx/stx.
        (loongarch_output_move): Disable ldptr/stptr if offset is 0.
        (loongarch_output_equal_conditional_branch): Disable beqz/bnez on LA32R.
        (loongarch_trampoline_init): Change pcaddi to pcaddu12i.
        (loongarch_get_separate_components): Disable ldptr/stptr on LA32.
        * config/loongarch/predicates.md: Disable low_bitmask_operand and
        ins_zero_bitmask_operand on LA32R.
        * config/loongarch/sync.md: Change beqz to beq and change orn to nor+or.

include/ChangeLog:

        * longlong.h (count_leading_zeros) [W_TYPE_SIZE == 32]: Delete.
        (count_trailing_zeros) [W_TYPE_SIZE == 32]: Likewise.
        (COUNT_LEADING_ZEROS_0) [W_TYPE_SIZE == 32]: Likewise.
---
 gcc/config/loongarch/constraints.md |  1 +
 gcc/config/loongarch/larchintrin.h  | 21 ++++++--------
 gcc/config/loongarch/loongarch.cc   | 43 +++++++++++++++++++----------
 gcc/config/loongarch/predicates.md  |  4 ++-
 gcc/config/loongarch/sync.md        | 36 +++++++++++++++---------
 include/longlong.h                  |  6 +---
 6 files changed, 65 insertions(+), 46 deletions(-)

diff --git a/gcc/config/loongarch/constraints.md 
b/gcc/config/loongarch/constraints.md
index 97a4e4e35d3..de256c0f8c0 100644
--- a/gcc/config/loongarch/constraints.md
+++ b/gcc/config/loongarch/constraints.md
@@ -133,6 +133,7 @@ (define_memory_constraint "k"
   "A memory operand whose address is formed by a base register and (optionally 
scaled)
    index register."
   (and (match_code "mem")
+       (match_test "TARGET_64BIT")
        (match_test "loongarch_base_index_address_p (XEXP (op, 0), mode)")))
 
 (define_constraint "l"
diff --git a/gcc/config/loongarch/larchintrin.h 
b/gcc/config/loongarch/larchintrin.h
index b8c06545c96..edf5fa6d995 100644
--- a/gcc/config/loongarch/larchintrin.h
+++ b/gcc/config/loongarch/larchintrin.h
@@ -80,6 +80,7 @@ __rdtimel_w (void)
   return __rdtime;
 }
 
+#ifdef __loongarch_hard_float
 /* Assembly instruction format:        rj, fcsr.  */
 /* Data types in instruction templates:  USI, UQI.  */
 #define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr ((_1));
@@ -88,14 +89,16 @@ __rdtimel_w (void)
 /* Data types in instruction templates:  VOID, UQI, USI.  */
 #define __movgr2fcsr(/*ui5*/ _1, _2) \
   __builtin_loongarch_movgr2fcsr ((_1), _2);
+#endif
 
-#if defined __loongarch64
+#if __loongarch_grlen == 64
 /* Assembly instruction format:        ui5, rj, si12.  */
 /* Data types in instruction templates:  VOID, USI, UDI, SI.  */
 #define __cacop_d(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) \
   __builtin_loongarch_cacop_d ((_1), (_2), (_3))
-#else
-#error "Unsupported ABI."
+#elif __loongarch_grlen == 32
+#define __cacop_w(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) \
+  __builtin_loongarch_cacop_w ((_1), (_2), (_3))
 #endif
 
 /* Assembly instruction format:        rd, rj.  */
@@ -127,24 +130,21 @@ __asrtgt_d (long int _1, long int _2)
 }
 #endif
 
-#if defined __loongarch64
+#if __loongarch_grlen == 64
 /* Assembly instruction format:        rd, rj, ui5.  */
 /* Data types in instruction templates:  DI, DI, UQI.  */
 #define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \
   __builtin_loongarch_lddir_d ((_1), (_2))
-#else
-#error "Unsupported ABI."
 #endif
 
-#if defined __loongarch64
+#if __loongarch_grlen == 64
 /* Assembly instruction format:        rj, ui5.  */
 /* Data types in instruction templates:  VOID, DI, UQI.  */
 #define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \
   __builtin_loongarch_ldpte_d ((_1), (_2))
-#else
-#error "Unsupported ABI."
 #endif
 
+#ifdef __loongarch64
 /* Assembly instruction format:        rd, rj, rk.  */
 /* Data types in instruction templates:  SI, QI, SI.  */
 extern __inline int
@@ -172,7 +172,6 @@ __crc_w_w_w (int _1, int _2)
   return __builtin_loongarch_crc_w_w_w (_1, _2);
 }
 
-#ifdef __loongarch64
 /* Assembly instruction format:        rd, rj, rk.  */
 /* Data types in instruction templates:  SI, DI, SI.  */
 extern __inline int
@@ -181,7 +180,6 @@ __crc_w_d_w (long int _1, int _2)
 {
   return __builtin_loongarch_crc_w_d_w (_1, _2);
 }
-#endif
 
 /* Assembly instruction format:        rd, rj, rk.  */
 /* Data types in instruction templates:  SI, QI, SI.  */
@@ -210,7 +208,6 @@ __crcc_w_w_w (int _1, int _2)
   return __builtin_loongarch_crcc_w_w_w (_1, _2);
 }
 
-#ifdef __loongarch64
 /* Assembly instruction format:        rd, rj, rk.  */
 /* Data types in instruction templates:  SI, DI, SI.  */
 extern __inline int
diff --git a/gcc/config/loongarch/loongarch.cc 
b/gcc/config/loongarch/loongarch.cc
index 03d4e15acf9..2c70f8c4e0a 100644
--- a/gcc/config/loongarch/loongarch.cc
+++ b/gcc/config/loongarch/loongarch.cc
@@ -2280,7 +2280,9 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
      or check that X is a signed 16-bit number
      and offset 4 byte aligned.  */
   if (!(const_arith_operand (x, Pmode)
-       || ((mode == E_SImode || mode == E_DImode)
+       /* FIXME: la32 atomic insns support 16-bit imm.  */
+       || (TARGET_64BIT
+           && (mode == E_SImode || mode == E_DImode)
            && const_imm16_operand (x, Pmode)
            && (loongarch_signed_immediate_p (INTVAL (x), 14, 2)))))
     return false;
@@ -2393,7 +2395,8 @@ loongarch_valid_index_p (struct loongarch_address_info 
*info, rtx x,
       && contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
     index = SUBREG_REG (index);
 
-  if (loongarch_valid_base_register_p (index, mode, strict_p))
+  /* LA32 does not provide LDX/STX.  */
+  if (TARGET_64BIT && loongarch_valid_base_register_p (index, mode, strict_p))
     {
       info->type = ADDRESS_REG_REG;
       info->offset = index;
@@ -3443,7 +3446,9 @@ loongarch_legitimize_address (rtx x, rtx oldx 
ATTRIBUTE_UNUSED,
   if (offset != 0)
     {
       /* Handle (plus (plus (mult (a) (mem_shadd_constant)) (fp)) (C)) case.  
*/
-      if (GET_CODE (base) == PLUS && mem_shadd_or_shadd_rtx_p (XEXP (base, 0))
+      if ((TARGET_64BIT || TARGET_32BIT_S)
+         && GET_CODE (base) == PLUS
+         && mem_shadd_or_shadd_rtx_p (XEXP (base, 0))
          && IMM12_OPERAND (offset))
        {
          rtx index = XEXP (base, 0);
@@ -4868,6 +4873,7 @@ loongarch_output_move_index (rtx x, machine_mode mode, 
bool ldr)
       }
     };
 
+  gcc_assert (TARGET_64BIT);
   return insn[ldr][index];
 }
 
@@ -5119,10 +5125,14 @@ loongarch_output_move (rtx *operands)
              /* Matching address type with a 12bit offset and
                 ADDRESS_LO_SUM.  */
              if (const_arith_operand (offset, Pmode)
-                 || GET_CODE (offset) == LO_SUM)
+                 || GET_CODE (offset) == LO_SUM
+                 || GET_CODE(XEXP (dest, 0)) == REG)
                return "st.w\t%z1,%0";
              else
-               return "stptr.w\t%z1,%0";
+               {
+                 gcc_assert (TARGET_64BIT);
+                 return "stptr.w\t%z1,%0";
+               }
            case 8:
              if (const_arith_operand (offset, Pmode)
                  || GET_CODE (offset) == LO_SUM)
@@ -5164,10 +5174,14 @@ loongarch_output_move (rtx *operands)
              /* Matching address type with a 12bit offset and
                 ADDRESS_LO_SUM.  */
              if (const_arith_operand (offset, Pmode)
-                 || GET_CODE (offset) == LO_SUM)
+                 || GET_CODE (offset) == LO_SUM
+                 || GET_CODE(XEXP (src, 0)) == REG)
                return "ld.w\t%0,%1";
              else
-               return "ldptr.w\t%0,%1";
+               {
+                 gcc_assert (TARGET_64BIT);
+                 return "ldptr.w\t%0,%1";
+               }
            case 8:
              if (const_arith_operand (offset, Pmode)
                  || GET_CODE (offset) == LO_SUM)
@@ -7665,7 +7679,8 @@ loongarch_output_equal_conditional_branch (rtx_insn 
*insn, rtx *operands,
                                           bool inverted_p)
 {
   const char *branch[2];
-  if (operands[3] == const0_rtx)
+  if ((TARGET_64BIT || TARGET_32BIT_S)
+      && operands[3] == const0_rtx)
     {
       branch[!inverted_p] = LARCH_BRANCH ("b%C1z", "%2,%0");
       branch[inverted_p] = LARCH_BRANCH ("b%N1z", "%2,%0");
@@ -8333,11 +8348,11 @@ loongarch_trampoline_init (rtx m_tramp, tree fndecl, 
rtx chain_value)
 
   /* Build up the code in TRAMPOLINE.  */
   i = 0;
-  /*pcaddi $static_chain,0
+  /*pcaddu12i $static_chain,0
     ld.[dw] $tmp,$static_chain,target_function_offset
     ld.[dw] $static_chain,$static_chain,static_chain_offset
     jirl $r0,$tmp,0  */
-  trampoline[i++] = OP (0x18000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST));
+  trampoline[i++] = OP (0x1c000000 | (STATIC_CHAIN_REGNUM - GP_REG_FIRST));
   trampoline[i++] = OP ((ptr_mode == DImode ? 0x28c00000 : 0x28800000)
                        | 19 /* $t7  */
                        | ((STATIC_CHAIN_REGNUM - GP_REG_FIRST) << 5)
@@ -8546,11 +8561,9 @@ loongarch_get_separate_components (void)
        /* We can wrap general registers saved at [sp, sp + 32768) using the
           ldptr/stptr instructions.  For large offsets a pseudo register
           might be needed which cannot be created during the shrink
-          wrapping pass.
-
-          TODO: This may need a revise when we add LA32 as ldptr.w is not
-          guaranteed available by the manual.  */
-       if (offset < 32768)
+          wrapping pass.  */
+       if ((TARGET_64BIT && IMM16_OPERAND (offset))
+           || IMM12_OPERAND (offset))
          bitmap_set_bit (components, regno);
 
        offset -= UNITS_PER_WORD;
diff --git a/gcc/config/loongarch/predicates.md 
b/gcc/config/loongarch/predicates.md
index 957215ad89a..ccfc5189247 100644
--- a/gcc/config/loongarch/predicates.md
+++ b/gcc/config/loongarch/predicates.md
@@ -291,7 +291,8 @@ (define_predicate "si_mask_operand"
 
 (define_predicate "low_bitmask_operand"
   (and (match_code "const_int")
-       (match_test "low_bitmask_len (mode, INTVAL (op)) > 12")))
+       (match_test "low_bitmask_len (mode, INTVAL (op)) > 12")
+       (match_test "!TARGET_32BIT_R")))
 
 (define_predicate "d_operand"
   (and (match_code "reg")
@@ -402,6 +403,7 @@ (define_predicate "muldiv_target_operand"
 
 (define_predicate "ins_zero_bitmask_operand"
   (and (match_code "const_int")
+       (match_test "!TARGET_32BIT_R")
        (match_test "low_bitmask_len (mode, \
                                     ~UINTVAL (op) | (~UINTVAL(op) - 1)) \
                    > 0")
diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
index 91d76af01cc..11606cc9aed 100644
--- a/gcc/config/loongarch/sync.md
+++ b/gcc/config/loongarch/sync.md
@@ -376,14 +376,24 @@ (define_insn "atomic_fetch_nand_mask_inverted<mode>"
          UNSPEC_SYNC_OLD_OP))
    (clobber (match_scratch:GPR 3 "=&r"))]
   ""
-  {
-    return "1:\\n\\t"
-          "ll.<d>\\t%0,%1\\n\\t"
-          "orn\\t%3,%2,%0\\n\\t"
-          "sc.<d>\\t%3,%1\\n\\t"
-          "beqz\\t%3,1b";
-  }
-  [(set (attr "length") (const_int 16))])
+{
+  output_asm_insn ("1:", operands);
+  output_asm_insn ("ll.<d>\t%0,%1", operands);
+  if (TARGET_32BIT_R)
+    {
+      output_asm_insn ("nor\t%3,%0,$zero", operands);
+      output_asm_insn ("or\t%3,%2,%3", operands);
+    }
+  else
+    output_asm_insn ("orn\t%3,%2,%0", operands);
+  output_asm_insn ("sc.<d>\t%3,%1", operands);
+  output_asm_insn ("beq\t%3,$zero,1b", operands);
+  return "";
+}
+  [(set (attr "length") (if_then_else
+                         (match_test "TARGET_32BIT_R")
+                         (const_int 20)
+                         (const_int 16)))])
 
 (define_mode_iterator ALL_SC [GPR (TI "loongarch_16b_atomic_lock_free_p ()")])
 (define_mode_attr _scq [(SI "") (DI "") (TI "_scq")])
@@ -473,7 +483,7 @@ (define_insn "atomic_exchangeti_scq"
   output_asm_insn ("ld.d\t%t0,%b1,8", operands);
   output_asm_insn ("move\t%3,%z2", operands);
   output_asm_insn ("sc.q\t%3,%t2,%1", operands);
-  output_asm_insn ("beqz\t%3,1b", operands);
+  output_asm_insn ("beq\t%3,$zero,1b", operands);
 
   return "";
 }
@@ -536,7 +546,7 @@ (define_insn "atomic_cas_value_strong<mode>"
 
   output_asm_insn ("or%i3\t%5,$zero,%3", operands);
   output_asm_insn ("sc.<size>\t%5,%1", operands);
-  output_asm_insn ("beqz\t%5,1b", operands);
+  output_asm_insn ("beq\t%5,$zero,1b", operands);
   output_asm_insn ("%T4b\t3f", operands);
   output_asm_insn ("2:", operands);
   output_asm_insn ("%G4", operands);
@@ -822,7 +832,7 @@ (define_insn "atomic_compare_and_swapti_scq"
   output_asm_insn ("sc.q\t%7,%t3,%1", operands);
 
   /* Check if sc.q has done the store.  */
-  output_asm_insn ("beqz\t%7,1b", operands);
+  output_asm_insn ("beq\t%7,$zero,1b", operands);
 
   /* Jump over the mod_f barrier if sc.q has succeeded.  */
   output_asm_insn ("%T4b\t3f", operands);
@@ -976,7 +986,7 @@ (define_insn "atomic_cas_value_exchange_7_<mode>"
         "and\\t%7,%0,%z3\\n\\t"
         "or%i5\\t%7,%7,%5\\n\\t"
         "sc.<size>\\t%7,%1\\n\\t"
-        "beqz\\t%7,1b\\n\\t";
+        "beq\\t%7,$zero,1b\\n\\t";
 }
   [(set (attr "length") (const_int 20))])
 
@@ -1073,7 +1083,7 @@ (define_insn "atomic_fetch_<amop_ti_fetch>ti_scq"
     }
 
   output_asm_insn ("sc.q\t%3,%4,%1", operands);
-  output_asm_insn ("beqz\t%3,1b", operands);
+  output_asm_insn ("beq\t%3,$zero,1b", operands);
 
   return "";
 }
diff --git a/include/longlong.h b/include/longlong.h
index 5ae250f7192..9429e90e0d1 100644
--- a/include/longlong.h
+++ b/include/longlong.h
@@ -594,11 +594,7 @@ extern UDItype __umulsidi3 (USItype, USItype);
 #endif
 
 #ifdef __loongarch__
-# if W_TYPE_SIZE == 32
-#  define count_leading_zeros(count, x)  ((count) = __builtin_clz (x))
-#  define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x))
-#  define COUNT_LEADING_ZEROS_0 32
-# elif W_TYPE_SIZE == 64
+# if W_TYPE_SIZE == 64
 #  define count_leading_zeros(count, x)  ((count) = __builtin_clzll (x))
 #  define count_trailing_zeros(count, x) ((count) = __builtin_ctzll (x))
 #  define COUNT_LEADING_ZEROS_0 64
-- 
2.34.1

Reply via email to