Add support for atomic_<amop><mode>, atomic_fetch_<amop><mode> and
atomic_exchange<mode> on LA32.
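On LA64 these operations map directly to the AM* atomic instructions,
but LA32 does not provide them.  Turn each pattern into a
define_expand that dispatches either to the old AM*-based define_insn
(now named la64_* and gated on TARGET_64BIT) or to a new la32_*
define_insn that open-codes the operation as an LL.W/SC.W retry loop.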
gcc/ChangeLog:
	* config/loongarch/sync.md (atomic_<amop><mode>): Turn into a
	define_expand.
	(la64_atomic_<amop><mode>): New define_insn.
	(la32_atomic_<amop>si): New define_insn.
	(atomic_fetch_<amop><mode>): Turn into a define_expand.
	(la64_atomic_fetch_<amop><mode>): New define_insn.
	(la32_atomic_fetch_<amop>si): New define_insn.
	(atomic_exchange<mode>): Turn into a define_expand.
	(la64_atomic_exchange<mode>): New define_insn.
	(la32_atomic_exchangesi): New define_insn.
---
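Notes:

As a sanity check (not part of the commit), a sequentially consistent
fetch-and-add such as

  int
  fetch_add (int *p, int v)
  {
    return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST);
  }

should now expand on LA32 through la32_atomic_fetch_addsi into an
LL/SC retry loop of roughly this shape (register allocation
illustrative):

  1:
	ll.w	$a3,$a0,0	# load the old value, open the reservation
	add.w	$a4,$a1,$a3	# old + v into the scratch register
	sc.w	$a4,$a0,0	# try the store; $a4 = 1 on success, 0 on failure
	beq	$zero,$a4,1b	# retry if the store-conditional failed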
gcc/config/loongarch/sync.md | 120 +++++++++++++++++++++++++++++++++--
1 file changed, 113 insertions(+), 7 deletions(-)
diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
index 2ef8e88d8d7..a024c708414 100644
--- a/gcc/config/loongarch/sync.md
+++ b/gcc/config/loongarch/sync.md
@@ -45,6 +45,10 @@ (define_code_iterator any_atomic [plus ior xor and])
;; particular code.
(define_code_attr amop [(ior "or") (xor "xor") (and "and") (plus "add")])
+;; Map a code to the instruction used to perform that operation inside
+;; an LA32 LL/SC loop.
+(define_code_attr atomic_optab_insn
+  [(plus "add.w") (ior "or") (xor "xor") (and "and")])
+
;; Memory barriers.
(define_expand "mem_thread_fence"
@@ -276,17 +280,48 @@ (define_expand "atomic_storeti"
DONE;
})
-(define_insn "atomic_<amop><mode>"
+(define_expand "atomic_<amop><mode>"
+ [(any_atomic:GPR (match_operand:GPR 0 "memory_operand") ;; mem location
+ (match_operand:GPR 1 "reg_or_0_operand")) ;; value for op
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ ""
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_la64_atomic_<amop><mode> (operands[0], operands[1],
operands[2]));
+ else
+ emit_insn (gen_la32_atomic_<amop>si (operands[0], operands[1],
operands[2]));
+ DONE;
+})
+
+(define_insn "la64_atomic_<amop><mode>"
[(set (match_operand:GPR 0 "memory_operand" "+ZB")
(unspec_volatile:GPR
[(any_atomic:GPR (match_dup 0)
(match_operand:GPR 1 "reg_or_0_operand" "rJ"))
(match_operand:SI 2 "const_int_operand")] ;; model
UNSPEC_SYNC_OLD_OP))]
- ""
+ "TARGET_64BIT"
"am<amop>%A2.<size>\t$zero,%z1,%0"
[(set (attr "length") (const_int 4))])
+(define_insn "la32_atomic_<amop>si"
+ [(set (match_operand:SI 0 "memory_operand" "+ZB")
+ (unspec_volatile:SI
+ [(any_atomic:SI (match_dup 0)
+ (match_operand:SI 1 "reg_or_0_operand" "rJ"))
+ (match_operand:SI 2 "const_int_operand")] ;; model
+ UNSPEC_SYNC_OLD_OP))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "!TARGET_64BIT"
+{
+ return "1:\n\t"
+ "ll.w\t%3,%0\n\t"
+ "<atomic_optab_insn>\t%3,%z1,%3\n\t"
+ "sc.w\t%3,%0\n\t"
+ "beq\t$zero,%3,1b\n\t";
+}
+ [(set (attr "length") (const_int 16))])
+
(define_insn "atomic_add<mode>"
[(set (match_operand:SHORT 0 "memory_operand" "+ZB")
(unspec_volatile:SHORT
@@ -298,7 +333,23 @@ (define_insn "atomic_add<mode>"
"amadd%A2.<size>\t$zero,%z1,%0"
[(set (attr "length") (const_int 4))])
-(define_insn "atomic_fetch_<amop><mode>"
+(define_expand "atomic_fetch_<amop><mode>"
+ [(match_operand:GPR 0 "register_operand") ;; old value at mem
+ (any_atomic:GPR (match_operand:GPR 1 "memory_operand") ;; mem location
+ (match_operand:GPR 2 "reg_or_0_operand")) ;; value for op
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ ""
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_la64_atomic_fetch_<amop><mode> (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_la32_atomic_fetch_<amop>si (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+ })
+
+(define_insn "la64_atomic_fetch_<amop><mode>"
[(set (match_operand:GPR 0 "register_operand" "=&r")
(match_operand:GPR 1 "memory_operand" "+ZB"))
(set (match_dup 1)
@@ -307,10 +358,30 @@ (define_insn "atomic_fetch_<amop><mode>"
(match_operand:GPR 2 "reg_or_0_operand" "rJ"))
(match_operand:SI 3 "const_int_operand")] ;; model
UNSPEC_SYNC_OLD_OP))]
- ""
+ "TARGET_64BIT"
"am<amop>%A3.<size>\t%0,%z2,%1"
[(set (attr "length") (const_int 4))])
+(define_insn "la32_atomic_fetch_<amop>si"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+ZB"))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(any_atomic:SI (match_dup 1)
+ (match_operand:SI 2 "reg_or_0_operand" "rJ"))
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPEC_SYNC_OLD_OP))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "!TARGET_64BIT"
+{
+ return "1:\n\t"
+ "ll.w\t%0,%1\n\t"
+ "<atomic_optab_insn>\t%4,%z2,%0\n\t"
+ "sc.w\t%4,%1\n\t"
+ "beq\t$zero,%4,1b\n\t";
+}
+ [(set (attr "length") (const_int 16))])
+
(define_insn "atomic_fetch_nand_mask_inverted<mode>"
[(set (match_operand:GPR 0 "register_operand" "=&r")
(match_operand:GPR 1 "memory_operand" "+ZC"))
@@ -354,7 +425,23 @@ (define_expand "atomic_fetch_nand<mode>"
DONE;
})
-(define_insn "atomic_exchange<mode>"
+(define_expand "atomic_exchange<mode>"
+ [(match_operand:GPR 0 "register_operand") ;; old value at mem
+ (match_operand:GPR 1 "memory_operand") ;; mem location
+ (match_operand:GPR 2 "register_operand") ;; value for op
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ ""
+ {
+ if (TARGET_64BIT)
+ emit_insn (gen_la64_atomic_exchange<mode> (operands[0], operands[1],
+ operands[2], operands[3]));
+ else
+ emit_insn (gen_la32_atomic_exchangesi (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+ })
+
+(define_insn "la64_atomic_exchange<mode>"
[(set (match_operand:GPR 0 "register_operand" "=&r")
(unspec_volatile:GPR
[(match_operand:GPR 1 "memory_operand" "+ZB")
@@ -362,10 +449,29 @@ (define_insn "atomic_exchange<mode>"
UNSPEC_SYNC_EXCHANGE))
(set (match_dup 1)
(match_operand:GPR 2 "register_operand" "r"))]
- ""
+ "TARGET_64BIT"
"amswap%A3.<size>\t%0,%z2,%1"
[(set (attr "length") (const_int 4))])
+(define_insn "la32_atomic_exchangesi"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (unspec_volatile:SI
+ [(match_operand:SI 1 "memory_operand" "+ZB")
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPEC_SYNC_EXCHANGE))
+ (set (match_dup 1)
+ (match_operand:SI 2 "register_operand" "r"))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "!TARGET_64BIT"
+{
+ return "1:\n\t"
+ "ll.w\t%0,%1\n\t"
+ "or\t%4,$zero,%2\n\t"
+ "sc.w\t%4,%1\n\t"
+ "beq\t$zero,%4,1b\n\t";
+}
+ [(set (attr "length") (const_int 16))])
+
(define_insn "atomic_exchangeti_scq"
[(set (match_operand:TI 0 "register_operand" "=&r")
(unspec_volatile:TI
@@ -531,7 +637,7 @@ (define_expand "atomic_fetch_<amop><mode>"
(any_bitwise (match_operand:SHORT 1 "memory_operand" "+ZB") ;; memory
(match_operand:SHORT 2 "reg_or_0_operand" "rJ")) ;; val
(match_operand:SI 3 "const_int_operand" "")] ;; model
- ""
+ "TARGET_64BIT || TARGET_32BIT_S"
{
/* We have no QI/HImode bitwise atomics, so use the address LSBs to form
a mask, then use an aligned SImode atomic. */
--
2.34.1