The attached change updates atomic operation support in the PA backend for
hppa-linux.

1) It enables 64-bit sync builtin support,
2) It adds new atomic store patterns that are atomic with respect to the LWS
     compare-and-swap builtins in linux-atomic.c (see the sketch below), and
3) It revises the existing atomic load and store patterns to also use the LWS
     builtins.
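
For context on item 2 (an illustrative sketch, not code from the patch):
the LWS compare-and-swap in libgcc's linux-atomic.c is built on a
kernel-assisted cmpxchg that serializes only against other cmpxchg
operations, not against ordinary stores.  A plain store can therefore be
lost in a race like the following, where lws_cas is a hypothetical
stand-in for the LWS primitive, not a real symbol:

  #include <stdint.h>

  /* Hypothetical stand-in for the kernel-assisted LWS compare-and-swap;
     returns nonzero when the swap succeeded.  */
  extern int lws_cas (int32_t *mem, int32_t oldval, int32_t newval);

  /* Thread B: increment *MEM with a CAS loop, as the LWS builtins do.  */
  void
  increment (int32_t *mem)
  {
    int32_t old;
    do
      old = *mem;
    while (!lws_cas (mem, old, old + 1));
  }

  /* Thread A: a plain store.  The kernel's load/compare/store sequence
     in lws_cas does not serialize against this store, so the value
     written here can be overwritten by the CAS and silently lost.
     Expanding atomic_store* through the same CAS path closes that
     window.  */
  void
  plain_store (int32_t *mem, int32_t val)
  {
    *mem = val;
  }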

Tested on hppa-unknown-linux-gnu with no observed regressions.  Committed
to trunk and gcc-5.
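
As a concrete illustration of item 1 (my example, not part of the patch),
an 8-byte __sync builtin such as the one below now maps on hppa-linux to
the LWS-based __sync_val_compare_and_swap_8 provided by linux-atomic.c in
libgcc:

  long long
  cas64 (long long *p, long long oldval, long long newval)
  {
    return __sync_val_compare_and_swap (p, oldval, newval);
  }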

Dave
--
John David Anglin       dave.ang...@bell.net


2015-09-24  John David Anglin  <dang...@gcc.gnu.org>

        * config/pa/pa-linux.h (HAVE_sync_compare_and_swapdi): Define.
        * config/pa/pa-protos.h (pa_maybe_emit_compare_and_swap_exchange_loop):
        Declare.
        * config/pa/pa.c (pa_init_libfuncs): Init sync libfuncs up to 8 bytes.
        (pa_expand_compare_and_swap_loop): New.
        (pa_maybe_emit_compare_and_swap_exchange_loop): New.
        * config/pa/pa.md (atomic_storeqi, atomic_storehi, atomic_storesi,
        atomic_storesf, atomic_loaddf, atomic_storedf): New expanders.
        (atomic_loaddf_1, atomic_storedf_1): New insn patterns.
        (atomic_loaddi, atomic_loaddi_1, atomic_storedi, atomic_storedi_1):
        Revise.
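
In C terms, the exchange loop that pa_maybe_emit_compare_and_swap_exchange_loop
emits for the store expanders has roughly the following shape (a sketch of
the generated code, not code contained in the patch):

  /* Rough C equivalent of the RTL emitted for atomic_storesi when
     TARGET_SYNC_LIBCALL is set: the store becomes an exchange loop
     that retries until the compare-and-swap succeeds.  */
  void
  atomic_store_si (int *mem, int val)
  {
    int cmp = *mem;         /* plain load from memory, done once */
    int old;
    do
      {
        old = cmp;          /* value expected by this iteration's CAS */
        cmp = __sync_val_compare_and_swap (mem, old, val);
      }
    while (cmp != old);     /* CAS failed; retry with the fresh value */
  }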

Index: config/pa/pa-linux.h
===================================================================
--- config/pa/pa-linux.h        (revision 228100)
+++ config/pa/pa-linux.h        (working copy)
@@ -140,3 +140,4 @@
 #define HAVE_sync_compare_and_swapqi 1
 #define HAVE_sync_compare_and_swaphi 1
 #define HAVE_sync_compare_and_swapsi 1
+#define HAVE_sync_compare_and_swapdi 1
Index: config/pa/pa-protos.h
===================================================================
--- config/pa/pa-protos.h       (revision 228100)
+++ config/pa/pa-protos.h       (working copy)
@@ -79,6 +79,7 @@
 #endif /* ARGS_SIZE_RTX */
 extern int pa_insn_refs_are_delayed (rtx_insn *);
 extern rtx pa_get_deferred_plabel (rtx);
+extern rtx pa_maybe_emit_compare_and_swap_exchange_loop (rtx, rtx, rtx);
 #endif /* RTX_CODE */
 
 extern int pa_and_mask_p (unsigned HOST_WIDE_INT);
Index: config/pa/pa.c
===================================================================
--- config/pa/pa.c      (revision 228100)
+++ config/pa/pa.c      (working copy)
@@ -5749,7 +5749,7 @@
     }
 
   if (TARGET_SYNC_LIBCALL)
-    init_sync_libfuncs (UNITS_PER_WORD);
+    init_sync_libfuncs (8);
 }
 
 /* HP's millicode routines mean something special to the assembler.
@@ -10555,4 +10555,79 @@
     fputs ("\t.end_brtab\n", asm_out_file);
 }
 
+/* This is a helper function for the other atomic operations.  This function
+   emits a loop that contains SEQ that iterates until a compare-and-swap
+   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
+   a set of instructions that takes a value from OLD_REG as an input and
+   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
+   set to the current contents of MEM.  After SEQ, a compare-and-swap will
+   attempt to update MEM with NEW_REG.  The function returns true when the
+   loop was generated successfully.  */
+
+static bool
+pa_expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
+{
+  machine_mode mode = GET_MODE (mem);
+  rtx_code_label *label;
+  rtx cmp_reg, success, oldval;
+
+  /* The loop we want to generate looks like
+
+        cmp_reg = mem;
+      label:
+        old_reg = cmp_reg;
+        seq;
+        (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
+        if (!success)
+          goto label;
+
+     Note that we only do the plain load from memory once.  Subsequent
+     iterations use the value loaded by the compare-and-swap pattern.  */
+
+  label = gen_label_rtx ();
+  cmp_reg = gen_reg_rtx (mode);
+
+  emit_move_insn (cmp_reg, mem);
+  emit_label (label);
+  emit_move_insn (old_reg, cmp_reg);
+  if (seq)
+    emit_insn (seq);
+
+  success = NULL_RTX;
+  oldval = cmp_reg;
+  if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg,
+                                       new_reg, false, MEMMODEL_SYNC_SEQ_CST,
+                                       MEMMODEL_RELAXED))
+    return false;
+
+  if (oldval != cmp_reg)
+    emit_move_insn (cmp_reg, oldval);
+
+  /* Mark this jump predicted not taken.  */
+  emit_cmp_and_jump_insns (success, const0_rtx, EQ, const0_rtx,
+                           GET_MODE (success), 1, label, 0);
+  return true;
+}
+
+/* This function tries to implement an atomic exchange operation using a 
+   compare_and_swap loop. VAL is written to *MEM.  The previous contents of
+   *MEM are returned, using TARGET if possible.  No memory model is required
+   since a compare_and_swap loop is seq-cst.  */
+
+rtx
+pa_maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
+{
+  machine_mode mode = GET_MODE (mem);
+
+  if (can_compare_and_swap_p (mode, true))
+    {
+      if (!target || !register_operand (target, mode))
+        target = gen_reg_rtx (mode);
+      if (pa_expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
+        return target;
+    }
+
+  return NULL_RTX;
+}
+
 #include "gt-pa.h"
Index: config/pa/pa.md
===================================================================
--- config/pa/pa.md     (revision 228100)
+++ config/pa/pa.md     (working copy)
@@ -699,19 +699,99 @@
 ;; doubleword loads and stores are not guaranteed to be atomic
 ;; when referencing the I/O address space.
 
-;; Implement atomic DImode load using 64-bit floating point load and copy.
+;; The kernel cmpxchg operation on linux is not atomic with respect to
+;; memory stores on SMP machines, so we must do stores using a cmpxchg
+;; operation.
 
+;; Implement atomic QImode store using exchange.
+
+(define_expand "atomic_storeqi"
+  [(match_operand:QI 0 "memory_operand")                ;; memory
+   (match_operand:QI 1 "register_operand")              ;; val in
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+  FAIL;
+})
+
+;; Implement atomic HImode store using exchange.
+
+(define_expand "atomic_storehi"
+  [(match_operand:HI 0 "memory_operand")                ;; memory
+   (match_operand:HI 1 "register_operand")              ;; val in
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+  FAIL;
+})
+
+;; Implement atomic SImode store using exchange.
+
+(define_expand "atomic_storesi"
+  [(match_operand:SI 0 "memory_operand")                ;; memory
+   (match_operand:SI 1 "register_operand")              ;; val in
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+  FAIL;
+})
+
+;; Implement atomic SFmode store using exchange.
+
+(define_expand "atomic_storesf"
+  [(match_operand:SF 0 "memory_operand")                ;; memory
+   (match_operand:SF 1 "register_operand")              ;; val in
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+  FAIL;
+})
+
+;; Implement atomic DImode load using 64-bit floating point load.
+
 (define_expand "atomic_loaddi"
   [(match_operand:DI 0 "register_operand")              ;; val out
    (match_operand:DI 1 "memory_operand")                ;; memory
    (match_operand:SI 2 "const_int_operand")]            ;; model
-  "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+  ""
 {
-  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+  enum memmodel model;
+
+  if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+    FAIL;
+
+  model = memmodel_from_int (INTVAL (operands[2]));
   operands[1] = force_reg (SImode, XEXP (operands[1], 0));
-  operands[2] = gen_reg_rtx (DImode);
   expand_mem_thread_fence (model);
-  emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1], operands[2]));
+  emit_insn (gen_atomic_loaddi_1 (operands[0], operands[1]));
   if (is_mm_seq_cst (model))
     expand_mem_thread_fence (model);
   DONE;
@@ -718,27 +798,41 @@
 })
 
 (define_insn "atomic_loaddi_1"
-  [(set (match_operand:DI 0 "register_operand" "=r")
-        (mem:DI (match_operand:SI 1 "register_operand" "r")))
-   (clobber (match_operand:DI 2 "register_operand" "=&f"))]
+  [(set (match_operand:DI 0 "register_operand" "=f,r")
+        (mem:DI (match_operand:SI 1 "register_operand" "r,r")))
+   (clobber (match_scratch:DI 2 "=X,f"))]
   "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
-  "{fldds|fldd} 0(%1),%2\;{fstds|fstd} %2,-16(%%sp)\;{ldws|ldw} 
-16(%%sp),%0\;{ldws|ldw} -12(%%sp),%R0"
-  [(set_attr "type" "move")
-   (set_attr "length" "16")])
+  "@
+   {fldds|fldd} 0(%1),%0
+   {fldds|fldd} 0(%1),%2\n\t{fstds|fstd} %2,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0\n\t{ldws|ldw} -12(%%sp),%R0"
+  [(set_attr "type" "move,move")
+   (set_attr "length" "4,16")])
 
-;; Implement atomic DImode store using copy and 64-bit floating point store.
+;; Implement atomic DImode store.
 
 (define_expand "atomic_storedi"
   [(match_operand:DI 0 "memory_operand")                ;; memory
    (match_operand:DI 1 "register_operand")              ;; val out
    (match_operand:SI 2 "const_int_operand")]            ;; model
-  "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+  ""
 {
-  enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
+  enum memmodel model;
+
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+
+  if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+    FAIL;
+
+  model = memmodel_from_int (INTVAL (operands[2]));
   operands[0] = force_reg (SImode, XEXP (operands[0], 0));
-  operands[2] = gen_reg_rtx (DImode);
   expand_mem_thread_fence (model);
-  emit_insn (gen_atomic_storedi_1 (operands[0], operands[1], operands[2]));
+  emit_insn (gen_atomic_storedi_1 (operands[0], operands[1]));
   if (is_mm_seq_cst (model))
     expand_mem_thread_fence (model);
   DONE;
@@ -745,14 +839,90 @@
 })
 
 (define_insn "atomic_storedi_1"
-  [(set (mem:DI (match_operand:SI 0 "register_operand" "r"))
-        (match_operand:DI 1 "register_operand" "r"))
-   (clobber (match_operand:DI 2 "register_operand" "=&f"))]
+  [(set (mem:DI (match_operand:SI 0 "register_operand" "r,r"))
+        (match_operand:DI 1 "register_operand" "f,r"))
+   (clobber (match_scratch:DI 2 "=X,f"))]
+  "!TARGET_64BIT && !TARGET_SOFT_FLOAT && !TARGET_SYNC_LIBCALL"
+  "@
+   {fstds|fstd} %1,0(%0)
+   {stws|stw} %1,-16(%%sp)\n\t{stws|stw} %R1,-12(%%sp)\n\t{fldds|fldd} -16(%%sp),%2\n\t{fstds|fstd} %2,0(%0)"
+  [(set_attr "type" "move,move")
+   (set_attr "length" "4,16")])
+
+;; Implement atomic DFmode load using 64-bit floating point load.
+
+(define_expand "atomic_loaddf"
+  [(match_operand:DF 0 "register_operand")              ;; val out
+   (match_operand:DF 1 "memory_operand")                ;; memory
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  enum memmodel model;
+
+  if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+    FAIL;
+
+  model = memmodel_from_int (INTVAL (operands[2]));
+  operands[1] = force_reg (SImode, XEXP (operands[1], 0));
+  expand_mem_thread_fence (model);
+  emit_insn (gen_atomic_loaddf_1 (operands[0], operands[1]));
+  if (is_mm_seq_cst (model))
+    expand_mem_thread_fence (model);
+  DONE;
+})
+
+(define_insn "atomic_loaddf_1"
+  [(set (match_operand:DF 0 "register_operand" "=f,r")
+        (mem:DF (match_operand:SI 1 "register_operand" "r,r")))
+   (clobber (match_scratch:DF 2 "=X,f"))]
   "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
-  "{stws|stw} %1,-16(%%sp)\;{stws|stw} %R1,-12(%%sp)\;{fldds|fldd} 
-16(%%sp),%2\;{fstds|fstd} %2,0(%0)"
-  [(set_attr "type" "move")
-   (set_attr "length" "16")])
+  "@
+   {fldds|fldd} 0(%1),%0
+   {fldds|fldd} 0(%1),%2\n\t{fstds|fstd} %2,-16(%%sp)\n\t{ldws|ldw} -16(%%sp),%0\n\t{ldws|ldw} -12(%%sp),%R0"
+  [(set_attr "type" "move,move")
+   (set_attr "length" "4,16")])
 
+;; Implement atomic DFmode store using 64-bit floating point store.
+
+(define_expand "atomic_storedf"
+  [(match_operand:DF 0 "memory_operand")                ;; memory
+   (match_operand:DF 1 "register_operand")              ;; val in
+   (match_operand:SI 2 "const_int_operand")]            ;; model
+  ""
+{
+  enum memmodel model;
+
+  if (TARGET_SYNC_LIBCALL)
+    {
+      rtx mem = operands[0];
+      rtx val = operands[1];
+      if (pa_maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val))
+       DONE;
+    }
+
+  if (TARGET_64BIT || TARGET_SOFT_FLOAT)
+    FAIL;
+
+  model = memmodel_from_int (INTVAL (operands[2]));
+  operands[0] = force_reg (SImode, XEXP (operands[0], 0));
+  expand_mem_thread_fence (model);
+  emit_insn (gen_atomic_storedf_1 (operands[0], operands[1]));
+  if (is_mm_seq_cst (model))
+    expand_mem_thread_fence (model);
+  DONE;
+})
+
+(define_insn "atomic_storedf_1"
+  [(set (mem:DF (match_operand:SI 0 "register_operand" "r,r"))
+        (match_operand:DF 1 "register_operand" "f,r"))
+   (clobber (match_scratch:DF 2 "=X,f"))]
+  "!TARGET_64BIT && !TARGET_SOFT_FLOAT"
+  "@
+   {fstds|fstd} %1,0(%0)
+   {stws|stw} %1,-16(%%sp)\n\t{stws|stw} %R1,-12(%%sp)\n\t{fldds|fldd} -16(%%sp),%2\n\t{fstds|fstd} %2,0(%0)"
+  [(set_attr "type" "move,move")
+   (set_attr "length" "4,16")])
+
 ;; Compare instructions.
 ;; This controls RTL generation and register allocation.
 
