I committed the following patch, which implements svabs, svnot, svneg
and svsqrt, to the aarch64/sve-acle-branch branch.

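For anyone trying the branch, here is a minimal usage sketch of the new
intrinsics. The overloaded _m/_z/_x forms match the tests added below;
including <arm_sve.h> is my assumption about the header name on this
branch:

#include <arm_sve.h>

svint32_t
use_int_ops (svbool_t pg, svint32_t a, svint32_t b)
{
  svint32_t n = svneg_m (a, pg, b); /* _m: inactive lanes keep A's value.  */
  return svnot_x (pg, n);           /* _x: inactive lanes are unspecified.  */
}

svfloat32_t
use_fp_ops (svbool_t pg, svfloat32_t x)
{
  svfloat32_t t = svabs_z (pg, x);  /* _z: inactive lanes are zeroed.  */
  return svsqrt_x (pg, t);          /* svsqrt is float-only.  */
}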
Thanks,
Kugan
From 2af9609a58cf7efbed93f15413224a2552b9696d Mon Sep 17 00:00:00 2001
From: Kugan Vivekanandarajah <kugan.vivekanandarajah@linaro.org>
Date: Wed, 16 Jan 2019 07:45:52 +1100
Subject: [PATCH] [SVE ACLE] svabs, svnot, svneg and svsqrt implementation

Change-Id: Iec1e9491e4a84a351702550babedd0f17968617e
---
 gcc/config/aarch64/aarch64-sve-builtins.c          | 126 ++++++++++++++++++++-
 gcc/config/aarch64/aarch64-sve-builtins.def        |   4 +
 gcc/config/aarch64/aarch64-sve.md                  |  52 +++++++--
 gcc/config/aarch64/iterators.md                    |  16 ++-
 .../gcc.target/aarch64/sve-acle/asm/abs_f16.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_f32.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_f64.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_s16.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_s32.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_s64.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/abs_s8.c       |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_f16.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_f32.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_f64.c      | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_s16.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_s32.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_s64.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/neg_s8.c       |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_s16.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_s32.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_s64.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_s8.c       |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_u16.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_u32.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_u64.c      |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/not_u8.c       |  83 ++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/sqrt_f16.c     | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/sqrt_f32.c     | 122 ++++++++++++++++++++
 .../gcc.target/aarch64/sve-acle/asm/sqrt_f64.c     | 122 ++++++++++++++++++++
 29 files changed, 2610 insertions(+), 14 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u64.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u8.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f16.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f32.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f64.c

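A note on the lowering, before the diff itself: _x-predicated calls expand
through the renamed @aarch64_pred_<optab><mode> patterns, while _m and _z
calls go through the new @cond_<optab><mode> patterns, which wrap the
operation in an UNSPEC_SEL against the fallback value and emit movprfx when
the destination is not tied to that fallback. For example, the abs_f16.c
expectations below say that svabs_f16_z (p0, z1) should assemble to roughly:

	movprfx	z0.h, p0/z, z0.h
	fabs	z0.h, p0/m, z1.h
	ret
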
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.c b/gcc/config/aarch64/aarch64-sve-builtins.c
index c300957..d663de4 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.c
+++ b/gcc/config/aarch64/aarch64-sve-builtins.c
@@ -101,6 +101,9 @@ enum function_shape {
   /* sv<t0>_t svfoo[_n]_t0(<t0>_t).  */
   SHAPE_unary_n,
 
+  /* sv<t0>_t svfoo[_t0](sv<t0>_t).  */
+  SHAPE_unary,
+
   /* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t).  */
   SHAPE_binary_scalar,
 
@@ -151,6 +154,7 @@ typedef enum type_suffix type_suffix_pair[2];
 
 /* Enumerates the function base names, such as "svadd".  */
 enum function {
   FUNC_svabd,
+  FUNC_svabs,
   FUNC_svadd,
   FUNC_svasrd,
@@ -163,9 +167,12 @@ enum function {
   FUNC_svmls,
   FUNC_svmsb,
   FUNC_svmul,
+  FUNC_svneg,
+  FUNC_svnot,
   FUNC_svptrue,
   FUNC_svqadd,
   FUNC_svqsub,
+  FUNC_svsqrt,
   FUNC_svsub,
   FUNC_svsubr
 };
@@ -302,6 +309,7 @@ private:
   void build_all (function_signature, const function_group &, function_mode,
 		  bool = false);
   void sig_inherent (const function_instance &, vec<tree> &);
+  void sig_00 (const function_instance &, vec<tree> &);
   void sig_n_00 (const function_instance &, vec<tree> &);
   void scalar_sig_000 (const function_instance &, vec<tree> &);
   void sig_000 (const function_instance &, vec<tree> &);
@@ -440,6 +448,7 @@ public:
 
 private:
   rtx expand_abd ();
+  rtx expand_abs ();
   rtx expand_add (unsigned int);
   rtx expand_asrd ();
   rtx expand_dup ();
@@ -451,11 +460,15 @@ private:
   rtx expand_mla ();
   rtx expand_mls ();
   rtx expand_mul ();
+  rtx expand_neg ();
+  rtx expand_not ();
   rtx expand_ptrue ();
   rtx expand_qadd ();
   rtx expand_qsub ();
+  rtx expand_sqrt ();
   rtx expand_sub (bool);
 
+  rtx expand_pred_op (rtx_code, int);
   rtx expand_signed_pred_op (rtx_code, rtx_code, int);
   rtx expand_signed_pred_op (int, int, int);
   rtx expand_via_unpred_direct_optab (optab);
@@ -556,6 +569,11 @@ static const type_suffix_info type_suffixes[NUM_TYPE_SUFFIXES + 1] = {
 #define TYPES_all_data(S, D) \
   TYPES_all_float (S, D), TYPES_all_integer (S, D)
 
+/*     _f16 _f32 _f64
+   _s8 _s16 _s32 _s64.  */
+#define TYPES_all_signed_and_float(S, D) \
+  TYPES_all_float (S, D), TYPES_all_signed (S, D)
+
 /* Describe a pair of type suffixes in which only the first is used.  */
 #define DEF_VECTOR_TYPE(X) { TYPE_SUFFIX_ ## X, NUM_TYPE_SUFFIXES }
 
@@ -576,6 +594,7 @@ DEF_SVE_TYPES_ARRAY (all_signed);
 DEF_SVE_TYPES_ARRAY (all_float);
 DEF_SVE_TYPES_ARRAY (all_integer);
 DEF_SVE_TYPES_ARRAY (all_data);
+DEF_SVE_TYPES_ARRAY (all_signed_and_float);
 
 /* Used by functions in aarch64-sve-builtins.def that have no governing
    predicate.  */
@@ -744,6 +763,11 @@ arm_sve_h_builder::build (const function_group &group)
 {
   switch (group.shape)
     {
+    case SHAPE_unary:
+      add_overloaded_functions (group, MODE_none);
+      build_all (&arm_sve_h_builder::sig_00, group, MODE_none);
+      break;
+
     case SHAPE_unary_n:
       build_all (&arm_sve_h_builder::sig_n_00, group, MODE_n, true);
       break;
@@ -810,11 +834,21 @@ arm_sve_h_builder::sig_inherent (const function_instance &instance,
   types.quick_push (instance.vector_type (0));
 }
 
+/* Describe the signature "sv<t0>_t svfoo[_t0](sv<t0>_t)"
+   for INSTANCE in TYPES.  */
+void
+arm_sve_h_builder::sig_00 (const function_instance &instance,
+			   vec<tree> &types)
+{
+  types.quick_push (instance.vector_type (0));
+  types.quick_push (instance.vector_type (0));
+}
+
 /* Describe the signature "sv<t0>_t svfoo[_n_t0](<t0>_t)"
    for INSTANCE in TYPES.  */
 void
 arm_sve_h_builder::sig_n_00 (const function_instance &instance,
-			      vec<tree> &types)
+			     vec<tree> &types)
 {
   types.quick_push (instance.vector_type (0));
   types.quick_push (instance.scalar_type (0));
@@ -1025,6 +1059,7 @@ arm_sve_h_builder::get_attributes (const function_instance &instance)
   switch (function_groups[instance.group].func)
     {
     case FUNC_svabd:
+    case FUNC_svabs:
     case FUNC_svadd:
     case FUNC_svasrd:
     case FUNC_svdup:
@@ -1036,8 +1071,11 @@ arm_sve_h_builder::get_attributes (const function_instance &instance)
     case FUNC_svmls:
     case FUNC_svmsb:
     case FUNC_svmul:
+    case FUNC_svneg:
+    case FUNC_svnot:
     case FUNC_svqadd:
     case FUNC_svqsub:
+    case FUNC_svsqrt:
     case FUNC_svsub:
     case FUNC_svsubr:
       if (type_suffixes[instance.types[0]].integer_p)
@@ -1075,6 +1113,7 @@ arm_sve_h_builder::get_explicit_types (function_shape shape)
     case SHAPE_unary_n:
     case SHAPE_binary_scalar:
       return 1;
+    case SHAPE_unary:
     case SHAPE_binary_opt_n:
     case SHAPE_ternary_opt_n:
     case SHAPE_shift_right_imm:
@@ -1142,6 +1181,8 @@ function_resolver::resolve ()
 {
   switch (m_group.shape)
     {
+    case SHAPE_unary:
+      return resolve_uniform (1);
     case SHAPE_binary_opt_n:
       return resolve_uniform (2);
     case SHAPE_ternary_opt_n:
@@ -1228,11 +1269,25 @@ function_resolver::check_first_vector_argument (unsigned int nops,
   nargs = nops;
   type = NUM_VECTOR_TYPES;
 
+  /* Unary merge (_m) operations take an extra leading argument: a vector
+     with the same type as the result.  */
+  if (nops == 1 && m_rfn.instance.pred == PRED_m)
+    nargs += 1;
   if (m_rfn.instance.pred != PRED_none)
     nargs += 1;
   if (!check_num_arguments (nargs))
     return false;
 
+  /* For unary merge operations, check the leading vector argument
+     and move past it.  */
+  if (nops == 1 && m_rfn.instance.pred == PRED_m)
+    {
+      type = require_vector_type (i);
+      if (type == NUM_VECTOR_TYPES)
+	return false;
+      i += 1;
+    }
+
   /* Check the predicate argument.  */
   if (m_rfn.instance.pred != PRED_none)
     {
@@ -1241,11 +1296,13 @@ function_resolver::check_first_vector_argument (unsigned int nops,
       i += 1;
     }
 
-  /* The next argument is always a vector.  */
-  type = require_vector_type (i);
   if (type == NUM_VECTOR_TYPES)
-    return false;
-
+    {
+      /* Otherwise the next argument is a vector that determines the type.  */
+      type = require_vector_type (i);
+      if (type == NUM_VECTOR_TYPES)
+	return false;
+    }
   return true;
 }
 
@@ -1427,6 +1484,7 @@ function_checker::check ()
     case SHAPE_shift_right_imm:
       return check_shift_right_imm ();
 
+    case SHAPE_unary:
     case SHAPE_unary_n:
     case SHAPE_inherent:
     case SHAPE_binary_opt_n:
@@ -1611,6 +1669,7 @@ gimple_folder::fold ()
   switch (m_group.func)
     {
     case FUNC_svabd:
+    case FUNC_svabs:
     case FUNC_svadd:
     case FUNC_svasrd:
     case FUNC_svdup:
@@ -1622,8 +1681,11 @@ gimple_folder::fold ()
     case FUNC_svmls:
     case FUNC_svmsb:
     case FUNC_svmul:
+    case FUNC_svneg:
+    case FUNC_svnot:
     case FUNC_svqadd:
     case FUNC_svqsub:
+    case FUNC_svsqrt:
     case FUNC_svsub:
     case FUNC_svsubr:
       return NULL;
@@ -1685,6 +1747,9 @@ function_expander::expand ()
     case FUNC_svabd:
       return expand_abd ();
 
+    case FUNC_svabs:
+      return expand_abs ();
+
     case FUNC_svadd:
       return expand_add (1);
 
@@ -1718,6 +1783,12 @@ function_expander::expand ()
     case FUNC_svmul:
       return expand_mul ();
 
+    case FUNC_svneg:
+      return expand_neg ();
+
+    case FUNC_svnot:
+      return expand_not ();
+
     case FUNC_svptrue:
       return expand_ptrue ();
 
@@ -1727,6 +1798,9 @@ function_expander::expand ()
     case FUNC_svqsub:
       return expand_qsub ();
 
+    case FUNC_svsqrt:
+      return expand_sqrt ();
+
     case FUNC_svsub:
       return expand_sub (false);
 
@@ -1744,6 +1818,13 @@ function_expander::expand_abd ()
 				UNSPEC_COND_FABD);
 }
 
+/* Expand a call to svabs.  */
+rtx
+function_expander::expand_abs ()
+{
+  return expand_pred_op (ABS, UNSPEC_COND_FABS);
+}
+
 /* Expand a call to svadd, or svsub(r) with a negated operand.
    MERGE_ARGNO is the argument that should be used as the fallback
    value in a merging operation.  */
@@ -1913,6 +1994,20 @@ function_expander::expand_mul ()
     return expand_via_pred_direct_optab (cond_smul_optab);
 }
 
+/* Expand a call to svneg.  */
+rtx
+function_expander::expand_neg ()
+{
+  return expand_pred_op (NEG, UNSPEC_COND_FNEG);
+}
+
+/* Expand a call to svnot.  */
+rtx
+function_expander::expand_not ()
+{
+  return expand_pred_op (NOT, -1);
+}
+
 /* Expand a call to sqadd.  */
 rtx
 function_expander::expand_qadd ()
@@ -1942,6 +2037,14 @@ function_expander::expand_ptrue ()
   return builder.build ();
 }
 
+/* Expand a call to svsqrt.  */
+rtx
+function_expander::expand_sqrt ()
+{
+  gcc_checking_assert (!type_suffixes[m_fi.types[0]].integer_p);
+  return expand_pred_op (UNKNOWN, UNSPEC_COND_FSQRT);
+}
+
 /* Expand a call to svsub or svsubr; REVERSED_P says which.  */
 rtx
 function_expander::expand_sub (bool reversed_p)
@@ -2091,6 +2194,19 @@ function_expander::expand_via_pred_x_insn (insn_code icode)
   return generate_insn (icode);
 }
 
+/* Implement the call using an @aarch64_pred instruction for _x
+   predication and a @cond instruction for _z and _m predication.
+   The integer instructions are parameterized by an rtx_code while
+   the floating-point instructions are parameterized by an unspec code.
+   CODE is the rtx code to use for integer operations and UNSPEC_COND
+   is the unspec code to use for floating-point operations.  There is
+   no distinction between signed and unsigned operations.  */
+rtx
+function_expander::expand_pred_op (rtx_code code, int unspec_cond)
+{
+  return expand_signed_pred_op (code, code, unspec_cond);
+}
+
 /* Implement the call using an @aarch64_cond instruction for _x
    predication and a @cond instruction for _z and _m predication.
    The integer instructions are parameterized by an rtx_code while
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.def b/gcc/config/aarch64/aarch64-sve-builtins.def
index d225d1c4..83dc8e5 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins.def
@@ -61,6 +61,7 @@ DEF_SVE_TYPE_SUFFIX (u64, svuint64_t, 64)
 
 /* List of functions, in alphabetical order.  */
 DEF_SVE_FUNCTION (svabd, binary_opt_n, all_data, mxz)
+DEF_SVE_FUNCTION (svabs, unary, all_signed_and_float, mxz)
 DEF_SVE_FUNCTION (svadd, binary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svasrd, shift_right_imm, all_signed, mxz)
 DEF_SVE_FUNCTION (svdup, unary_n, all_data, mxznone)
@@ -72,9 +73,12 @@ DEF_SVE_FUNCTION (svmsb, ternary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svmla, ternary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svmls, ternary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svmul, binary_opt_n, all_data, mxz)
+DEF_SVE_FUNCTION (svneg, unary, all_signed_and_float, mxz)
+DEF_SVE_FUNCTION (svnot, unary, all_integer, mxz)
 DEF_SVE_FUNCTION (svptrue, inherent, all_pred, none)
 DEF_SVE_FUNCTION (svqadd, binary_opt_n, all_data, none)
 DEF_SVE_FUNCTION (svqsub, binary_opt_n, all_data, none)
+DEF_SVE_FUNCTION (svsqrt, unary, all_float, mxz)
 DEF_SVE_FUNCTION (svsub, binary_opt_n, all_data, mxz)
 DEF_SVE_FUNCTION (svsubr, binary_opt_n, all_data, mxz)
 
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index 2d66549..d8ec4ad 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -1270,8 +1270,8 @@
   }
 )
 
-;; NEG, NOT and POPCOUNT predicated with a PTRUE.
-(define_insn "*<optab><mode>2"
+;; NEG, NOT, ABS and POPCOUNT predicated with a PTRUE.
+(define_insn "@aarch64_pred_<optab><mode>"
   [(set (match_operand:SVE_I 0 "register_operand" "=w")
 	(unspec:SVE_I
 	  [(match_operand:<VPRED> 1 "register_operand" "Upl")
@@ -1282,6 +1282,22 @@
   "<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
 )
 
+;; Predicated NEG, NOT, ABS and POPCOUNT with select.
+(define_insn "@cond_<optab><mode>"
+  [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+	(unspec:SVE_I
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+	   (SVE_INT_UNARY:SVE_I (match_operand:SVE_I 2 "register_operand" "w, w, w"))
+	   (match_operand:SVE_I 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+	  UNSPEC_SEL))]
+  "TARGET_SVE"
+  "@
+   <sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+   movprfx\t%0, %3\;<sve_int_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  [(set_attr "movprfx" "*,yes,yes")]
+)
+
 ;; Vector AND, ORR and XOR.
 (define_insn "<optab><mode>3"
   [(set (match_operand:SVE_I 0 "register_operand" "=w, w")
@@ -2864,25 +2880,45 @@
   [(set (match_operand:SVE_F 0 "register_operand")
 	(unspec:SVE_F
 	  [(match_dup 2)
-	   (SVE_FP_UNARY:SVE_F (match_operand:SVE_F 1 "register_operand"))]
-	  UNSPEC_MERGE_PTRUE))]
+	   (const_int SVE_ALLOW_NEW_FAULTS)
+	   (match_operand:SVE_F 1 "register_operand")]
+	  SVE_COND_FP_UNARY))]
   "TARGET_SVE"
   {
     operands[2] = force_reg (<VPRED>mode, CONSTM1_RTX (<VPRED>mode));
   }
 )
 
-;; FNEG, FABS and FSQRT predicated with a PTRUE.
-(define_insn "*<optab><mode>2"
+;; Predicated FNEG, FABS and FSQRT.
+(define_insn "@aarch64_pred_<optab><mode>"
   [(set (match_operand:SVE_F 0 "register_operand" "=w")
 	(unspec:SVE_F
 	  [(match_operand:<VPRED> 1 "register_operand" "Upl")
-	   (SVE_FP_UNARY:SVE_F (match_operand:SVE_F 2 "register_operand" "w"))]
-	  UNSPEC_MERGE_PTRUE))]
+	   (match_operand:SI 3 "const_int_operand" "i")
+	   (match_operand:SVE_F 2 "register_operand" "w")]
+	  SVE_COND_FP_UNARY))]
   "TARGET_SVE"
   "<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
 )
 
+;; Predicated FNEG, FABS and FSQRT with select.
+(define_insn "@cond_<optab><mode>"
+  [(set (match_operand:SVE_F 0 "register_operand" "=w, &w, ?&w")
+	(unspec:SVE_F
+	  [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+	   (unspec:SVE_F
+	     [(match_operand:SVE_F 2 "register_operand" "w, w, w")]
+	     SVE_COND_FP_UNARY)
+	   (match_operand:SVE_F 3 "aarch64_simd_reg_or_zero" "0, Dz, w")]
+	  UNSPEC_SEL))]
+  "TARGET_SVE"
+  "@
+   <sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+   movprfx\t%0.<Vetype>, %1/z, %0.<Vetype>\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>
+   movprfx\t%0, %3\;<sve_fp_op>\t%0.<Vetype>, %1/m, %2.<Vetype>"
+  [(set_attr "movprfx" "*,yes,yes")]
+)
+
 ;; Unpredicated FRINTy.
 (define_expand "<frint_pattern><mode>2"
   [(set (match_operand:SVE_F 0 "register_operand")
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 3a05fc7..1a3f539 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -465,6 +465,9 @@
     UNSPEC_XORF		; Used in aarch64-sve.md.
     UNSPEC_SMUL_HIGHPART ; Used in aarch64-sve.md.
     UNSPEC_UMUL_HIGHPART ; Used in aarch64-sve.md.
+    UNSPEC_COND_FABS	; Used in aarch64-sve.md.
+    UNSPEC_COND_FNEG	; Used in aarch64-sve.md.
+    UNSPEC_COND_FSQRT	; Used in aarch64-sve.md.
     UNSPEC_COND_ADD	; Used in aarch64-sve.md.
     UNSPEC_COND_SUB	; Used in aarch64-sve.md.
     UNSPEC_COND_SABD	; Used in aarch64-sve.md.
@@ -1214,7 +1217,7 @@
 (define_code_iterator FAC_COMPARISONS [lt le ge gt])
 
 ;; SVE integer unary operations.
-(define_code_iterator SVE_INT_UNARY [neg not popcount])
+(define_code_iterator SVE_INT_UNARY [neg not popcount abs])
 
 ;; SVE floating-point unary operations.
 (define_code_iterator SVE_FP_UNARY [neg abs sqrt])
@@ -1412,6 +1415,7 @@
 			      (ior "orr")
 			      (xor "eor")
 			      (not "not")
+			      (abs "abs")
 			      (popcount "cnt")])
 
 (define_code_attr sve_int_op_rev [(plus "add")
@@ -1582,6 +1586,10 @@
 (define_int_iterator SVE_COND_MAXMIN [UNSPEC_COND_FMAXNM UNSPEC_COND_FMINNM
 				      UNSPEC_COND_FMAX UNSPEC_COND_FMIN])
 
+(define_int_iterator SVE_COND_FP_UNARY [UNSPEC_COND_FABS
+					UNSPEC_COND_FNEG
+					UNSPEC_COND_FSQRT])
+
 (define_int_iterator SVE_COND_IABD [UNSPEC_COND_SABD UNSPEC_COND_UABD])
 (define_int_iterator SVE_COND_FABD [UNSPEC_COND_FABD])
 
@@ -1617,6 +1625,9 @@
 			(UNSPEC_ANDV "and")
 			(UNSPEC_IORV "ior")
 			(UNSPEC_XORV "xor")
+			(UNSPEC_COND_FABS "abs")
+			(UNSPEC_COND_FNEG "neg")
+			(UNSPEC_COND_FSQRT "sqrt")
 			(UNSPEC_COND_MUL "mul")
 			(UNSPEC_COND_DIV "div")
 			(UNSPEC_COND_FMAX "smax_nan")
@@ -1865,6 +1876,9 @@
 
 (define_int_attr sve_fp_op [(UNSPEC_COND_MUL "fmul")
 			    (UNSPEC_COND_DIV "fdiv")
+			    (UNSPEC_COND_FABS "fabs")
+			    (UNSPEC_COND_FNEG "fneg")
+			    (UNSPEC_COND_FSQRT "fsqrt")
 			    (UNSPEC_COND_FMAX "fmax")
 			    (UNSPEC_COND_FMIN "fmin")
 			    (UNSPEC_COND_FMAXNM "fmaxnm")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f16.c
new file mode 100644
index 0000000..c74aa54
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f16.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_f16_m_tied12:
+**	fabs	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_m_tied12, svfloat16_t,
+		z0 = svabs_f16_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_f16_m_tied1:
+**	fabs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_m_tied1, svfloat16_t,
+		z0 = svabs_f16_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_f16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fabs	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_m_tied2, svfloat16_t,
+		z0 = svabs_f16_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_f16_m_untied:
+**	movprfx	z0, z2
+**	fabs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_m_untied, svfloat16_t,
+		z0 = svabs_f16_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_f16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fabs	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_z_tied1, svfloat16_t,
+		z0 = svabs_f16_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fabs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_z_untied, svfloat16_t,
+		z0 = svabs_f16_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_f16_x_tied1:
+**	fabs	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_x_tied1, svfloat16_t,
+		z0 = svabs_f16_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_f16_x_untied:
+**	fabs	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f16_x_untied, svfloat16_t,
+		z2 = svabs_f16_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
+/*
+** ptrue_abs_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fabs	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f16_x_tied1, svfloat16_t,
+		z0 = svabs_f16_x (svptrue_b16 (), z0),
+		z0 = svabs_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_abs_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fabs	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f16_x_untied, svfloat16_t,
+		z2 = svabs_f16_x (svptrue_b16 (), z0),
+		z2 = svabs_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_b8_abs_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fabs	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f16_x_tied1, svfloat16_t,
+		z0 = svabs_f16_x (svptrue_b8 (), z0),
+		z0 = svabs_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_abs_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fabs	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f16_x_untied, svfloat16_t,
+		z2 = svabs_f16_x (svptrue_b8 (), z0),
+		z2 = svabs_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f32.c
new file mode 100644
index 0000000..457b78b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f32.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_f32_m_tied12:
+**	fabs	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_m_tied12, svfloat32_t,
+		z0 = svabs_f32_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_f32_m_tied1:
+**	fabs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_m_tied1, svfloat32_t,
+		z0 = svabs_f32_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_f32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fabs	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_m_tied2, svfloat32_t,
+		z0 = svabs_f32_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_f32_m_untied:
+**	movprfx	z0, z2
+**	fabs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_m_untied, svfloat32_t,
+		z0 = svabs_f32_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_f32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fabs	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_z_tied1, svfloat32_t,
+		z0 = svabs_f32_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fabs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_z_untied, svfloat32_t,
+		z0 = svabs_f32_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_f32_x_tied1:
+**	fabs	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_x_tied1, svfloat32_t,
+		z0 = svabs_f32_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_f32_x_untied:
+**	fabs	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f32_x_untied, svfloat32_t,
+		z2 = svabs_f32_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
+/*
+** ptrue_abs_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fabs	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f32_x_tied1, svfloat32_t,
+		z0 = svabs_f32_x (svptrue_b32 (), z0),
+		z0 = svabs_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_abs_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fabs	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f32_x_untied, svfloat32_t,
+		z2 = svabs_f32_x (svptrue_b32 (), z0),
+		z2 = svabs_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_b8_abs_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fabs	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f32_x_tied1, svfloat32_t,
+		z0 = svabs_f32_x (svptrue_b8 (), z0),
+		z0 = svabs_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_abs_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fabs	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f32_x_untied, svfloat32_t,
+		z2 = svabs_f32_x (svptrue_b8 (), z0),
+		z2 = svabs_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f64.c
new file mode 100644
index 0000000..150bbda
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_f64.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_f64_m_tied12:
+**	fabs	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_m_tied12, svfloat64_t,
+		z0 = svabs_f64_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_f64_m_tied1:
+**	fabs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_m_tied1, svfloat64_t,
+		z0 = svabs_f64_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_f64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fabs	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_m_tied2, svfloat64_t,
+		z0 = svabs_f64_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_f64_m_untied:
+**	movprfx	z0, z2
+**	fabs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_m_untied, svfloat64_t,
+		z0 = svabs_f64_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_f64_z_tied1:
+**	mov	(z[0-9]+\.d), z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fabs	z0\.d, p0/m, \1
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_z_tied1, svfloat64_t,
+		z0 = svabs_f64_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fabs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_z_untied, svfloat64_t,
+		z0 = svabs_f64_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_f64_x_tied1:
+**	fabs	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_x_tied1, svfloat64_t,
+		z0 = svabs_f64_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_f64_x_untied:
+**	fabs	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_f64_x_untied, svfloat64_t,
+		z2 = svabs_f64_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
+/*
+** ptrue_abs_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fabs	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f64_x_tied1, svfloat64_t,
+		z0 = svabs_f64_x (svptrue_b64 (), z0),
+		z0 = svabs_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_abs_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fabs	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_abs_f64_x_untied, svfloat64_t,
+		z2 = svabs_f64_x (svptrue_b64 (), z0),
+		z2 = svabs_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_b8_abs_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fabs	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f64_x_tied1, svfloat64_t,
+		z0 = svabs_f64_x (svptrue_b8 (), z0),
+		z0 = svabs_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_abs_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fabs	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_abs_f64_x_untied, svfloat64_t,
+		z2 = svabs_f64_x (svptrue_b8 (), z0),
+		z2 = svabs_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s16.c
new file mode 100644
index 0000000..0f35175
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s16.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_s16_m_tied12:
+**	abs	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_m_tied12, svint16_t,
+		z0 = svabs_s16_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_s16_m_tied1:
+**	abs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_m_tied1, svint16_t,
+		z0 = svabs_s16_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_s16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	abs	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_m_tied2, svint16_t,
+		z0 = svabs_s16_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_s16_m_untied:
+**	movprfx	z0, z2
+**	abs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_m_untied, svint16_t,
+		z0 = svabs_s16_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_s16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	abs	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_z_tied1, svint16_t,
+		z0 = svabs_s16_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	abs	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_z_untied, svint16_t,
+		z0 = svabs_s16_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_s16_x_tied1:
+**	abs	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_x_tied1, svint16_t,
+		z0 = svabs_s16_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_s16_x_untied:
+**	abs	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s16_x_untied, svint16_t,
+		z2 = svabs_s16_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s32.c
new file mode 100644
index 0000000..cdd8e70
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s32.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_s32_m_tied12:
+**	abs	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_m_tied12, svint32_t,
+		z0 = svabs_s32_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_s32_m_tied1:
+**	abs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_m_tied1, svint32_t,
+		z0 = svabs_s32_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_s32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	abs	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_m_tied2, svint32_t,
+		z0 = svabs_s32_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_s32_m_untied:
+**	movprfx	z0, z2
+**	abs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_m_untied, svint32_t,
+		z0 = svabs_s32_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_s32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	abs	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_z_tied1, svint32_t,
+		z0 = svabs_s32_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	abs	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_z_untied, svint32_t,
+		z0 = svabs_s32_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_s32_x_tied1:
+**	abs	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_x_tied1, svint32_t,
+		z0 = svabs_s32_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_s32_x_untied:
+**	abs	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s32_x_untied, svint32_t,
+		z2 = svabs_s32_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s64.c
new file mode 100644
index 0000000..79db7e4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s64.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_s64_m_tied12:
+**	abs	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_m_tied12, svint64_t,
+		z0 = svabs_s64_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_s64_m_tied1:
+**	abs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_m_tied1, svint64_t,
+		z0 = svabs_s64_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_s64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	abs	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_m_tied2, svint64_t,
+		z0 = svabs_s64_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_s64_m_untied:
+**	movprfx	z0, z2
+**	abs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_m_untied, svint64_t,
+		z0 = svabs_s64_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_s64_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	abs	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_z_tied1, svint64_t,
+		z0 = svabs_s64_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	abs	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_z_untied, svint64_t,
+		z0 = svabs_s64_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_s64_x_tied1:
+**	abs	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_x_tied1, svint64_t,
+		z0 = svabs_s64_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_s64_x_untied:
+**	abs	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s64_x_untied, svint64_t,
+		z2 = svabs_s64_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s8.c
new file mode 100644
index 0000000..fcdb1832
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/abs_s8.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** abs_s8_m_tied12:
+**	abs	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_m_tied12, svint8_t,
+		z0 = svabs_s8_m (z0, p0, z0),
+		z0 = svabs_m (z0, p0, z0))
+
+/*
+** abs_s8_m_tied1:
+**	abs	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_m_tied1, svint8_t,
+		z0 = svabs_s8_m (z0, p0, z1),
+		z0 = svabs_m (z0, p0, z1))
+
+/*
+** abs_s8_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	abs	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_m_tied2, svint8_t,
+		z0 = svabs_s8_m (z1, p0, z0),
+		z0 = svabs_m (z1, p0, z0))
+
+/*
+** abs_s8_m_untied:
+**	movprfx	z0, z2
+**	abs	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_m_untied, svint8_t,
+		z0 = svabs_s8_m (z2, p0, z1),
+		z0 = svabs_m (z2, p0, z1))
+
+/*
+** abs_s8_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.b, p0/z, z0\.b
+**	abs	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_z_tied1, svint8_t,
+		z0 = svabs_s8_z (p0, z0),
+		z0 = svabs_z (p0, z0))
+
+/*
+** abs_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	abs	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_z_untied, svint8_t,
+		z0 = svabs_s8_z (p0, z1),
+		z0 = svabs_z (p0, z1))
+
+/*
+** abs_s8_x_tied1:
+**	abs	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_x_tied1, svint8_t,
+		z0 = svabs_s8_x (p0, z0),
+		z0 = svabs_x (p0, z0))
+
+/*
+** abs_s8_x_untied:
+**	abs	z2\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (abs_s8_x_untied, svint8_t,
+		z2 = svabs_s8_x (p0, z0),
+		z2 = svabs_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f16.c
new file mode 100644
index 0000000..a6d1ac9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f16.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_f16_m_tied12:
+**	fneg	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_m_tied12, svfloat16_t,
+		z0 = svneg_f16_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_f16_m_tied1:
+**	fneg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_m_tied1, svfloat16_t,
+		z0 = svneg_f16_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_f16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fneg	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_m_tied2, svfloat16_t,
+		z0 = svneg_f16_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_f16_m_untied:
+**	movprfx	z0, z2
+**	fneg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_m_untied, svfloat16_t,
+		z0 = svneg_f16_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_f16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fneg	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_z_tied1, svfloat16_t,
+		z0 = svneg_f16_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fneg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_z_untied, svfloat16_t,
+		z0 = svneg_f16_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_f16_x_tied1:
+**	fneg	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_x_tied1, svfloat16_t,
+		z0 = svneg_f16_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_f16_x_untied:
+**	fneg	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f16_x_untied, svfloat16_t,
+		z2 = svneg_f16_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
+/*
+** ptrue_neg_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fneg	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f16_x_tied1, svfloat16_t,
+		z0 = svneg_f16_x (svptrue_b16 (), z0),
+		z0 = svneg_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_neg_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fneg	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f16_x_untied, svfloat16_t,
+		z2 = svneg_f16_x (svptrue_b16 (), z0),
+		z2 = svneg_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_b8_neg_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fneg	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f16_x_tied1, svfloat16_t,
+		z0 = svneg_f16_x (svptrue_b8 (), z0),
+		z0 = svneg_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_neg_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fneg	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f16_x_untied, svfloat16_t,
+		z2 = svneg_f16_x (svptrue_b8 (), z0),
+		z2 = svneg_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f32.c
new file mode 100644
index 0000000..6549fa5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f32.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_f32_m_tied12:
+**	fneg	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_m_tied12, svfloat32_t,
+		z0 = svneg_f32_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_f32_m_tied1:
+**	fneg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_m_tied1, svfloat32_t,
+		z0 = svneg_f32_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_f32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fneg	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_m_tied2, svfloat32_t,
+		z0 = svneg_f32_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_f32_m_untied:
+**	movprfx	z0, z2
+**	fneg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_m_untied, svfloat32_t,
+		z0 = svneg_f32_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_f32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fneg	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_z_tied1, svfloat32_t,
+		z0 = svneg_f32_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fneg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_z_untied, svfloat32_t,
+		z0 = svneg_f32_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_f32_x_tied1:
+**	fneg	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_x_tied1, svfloat32_t,
+		z0 = svneg_f32_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_f32_x_untied:
+**	fneg	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f32_x_untied, svfloat32_t,
+		z2 = svneg_f32_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
+/*
+** ptrue_neg_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fneg	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f32_x_tied1, svfloat32_t,
+		z0 = svneg_f32_x (svptrue_b32 (), z0),
+		z0 = svneg_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_neg_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fneg	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f32_x_untied, svfloat32_t,
+		z2 = svneg_f32_x (svptrue_b32 (), z0),
+		z2 = svneg_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_b8_neg_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fneg	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f32_x_tied1, svfloat32_t,
+		z0 = svneg_f32_x (svptrue_b8 (), z0),
+		z0 = svneg_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_neg_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fneg	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f32_x_untied, svfloat32_t,
+		z2 = svneg_f32_x (svptrue_b8 (), z0),
+		z2 = svneg_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f64.c
new file mode 100644
index 0000000..63eca80
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_f64.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_f64_m_tied12:
+**	fneg	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_m_tied12, svfloat64_t,
+		z0 = svneg_f64_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_f64_m_tied1:
+**	fneg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_m_tied1, svfloat64_t,
+		z0 = svneg_f64_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_f64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fneg	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_m_tied2, svfloat64_t,
+		z0 = svneg_f64_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_f64_m_untied:
+**	movprfx	z0, z2
+**	fneg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_m_untied, svfloat64_t,
+		z0 = svneg_f64_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_f64_z_tied1:
+**	mov	(z[0-9]+\.d), z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fneg	z0\.d, p0/m, \1
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_z_tied1, svfloat64_t,
+		z0 = svneg_f64_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fneg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_z_untied, svfloat64_t,
+		z0 = svneg_f64_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_f64_x_tied1:
+**	fneg	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_x_tied1, svfloat64_t,
+		z0 = svneg_f64_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_f64_x_untied:
+**	fneg	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_f64_x_untied, svfloat64_t,
+		z2 = svneg_f64_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
+/*
+** ptrue_neg_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fneg	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f64_x_tied1, svfloat64_t,
+		z0 = svneg_f64_x (svptrue_b64 (), z0),
+		z0 = svneg_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_neg_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fneg	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_neg_f64_x_untied, svfloat64_t,
+		z2 = svneg_f64_x (svptrue_b64 (), z0),
+		z2 = svneg_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_b8_neg_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fneg	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f64_x_tied1, svfloat64_t,
+		z0 = svneg_f64_x (svptrue_b8 (), z0),
+		z0 = svneg_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_neg_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fneg	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_neg_f64_x_untied, svfloat64_t,
+		z2 = svneg_f64_x (svptrue_b8 (), z0),
+		z2 = svneg_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s16.c
new file mode 100644
index 0000000..80eab1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s16.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_s16_m_tied12:
+**	neg	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_m_tied12, svint16_t,
+		z0 = svneg_s16_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_s16_m_tied1:
+**	neg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_m_tied1, svint16_t,
+		z0 = svneg_s16_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_s16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	neg	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_m_tied2, svint16_t,
+		z0 = svneg_s16_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_s16_m_untied:
+**	movprfx	z0, z2
+**	neg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_m_untied, svint16_t,
+		z0 = svneg_s16_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_s16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	neg	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_z_tied1, svint16_t,
+		z0 = svneg_s16_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	neg	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_z_untied, svint16_t,
+		z0 = svneg_s16_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_s16_x_tied1:
+**	neg	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_x_tied1, svint16_t,
+		z0 = svneg_s16_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_s16_x_untied:
+**	neg	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s16_x_untied, svint16_t,
+		z2 = svneg_s16_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s32.c
new file mode 100644
index 0000000..7cc7c2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s32.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_s32_m_tied12:
+**	neg	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_m_tied12, svint32_t,
+		z0 = svneg_s32_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_s32_m_tied1:
+**	neg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_m_tied1, svint32_t,
+		z0 = svneg_s32_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_s32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	neg	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_m_tied2, svint32_t,
+		z0 = svneg_s32_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_s32_m_untied:
+**	movprfx	z0, z2
+**	neg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_m_untied, svint32_t,
+		z0 = svneg_s32_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_s32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	neg	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_z_tied1, svint32_t,
+		z0 = svneg_s32_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	neg	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_z_untied, svint32_t,
+		z0 = svneg_s32_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_s32_x_tied1:
+**	neg	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_x_tied1, svint32_t,
+		z0 = svneg_s32_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_s32_x_untied:
+**	neg	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s32_x_untied, svint32_t,
+		z2 = svneg_s32_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s64.c
new file mode 100644
index 0000000..17b867b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s64.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_s64_m_tied12:
+**	neg	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_m_tied12, svint64_t,
+		z0 = svneg_s64_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_s64_m_tied1:
+**	neg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_m_tied1, svint64_t,
+		z0 = svneg_s64_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_s64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	neg	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_m_tied2, svint64_t,
+		z0 = svneg_s64_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_s64_m_untied:
+**	movprfx	z0, z2
+**	neg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_m_untied, svint64_t,
+		z0 = svneg_s64_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_s64_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	neg	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_z_tied1, svint64_t,
+		z0 = svneg_s64_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	neg	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_z_untied, svint64_t,
+		z0 = svneg_s64_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_s64_x_tied1:
+**	neg	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_x_tied1, svint64_t,
+		z0 = svneg_s64_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_s64_x_untied:
+**	neg	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s64_x_untied, svint64_t,
+		z2 = svneg_s64_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s8.c
new file mode 100644
index 0000000..28248ee
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/neg_s8.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** neg_s8_m_tied12:
+**	neg	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_m_tied12, svint8_t,
+		z0 = svneg_s8_m (z0, p0, z0),
+		z0 = svneg_m (z0, p0, z0))
+
+/*
+** neg_s8_m_tied1:
+**	neg	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_m_tied1, svint8_t,
+		z0 = svneg_s8_m (z0, p0, z1),
+		z0 = svneg_m (z0, p0, z1))
+
+/*
+** neg_s8_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	neg	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_m_tied2, svint8_t,
+		z0 = svneg_s8_m (z1, p0, z0),
+		z0 = svneg_m (z1, p0, z0))
+
+/*
+** neg_s8_m_untied:
+**	movprfx	z0, z2
+**	neg	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_m_untied, svint8_t,
+		z0 = svneg_s8_m (z2, p0, z1),
+		z0 = svneg_m (z2, p0, z1))
+
+/*
+** neg_s8_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.b, p0/z, z0\.b
+**	neg	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_z_tied1, svint8_t,
+		z0 = svneg_s8_z (p0, z0),
+		z0 = svneg_z (p0, z0))
+
+/*
+** neg_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	neg	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_z_untied, svint8_t,
+		z0 = svneg_s8_z (p0, z1),
+		z0 = svneg_z (p0, z1))
+
+/*
+** neg_s8_x_tied1:
+**	neg	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_x_tied1, svint8_t,
+		z0 = svneg_s8_x (p0, z0),
+		z0 = svneg_x (p0, z0))
+
+/*
+** neg_s8_x_untied:
+**	neg	z2\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (neg_s8_x_untied, svint8_t,
+		z2 = svneg_s8_x (p0, z0),
+		z2 = svneg_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s16.c
new file mode 100644
index 0000000..0f2f863
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s16.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_s16_m_tied12:
+**	not	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_m_tied12, svint16_t,
+		z0 = svnot_s16_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_s16_m_tied1:
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_m_tied1, svint16_t,
+		z0 = svnot_s16_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_s16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_m_tied2, svint16_t,
+		z0 = svnot_s16_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_s16_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_m_untied, svint16_t,
+		z0 = svnot_s16_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_s16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	not	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_z_tied1, svint16_t,
+		z0 = svnot_s16_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_s16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_z_untied, svint16_t,
+		z0 = svnot_s16_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_s16_x_tied1:
+**	not	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_x_tied1, svint16_t,
+		z0 = svnot_s16_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_s16_x_untied:
+**	not	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_s16_x_untied, svint16_t,
+		z2 = svnot_s16_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s32.c
new file mode 100644
index 0000000..cddd576
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s32.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_s32_m_tied12:
+**	not	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_m_tied12, svint32_t,
+		z0 = svnot_s32_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_s32_m_tied1:
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_m_tied1, svint32_t,
+		z0 = svnot_s32_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_s32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_m_tied2, svint32_t,
+		z0 = svnot_s32_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_s32_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_m_untied, svint32_t,
+		z0 = svnot_s32_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_s32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	not	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_z_tied1, svint32_t,
+		z0 = svnot_s32_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_s32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_z_untied, svint32_t,
+		z0 = svnot_s32_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_s32_x_tied1:
+**	not	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_x_tied1, svint32_t,
+		z0 = svnot_s32_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_s32_x_untied:
+**	not	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_s32_x_untied, svint32_t,
+		z2 = svnot_s32_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s64.c
new file mode 100644
index 0000000..e3247d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s64.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_s64_m_tied12:
+**	not	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_m_tied12, svint64_t,
+		z0 = svnot_s64_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_s64_m_tied1:
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_m_tied1, svint64_t,
+		z0 = svnot_s64_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_s64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_m_tied2, svint64_t,
+		z0 = svnot_s64_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_s64_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_m_untied, svint64_t,
+		z0 = svnot_s64_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_s64_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	not	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_z_tied1, svint64_t,
+		z0 = svnot_s64_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_s64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_z_untied, svint64_t,
+		z0 = svnot_s64_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_s64_x_tied1:
+**	not	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_x_tied1, svint64_t,
+		z0 = svnot_s64_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_s64_x_untied:
+**	not	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_s64_x_untied, svint64_t,
+		z2 = svnot_s64_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s8.c
new file mode 100644
index 0000000..2de3c32
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_s8.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_s8_m_tied12:
+**	not	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_m_tied12, svint8_t,
+		z0 = svnot_s8_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_s8_m_tied1:
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_m_tied1, svint8_t,
+		z0 = svnot_s8_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_s8_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_m_tied2, svint8_t,
+		z0 = svnot_s8_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_s8_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_m_untied, svint8_t,
+		z0 = svnot_s8_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_s8_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.b, p0/z, z0\.b
+**	not	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_z_tied1, svint8_t,
+		z0 = svnot_s8_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_s8_z_untied:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_z_untied, svint8_t,
+		z0 = svnot_s8_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_s8_x_tied1:
+**	not	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_x_tied1, svint8_t,
+		z0 = svnot_s8_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_s8_x_untied:
+**	not	z2\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_s8_x_untied, svint8_t,
+		z2 = svnot_s8_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u16.c
new file mode 100644
index 0000000..b1fc6e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u16.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_u16_m_tied12:
+**	not	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_m_tied12, svuint16_t,
+		z0 = svnot_u16_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_u16_m_tied1:
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_m_tied1, svuint16_t,
+		z0 = svnot_u16_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_u16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_m_tied2, svuint16_t,
+		z0 = svnot_u16_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_u16_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_m_untied, svuint16_t,
+		z0 = svnot_u16_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_u16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	not	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_z_tied1, svuint16_t,
+		z0 = svnot_u16_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_u16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	not	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_z_untied, svuint16_t,
+		z0 = svnot_u16_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_u16_x_tied1:
+**	not	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_x_tied1, svuint16_t,
+		z0 = svnot_u16_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_u16_x_untied:
+**	not	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (not_u16_x_untied, svuint16_t,
+		z2 = svnot_u16_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u32.c
new file mode 100644
index 0000000..80f8f10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u32.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_u32_m_tied12:
+**	not	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_m_tied12, svuint32_t,
+		z0 = svnot_u32_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_u32_m_tied1:
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_m_tied1, svuint32_t,
+		z0 = svnot_u32_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_u32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_m_tied2, svuint32_t,
+		z0 = svnot_u32_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_u32_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_m_untied, svuint32_t,
+		z0 = svnot_u32_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_u32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	not	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_z_tied1, svuint32_t,
+		z0 = svnot_u32_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_u32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	not	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_z_untied, svuint32_t,
+		z0 = svnot_u32_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_u32_x_tied1:
+**	not	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_x_tied1, svuint32_t,
+		z0 = svnot_u32_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_u32_x_untied:
+**	not	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (not_u32_x_untied, svuint32_t,
+		z2 = svnot_u32_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u64.c
new file mode 100644
index 0000000..44c2cf2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u64.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_u64_m_tied12:
+**	not	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_m_tied12, svuint64_t,
+		z0 = svnot_u64_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_u64_m_tied1:
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_m_tied1, svuint64_t,
+		z0 = svnot_u64_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_u64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_m_tied2, svuint64_t,
+		z0 = svnot_u64_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_u64_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_m_untied, svuint64_t,
+		z0 = svnot_u64_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_u64_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	not	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_z_tied1, svuint64_t,
+		z0 = svnot_u64_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_u64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	not	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_z_untied, svuint64_t,
+		z0 = svnot_u64_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_u64_x_tied1:
+**	not	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_x_tied1, svuint64_t,
+		z0 = svnot_u64_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_u64_x_untied:
+**	not	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (not_u64_x_untied, svuint64_t,
+		z2 = svnot_u64_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u8.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u8.c
new file mode 100644
index 0000000..63b5aec
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/not_u8.c
@@ -0,0 +1,83 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** not_u8_m_tied12:
+**	not	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_m_tied12, svuint8_t,
+		z0 = svnot_u8_m (z0, p0, z0),
+		z0 = svnot_m (z0, p0, z0))
+
+/*
+** not_u8_m_tied1:
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_m_tied1, svuint8_t,
+		z0 = svnot_u8_m (z0, p0, z1),
+		z0 = svnot_m (z0, p0, z1))
+
+/*
+** not_u8_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	not	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_m_tied2, svuint8_t,
+		z0 = svnot_u8_m (z1, p0, z0),
+		z0 = svnot_m (z1, p0, z0))
+
+/*
+** not_u8_m_untied:
+**	movprfx	z0, z2
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_m_untied, svuint8_t,
+		z0 = svnot_u8_m (z2, p0, z1),
+		z0 = svnot_m (z2, p0, z1))
+
+/*
+** not_u8_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.b, p0/z, z0\.b
+**	not	z0\.b, p0/m, \1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_z_tied1, svuint8_t,
+		z0 = svnot_u8_z (p0, z0),
+		z0 = svnot_z (p0, z0))
+
+/*
+** not_u8_z_untied:
+**	movprfx	z0\.b, p0/z, z0\.b
+**	not	z0\.b, p0/m, z1\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_z_untied, svuint8_t,
+		z0 = svnot_u8_z (p0, z1),
+		z0 = svnot_z (p0, z1))
+
+/*
+** not_u8_x_tied1:
+**	not	z0\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_x_tied1, svuint8_t,
+		z0 = svnot_u8_x (p0, z0),
+		z0 = svnot_x (p0, z0))
+
+/*
+** not_u8_x_untied:
+**	not	z2\.b, p0/m, z0\.b
+**	ret
+*/
+TEST_UNIFORM_Z (not_u8_x_untied, svuint8_t,
+		z2 = svnot_u8_x (p0, z0),
+		z2 = svnot_x (p0, z0))
+
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f16.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f16.c
new file mode 100644
index 0000000..504672e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f16.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** sqrt_f16_m_tied12:
+**	fsqrt	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_m_tied12, svfloat16_t,
+		z0 = svsqrt_f16_m (z0, p0, z0),
+		z0 = svsqrt_m (z0, p0, z0))
+
+/*
+** sqrt_f16_m_tied1:
+**	fsqrt	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_m_tied1, svfloat16_t,
+		z0 = svsqrt_f16_m (z0, p0, z1),
+		z0 = svsqrt_m (z0, p0, z1))
+
+/*
+** sqrt_f16_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fsqrt	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_m_tied2, svfloat16_t,
+		z0 = svsqrt_f16_m (z1, p0, z0),
+		z0 = svsqrt_m (z1, p0, z0))
+
+/*
+** sqrt_f16_m_untied:
+**	movprfx	z0, z2
+**	fsqrt	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_m_untied, svfloat16_t,
+		z0 = svsqrt_f16_m (z2, p0, z1),
+		z0 = svsqrt_m (z2, p0, z1))
+
+/*
+** sqrt_f16_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fsqrt	z0\.h, p0/m, \1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_z_tied1, svfloat16_t,
+		z0 = svsqrt_f16_z (p0, z0),
+		z0 = svsqrt_z (p0, z0))
+
+/*
+** sqrt_f16_z_untied:
+**	movprfx	z0\.h, p0/z, z0\.h
+**	fsqrt	z0\.h, p0/m, z1\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_z_untied, svfloat16_t,
+		z0 = svsqrt_f16_z (p0, z1),
+		z0 = svsqrt_z (p0, z1))
+
+/*
+** sqrt_f16_x_tied1:
+**	fsqrt	z0\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_x_tied1, svfloat16_t,
+		z0 = svsqrt_f16_x (p0, z0),
+		z0 = svsqrt_x (p0, z0))
+
+/*
+** sqrt_f16_x_untied:
+**	fsqrt	z2\.h, p0/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f16_x_untied, svfloat16_t,
+		z2 = svsqrt_f16_x (p0, z0),
+		z2 = svsqrt_x (p0, z0))
+
+/*
+** ptrue_sqrt_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fsqrt	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f16_x_tied1, svfloat16_t,
+		z0 = svsqrt_f16_x (svptrue_b16 (), z0),
+		z0 = svsqrt_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_sqrt_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fsqrt	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f16_x_untied, svfloat16_t,
+		z2 = svsqrt_f16_x (svptrue_b16 (), z0),
+		z2 = svsqrt_x (svptrue_b16 (), z0))
+
+/*
+** ptrue_b8_sqrt_f16_x_tied1:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fsqrt	z0\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f16_x_tied1, svfloat16_t,
+		z0 = svsqrt_f16_x (svptrue_b8 (), z0),
+		z0 = svsqrt_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_sqrt_f16_x_untied:
+**	ptrue	(p[0-7])\.h[^\n]*
+**	fsqrt	z2\.h, \1/m, z0\.h
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f16_x_untied, svfloat16_t,
+		z2 = svsqrt_f16_x (svptrue_b8 (), z0),
+		z2 = svsqrt_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f32.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f32.c
new file mode 100644
index 0000000..a738096
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f32.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** sqrt_f32_m_tied12:
+**	fsqrt	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_m_tied12, svfloat32_t,
+		z0 = svsqrt_f32_m (z0, p0, z0),
+		z0 = svsqrt_m (z0, p0, z0))
+
+/*
+** sqrt_f32_m_tied1:
+**	fsqrt	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_m_tied1, svfloat32_t,
+		z0 = svsqrt_f32_m (z0, p0, z1),
+		z0 = svsqrt_m (z0, p0, z1))
+
+/*
+** sqrt_f32_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fsqrt	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_m_tied2, svfloat32_t,
+		z0 = svsqrt_f32_m (z1, p0, z0),
+		z0 = svsqrt_m (z1, p0, z0))
+
+/*
+** sqrt_f32_m_untied:
+**	movprfx	z0, z2
+**	fsqrt	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_m_untied, svfloat32_t,
+		z0 = svsqrt_f32_m (z2, p0, z1),
+		z0 = svsqrt_m (z2, p0, z1))
+
+/*
+** sqrt_f32_z_tied1:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fsqrt	z0\.s, p0/m, \1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_z_tied1, svfloat32_t,
+		z0 = svsqrt_f32_z (p0, z0),
+		z0 = svsqrt_z (p0, z0))
+
+/*
+** sqrt_f32_z_untied:
+**	movprfx	z0\.s, p0/z, z0\.s
+**	fsqrt	z0\.s, p0/m, z1\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_z_untied, svfloat32_t,
+		z0 = svsqrt_f32_z (p0, z1),
+		z0 = svsqrt_z (p0, z1))
+
+/*
+** sqrt_f32_x_tied1:
+**	fsqrt	z0\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_x_tied1, svfloat32_t,
+		z0 = svsqrt_f32_x (p0, z0),
+		z0 = svsqrt_x (p0, z0))
+
+/*
+** sqrt_f32_x_untied:
+**	fsqrt	z2\.s, p0/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f32_x_untied, svfloat32_t,
+		z2 = svsqrt_f32_x (p0, z0),
+		z2 = svsqrt_x (p0, z0))
+
+/*
+** ptrue_sqrt_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fsqrt	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f32_x_tied1, svfloat32_t,
+		z0 = svsqrt_f32_x (svptrue_b32 (), z0),
+		z0 = svsqrt_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_sqrt_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fsqrt	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f32_x_untied, svfloat32_t,
+		z2 = svsqrt_f32_x (svptrue_b32 (), z0),
+		z2 = svsqrt_x (svptrue_b32 (), z0))
+
+/*
+** ptrue_b8_sqrt_f32_x_tied1:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fsqrt	z0\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f32_x_tied1, svfloat32_t,
+		z0 = svsqrt_f32_x (svptrue_b8 (), z0),
+		z0 = svsqrt_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_sqrt_f32_x_untied:
+**	ptrue	(p[0-7])\.s[^\n]*
+**	fsqrt	z2\.s, \1/m, z0\.s
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f32_x_untied, svfloat32_t,
+		z2 = svsqrt_f32_x (svptrue_b8 (), z0),
+		z2 = svsqrt_x (svptrue_b8 (), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f64.c b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f64.c
new file mode 100644
index 0000000..4786779
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve-acle/asm/sqrt_f64.c
@@ -0,0 +1,122 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** sqrt_f64_m_tied12:
+**	fsqrt	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_m_tied12, svfloat64_t,
+		z0 = svsqrt_f64_m (z0, p0, z0),
+		z0 = svsqrt_m (z0, p0, z0))
+
+/*
+** sqrt_f64_m_tied1:
+**	fsqrt	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_m_tied1, svfloat64_t,
+		z0 = svsqrt_f64_m (z0, p0, z1),
+		z0 = svsqrt_m (z0, p0, z1))
+
+/*
+** sqrt_f64_m_tied2:
+**	mov	(z[0-9]+)\.d, z0\.d
+**	movprfx	z0, z1
+**	fsqrt	z0\.d, p0/m, \1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_m_tied2, svfloat64_t,
+		z0 = svsqrt_f64_m (z1, p0, z0),
+		z0 = svsqrt_m (z1, p0, z0))
+
+/*
+** sqrt_f64_m_untied:
+**	movprfx	z0, z2
+**	fsqrt	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_m_untied, svfloat64_t,
+		z0 = svsqrt_f64_m (z2, p0, z1),
+		z0 = svsqrt_m (z2, p0, z1))
+
+/*
+** sqrt_f64_z_tied1:
+**	mov	(z[0-9]+\.d), z0\.d
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fsqrt	z0\.d, p0/m, \1
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_z_tied1, svfloat64_t,
+		z0 = svsqrt_f64_z (p0, z0),
+		z0 = svsqrt_z (p0, z0))
+
+/*
+** sqrt_f64_z_untied:
+**	movprfx	z0\.d, p0/z, z0\.d
+**	fsqrt	z0\.d, p0/m, z1\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_z_untied, svfloat64_t,
+		z0 = svsqrt_f64_z (p0, z1),
+		z0 = svsqrt_z (p0, z1))
+
+/*
+** sqrt_f64_x_tied1:
+**	fsqrt	z0\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_x_tied1, svfloat64_t,
+		z0 = svsqrt_f64_x (p0, z0),
+		z0 = svsqrt_x (p0, z0))
+
+/*
+** sqrt_f64_x_untied:
+**	fsqrt	z2\.d, p0/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (sqrt_f64_x_untied, svfloat64_t,
+		z2 = svsqrt_f64_x (p0, z0),
+		z2 = svsqrt_x (p0, z0))
+
+/*
+** ptrue_sqrt_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fsqrt	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f64_x_tied1, svfloat64_t,
+		z0 = svsqrt_f64_x (svptrue_b64 (), z0),
+		z0 = svsqrt_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_sqrt_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fsqrt	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_sqrt_f64_x_untied, svfloat64_t,
+		z2 = svsqrt_f64_x (svptrue_b64 (), z0),
+		z2 = svsqrt_x (svptrue_b64 (), z0))
+
+/*
+** ptrue_b8_sqrt_f64_x_tied1:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fsqrt	z0\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f64_x_tied1, svfloat64_t,
+		z0 = svsqrt_f64_x (svptrue_b8 (), z0),
+		z0 = svsqrt_x (svptrue_b8 (), z0))
+
+/*
+** ptrue_b8_sqrt_f64_x_untied:
+**	ptrue	(p[0-7])\.d[^\n]*
+**	fsqrt	z2\.d, \1/m, z0\.d
+**	ret
+*/
+TEST_UNIFORM_Z (ptrue_b8_sqrt_f64_x_untied, svfloat64_t,
+		z2 = svsqrt_f64_x (svptrue_b8 (), z0),
+		z2 = svsqrt_x (svptrue_b8 (), z0))
-- 
2.7.4
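
A quick illustration for readers of the asm expectations above: the _m, _z
and _x suffixes select how lanes where the governing predicate is false are
handled, which is what drives the movprfx sequences the tests check for.
The sketch below is not part of the patch; it assumes only the call shapes
the tests themselves use (svnot_s8_m/_z/_x from <arm_sve.h>), and the
function names are made up for illustration.

#include <arm_sve.h>

/* _m (merging): inactive lanes are taken from the first argument,
   so the result must start out as a copy of FALLBACK.  */
svint8_t
not_merge (svbool_t pg, svint8_t fallback, svint8_t x)
{
  return svnot_s8_m (fallback, pg, x);
}

/* _z (zeroing): inactive lanes become zero; the *_z tests above
   expect a zeroing movprfx before the predicated NOT.  */
svint8_t
not_zero (svbool_t pg, svint8_t x)
{
  return svnot_s8_z (pg, x);
}

/* _x (don't care): inactive lanes are unspecified, so when source and
   destination coincide the compiler can emit the predicated NOT alone,
   with no movprfx (see the *_x_tied1 tests).  */
svint8_t
not_any (svbool_t pg, svint8_t x)
{
  return svnot_s8_x (pg, x);
}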
