Author: Sander de Smalen
Date: 2020-05-12T11:02:32+01:00
New Revision: d6936be2ef8ce5d5d85b8a6cdd1477cd79688c3a

URL: https://github.com/llvm/llvm-project/commit/d6936be2ef8ce5d5d85b8a6cdd1477cd79688c3a
DIFF: https://github.com/llvm/llvm-project/commit/d6936be2ef8ce5d5d85b8a6cdd1477cd79688c3a.diff

LOG: [SveEmitter] Add builtins for svdup and svindex

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D79357
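
The new builtins cover the unpredicated svdup[_n] splat, its merging (_m),
zeroing (_z) and don't-care (_x) predicated forms, the predicate splats
svdup[_n]_b8/_b16/_b32/_b64, svdup_lane, and svindex.

For illustration only (not part of the patch), a minimal sketch of how the
new builtins are used from C; the exact IR each call produces is shown in
the test file below:

  #include <arm_sve.h>
  #include <stdbool.h>

  svint32_t splat(void) {
    return svdup_n_s32(42);                 // every element = 42
  }

  svint32_t splat_merge(svint32_t inactive, svbool_t pg) {
    return svdup_n_s32_m(inactive, pg, 7);  // active lanes = 7, rest kept
  }

  svbool_t all_or_none(bool flag) {
    return svdup_n_b32(flag);               // all-true or all-false predicate
  }

  svint32_t series(void) {
    return svindex_s32(0, 4);               // 0, 4, 8, 12, ...
  }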

Added: 
    clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dup.c

Modified: 
    clang/include/clang/Basic/arm_sve.td
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/lib/CodeGen/CodeGenFunction.h

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index b827601d56c1..e8e05902102a 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -627,6 +627,13 @@ def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss",  "sUsh", MergeNone>;
 def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss",  "iUif", MergeNone>;
 def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss",  "lUld", MergeNone>;
 
+def SVDUP   : SInst<"svdup[_n]_{d}", "ds",   "csilUcUsUiUlhfd", MergeNone,    "aarch64_sve_dup_x">;
+def SVDUP_M : SInst<"svdup[_n]_{d}", "ddPs", "csilUcUsUiUlhfd", MergeOp1,     "aarch64_sve_dup">;
+def SVDUP_X : SInst<"svdup[_n]_{d}", "dPs",  "csilUcUsUiUlhfd", MergeAnyExp,  "aarch64_sve_dup">;
+def SVDUP_Z : SInst<"svdup[_n]_{d}", "dPs",  "csilUcUsUiUlhfd", MergeZeroExp, "aarch64_sve_dup">;
+
+def SVINDEX : SInst<"svindex_{d}",   "dss",  "csilUcUsUiUl",    MergeNone,    "aarch64_sve_index">;
+
 // Integer arithmetic
 
 multiclass SInstZPZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
@@ -1061,7 +1068,11 @@ def SVCLASTA_N   : SInst<"svclasta[_n_{d}]",  "sPsd", "csilUcUsUiUlhfd", MergeNo
 def SVCLASTB     : SInst<"svclastb[_{d}]",    "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb">;
 def SVCLASTB_N   : SInst<"svclastb[_n_{d}]",  "sPsd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_clastb_n">;
 def SVCOMPACT    : SInst<"svcompact[_{d}]",   "dPd",  "ilUiUlfd",        MergeNone, "aarch64_sve_compact">;
-//  SVDUP_LANE    (to land in D78750)
+// Note: svdup_lane is implemented using the intrinsic for TBL to represent a
+// splat of any possible lane. It is up to LLVM to pick a more efficient
+// instruction such as DUP (indexed) if the lane index fits the range of the
+// instruction's immediate.
+def SVDUP_LANE   : SInst<"svdup_lane[_{d}]",  "ddL",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
 def SVDUPQ_LANE  : SInst<"svdupq_lane[_{d}]", "ddn",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
 def SVEXT        : SInst<"svext[_{d}]",       "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
 def SVLASTA      : SInst<"svlasta[_{d}]",     "sPd",  "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_lasta">;
@@ -1104,6 +1115,7 @@ def SVDUPQ_B8      : SInst<"svdupq[_n]_{d}",  "Pssssssssssssssss",  "Pc", MergeN
 def SVDUPQ_B16     : SInst<"svdupq[_n]_{d}", "Pssssssss",  "Ps", MergeNone>;
 def SVDUPQ_B32     : SInst<"svdupq[_n]_{d}", "Pssss",  "Pi", MergeNone>;
 def SVDUPQ_B64     : SInst<"svdupq[_n]_{d}", "Pss",  "Pl", MergeNone>;
+def SVDUP_N_B      : SInst<"svdup[_n]_{d}",  "Ps", "PcPsPiPl", MergeNone>;
 
 
 
////////////////////////////////////////////////////////////////////////////////

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 541dac7b7580..47620c13aed6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -7909,12 +7909,15 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
 
 // Limit the usage of scalable llvm IR generated by the ACLE by using the
 // sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
-Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
-  auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x,
-                            getSVEVectorForElementType(Scalar->getType()));
+Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
+  auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
   return Builder.CreateCall(F, Scalar);
 }
 
+Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
+  return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
+}
+
 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
   // FIXME: For big endian this needs an additional REV, or needs a separate
   // intrinsic that is code-generated as a no-op, because the LLVM bitcast
@@ -8109,6 +8112,17 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
   case SVE::BI__builtin_sve_svpmullb_n_u64:
     return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
 
+  case SVE::BI__builtin_sve_svdup_n_b8:
+  case SVE::BI__builtin_sve_svdup_n_b16:
+  case SVE::BI__builtin_sve_svdup_n_b32:
+  case SVE::BI__builtin_sve_svdup_n_b64: {
+    Value *CmpNE =
+        Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
+    llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
+    Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
+    return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
+  }
+
   case SVE::BI__builtin_sve_svdupq_n_b8:
   case SVE::BI__builtin_sve_svdupq_n_b16:
   case SVE::BI__builtin_sve_svdupq_n_b32:

diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 61b51118212c..e9b005c47aee 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -3920,6 +3920,7 @@ class CodeGenFunction : public CodeGenTypeCache {
   llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
   llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
   llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
+  llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
   llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
   llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
                             llvm::SmallVectorImpl<llvm::Value *> &Ops,

diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dup.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dup.c
new file mode 100644
index 000000000000..9fb8cc6fabd1
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dup.c
@@ -0,0 +1,524 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svint8_t test_svdup_n_s8(int8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s8,)(op);
+}
+
+svint16_t test_svdup_n_s16(int16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s16,)(op);
+}
+
+svint32_t test_svdup_n_s32(int32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s32,)(op);
+}
+
+svint64_t test_svdup_n_s64(int64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s64,)(op);
+}
+
+svuint8_t test_svdup_n_u8(uint8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u8,)(op);
+}
+
+svuint16_t test_svdup_n_u16(uint16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u16,)(op);
+}
+
+svuint32_t test_svdup_n_u32(uint32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u32,)(op);
+}
+
+svuint64_t test_svdup_n_u64(uint64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u64,)(op);
+}
+
+svfloat16_t test_svdup_n_f16(float16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.x.nxv8f16(half %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f16,)(op);
+}
+
+svfloat32_t test_svdup_n_f32(float32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f32,)(op);
+}
+
+svfloat64_t test_svdup_n_f64(float64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f64,)(op);
+}
+
+svint8_t test_svdup_n_s8_z(svbool_t pg, int8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s8_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s8_z,)(pg, op);
+}
+
+svint16_t test_svdup_n_s16_z(svbool_t pg, int16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s16_z,)(pg, op);
+}
+
+svint32_t test_svdup_n_s32_z(svbool_t pg, int32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s32_z,)(pg, op);
+}
+
+svint64_t test_svdup_n_s64_z(svbool_t pg, int64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s64_z,)(pg, op);
+}
+
+svuint8_t test_svdup_n_u8_z(svbool_t pg, uint8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u8_z
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u8_z,)(pg, op);
+}
+
+svuint16_t test_svdup_n_u16_z(svbool_t pg, uint16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u16_z,)(pg, op);
+}
+
+svuint32_t test_svdup_n_u32_z(svbool_t pg, uint32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u32_z,)(pg, op);
+}
+
+svuint64_t test_svdup_n_u64_z(svbool_t pg, uint64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u64_z,)(pg, op);
+}
+
+svfloat16_t test_svdup_n_f16_z(svbool_t pg, float16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f16_z
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> zeroinitializer, <vscale x 8 x i1> %[[PG]], half %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f16_z,)(pg, op);
+}
+
+svfloat32_t test_svdup_n_f32_z(svbool_t pg, float32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f32_z
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> zeroinitializer, <vscale x 4 x i1> %[[PG]], float %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f32_z,)(pg, op);
+}
+
+svfloat64_t test_svdup_n_f64_z(svbool_t pg, float64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f64_z
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x i1> %[[PG]], double %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f64_z,)(pg, op);
+}
+
+svint8_t test_svdup_n_s8_m(svint8_t inactive, svbool_t pg, int8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s8_m,)(inactive, pg, op);
+}
+
+svint16_t test_svdup_n_s16_m(svint16_t inactive, svbool_t pg, int16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s16_m
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s16_m,)(inactive, pg, op);
+}
+
+svint32_t test_svdup_n_s32_m(svint32_t inactive, svbool_t pg, int32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s32_m,)(inactive, pg, op);
+}
+
+svint64_t test_svdup_n_s64_m(svint64_t inactive, svbool_t pg, int64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s64_m,)(inactive, pg, op);
+}
+
+svuint8_t test_svdup_n_u8_m(svuint8_t inactive, svbool_t pg, uint8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u8_m
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %inactive, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u8_m,)(inactive, pg, op);
+}
+
+svuint16_t test_svdup_n_u16_m(svuint16_t inactive, svbool_t pg, uint16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u16_m
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %inactive, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u16_m,)(inactive, pg, op);
+}
+
+svuint32_t test_svdup_n_u32_m(svuint32_t inactive, svbool_t pg, uint32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %inactive, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u32_m,)(inactive, pg, op);
+}
+
+svuint64_t test_svdup_n_u64_m(svuint64_t inactive, svbool_t pg, uint64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %inactive, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u64_m,)(inactive, pg, op);
+}
+
+svfloat16_t test_svdup_n_f16_m(svfloat16_t inactive, svbool_t pg, float16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f16_m
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> %inactive, <vscale x 8 x i1> %[[PG]], half %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f16_m,)(inactive, pg, op);
+}
+
+svfloat32_t test_svdup_n_f32_m(svfloat32_t inactive, svbool_t pg, float32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f32_m
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> %inactive, <vscale x 4 x i1> %[[PG]], float %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f32_m,)(inactive, pg, op);
+}
+
+svfloat64_t test_svdup_n_f64_m(svfloat64_t inactive, svbool_t pg, float64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f64_m
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> %inactive, <vscale x 2 x i1> %[[PG]], double %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f64_m,)(inactive, pg, op);
+}
+
+svint8_t test_svdup_n_s8_x(svbool_t pg, int8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s8_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s8_x,)(pg, op);
+}
+
+svint16_t test_svdup_n_s16_x(svbool_t pg, int16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s16_x
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s16_x,)(pg, op);
+}
+
+svint32_t test_svdup_n_s32_x(svbool_t pg, int32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s32_x,)(pg, op);
+}
+
+svint64_t test_svdup_n_s64_x(svbool_t pg, int64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_s64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_s64_x,)(pg, op);
+}
+
+svuint8_t test_svdup_n_u8_x(svbool_t pg, uint8_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u8_x
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i1> %pg, i8 %op)
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u8_x,)(pg, op);
+}
+
+svuint16_t test_svdup_n_u16_x(svbool_t pg, uint16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u16_x
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> %[[PG]], i16 %op)
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u16_x,)(pg, op);
+}
+
+svuint32_t test_svdup_n_u32_x(svbool_t pg, uint32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> %[[PG]], i32 %op)
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u32_x,)(pg, op);
+}
+
+svuint64_t test_svdup_n_u64_x(svbool_t pg, uint64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_u64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> %[[PG]], i64 %op)
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_u64_x,)(pg, op);
+}
+
+svfloat16_t test_svdup_n_f16_x(svbool_t pg, float16_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f16_x
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.dup.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x i1> %[[PG]], half %op)
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f16_x,)(pg, op);
+}
+
+svfloat32_t test_svdup_n_f32_x(svbool_t pg, float32_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f32_x
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> %[[PG]], float %op)
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f32_x,)(pg, op);
+}
+
+svfloat64_t test_svdup_n_f64_x(svbool_t pg, float64_t op)
+{
+  // CHECK-LABEL: test_svdup_n_f64_x
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> %[[PG]], double %op)
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup,_n,_f64_x,)(pg, op);
+}
+
+svint8_t test_svdup_lane_s8(svint8_t data, uint8_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_s8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_s8,,)(data, index);
+}
+
+svint16_t test_svdup_lane_s16(svint16_t data, uint16_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_s16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_s16,,)(data, index);
+}
+
+svint32_t test_svdup_lane_s32(svint32_t data, uint32_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_s32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_s32,,)(data, index);
+}
+
+svint64_t test_svdup_lane_s64(svint64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_s64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_s64,,)(data, index);
+}
+
+svuint8_t test_svdup_lane_u8(svuint8_t data, uint8_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_u8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i8> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_u8,,)(data, index);
+}
+
+svuint16_t test_svdup_lane_u16(svuint16_t data, uint16_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_u16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x i16> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_u16,,)(data, index);
+}
+
+svuint32_t test_svdup_lane_u32(svuint32_t data, uint32_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_u32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x i32> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_u32,,)(data, index);
+}
+
+svuint64_t test_svdup_lane_u64(svuint64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_u64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x i64> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_u64,,)(data, index);
+}
+
+svfloat16_t test_svdup_lane_f16(svfloat16_t data, uint16_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_f16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i16> %[[DUP]])
+  // CHECK: ret <vscale x 8 x half> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_f16,,)(data, index);
+}
+
+svfloat32_t test_svdup_lane_f32(svfloat32_t data, uint32_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_f32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i32> %[[DUP]])
+  // CHECK: ret <vscale x 4 x float> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_f32,,)(data, index);
+}
+
+svfloat64_t test_svdup_lane_f64(svfloat64_t data, uint64_t index)
+{
+  // CHECK-LABEL: test_svdup_lane_f64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 %index)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i64> %[[DUP]])
+  // CHECK: ret <vscale x 2 x double> %[[INTRINSIC]]
+  return SVE_ACLE_FUNC(svdup_lane,_f64,,)(data, index);
+}
+
+svbool_t test_svdup_n_b8(bool op)
+{
+  // CHECK-LABEL: test_svdup_n_b8
+  // CHECK: %[[DUP:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.dup.x.nxv16i1(i1 %op)
+  // CHECK: ret <vscale x 16 x i1> %[[DUP]]
+  return SVE_ACLE_FUNC(svdup,_n,_b8,)(op);
+}
+
+svbool_t test_svdup_n_b16(bool op)
+{
+  // CHECK-LABEL: test_svdup_n_b16
+  // CHECK: %[[DUP:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.dup.x.nxv8i1(i1 %op)
+  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
+  return SVE_ACLE_FUNC(svdup,_n,_b16,)(op);
+}
+
+svbool_t test_svdup_n_b32(bool op)
+{
+  // CHECK-LABEL: test_svdup_n_b32
+  // CHECK: %[[DUP:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.dup.x.nxv4i1(i1 %op)
+  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
+  return SVE_ACLE_FUNC(svdup,_n,_b32,)(op);
+}
+
+svbool_t test_svdup_n_b64(bool op)
+{
+  // CHECK-LABEL: test_svdup_n_b64
+  // CHECK: %[[DUP:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.dup.x.nxv2i1(i1 %op)
+  // CHECK: %[[CVT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[DUP]])
+  // CHECK: ret <vscale x 16 x i1> %[[CVT]]
+  return SVE_ACLE_FUNC(svdup,_n,_b64,)(op);
+}
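
Usage note (illustrative, not part of the patch): with the TBL-based
lowering of svdup_lane shown above, broadcasting a single lane from C is
simply:

  svfloat32_t broadcast_lane0(svfloat32_t v) {
    return svdup_lane_f32(v, 0);  // every element = v[0]
  }

As the arm_sve.td comment notes, LLVM remains free to select DUP (indexed)
instead of TBL when the lane index fits the instruction's immediate range.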


        