Author: alowqie
Date: 2026-04-08T13:54:14+01:00
New Revision: 39c6ed3d080435426d606db70ecceda1cce311bb

URL: 
https://github.com/llvm/llvm-project/commit/39c6ed3d080435426d606db70ecceda1cce311bb
DIFF: 
https://github.com/llvm/llvm-project/commit/39c6ed3d080435426d606db70ecceda1cce311bb.diff

LOG: [CIR][AArch64] add vshr_* builtins (#186693)

Part of https://github.com/llvm/llvm-project/issues/185382

- Moved lowering logic from clangir incubator to upstream
- Added tests, partially reusing tests from
[neon-intrinsics.c](https://github.com/llvm/llvm-project/blob/main/clang/test/CodeGen/AArch64/neon-intrinsics.c)
and
[neon.c](https://github.com/llvm/clangir/blob/main/clang/test/CIR/CodeGen/AArch64/neon.c)
- Made sure that all intrinsics from [Neon
ACLE](https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#vector-shift-right)
are implemented and tested

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenBuilder.h
    clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
    clang/test/CodeGen/AArch64/neon-intrinsics.c
    clang/test/CodeGen/AArch64/neon/intrinsics.c

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h 
b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index fb047919b003d..f8d3d93e49075 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -461,6 +461,14 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     return getConstantInt(loc, getUInt64Ty(), c);
   }
 
+  cir::ConstantOp getZero(mlir::Location loc, mlir::Type ty) {
+    // TODO: dispatch creation for primitive types.
+    assert((mlir::isa<cir::RecordType>(ty) || mlir::isa<cir::ArrayType>(ty) ||
+            mlir::isa<cir::VectorType>(ty)) &&
+           "NYI for other types");
+    return cir::ConstantOp::create(*this, loc, cir::ZeroAttr::get(ty));
+  }
+
   
//===--------------------------------------------------------------------===//
   // UnaryOp creation helpers
   
//===--------------------------------------------------------------------===//

diff  --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 81e7e14e4ea71..cbdaebb157c4c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -207,6 +207,33 @@ static mlir::Value emitCommonNeonShift(CIRGenBuilderTy 
&builder,
                               shiftLeft);
 }
 
+// Right-shift a vector by a constant.
+static mlir::Value emitNeonRShiftImm(CIRGenFunction &cgf, mlir::Value shiftVec,
+                                     mlir::Value shiftVal,
+                                     cir::VectorType vecTy, bool usgn,
+                                     mlir::Location loc) {
+  CIRGenBuilderTy &builder = cgf.getBuilder();
+  int64_t shiftAmt = getIntValueFromConstOp(shiftVal);
+  int eltSize =
+      cgf.cgm.getDataLayout().getTypeSizeInBits(vecTy.getElementType());
+
+  shiftVec = builder.createBitcast(shiftVec, vecTy);
+  // lshr/ashr are undefined when the shift amount is equal to the vector
+  // element size.
+  if (shiftAmt == eltSize) {
+    if (usgn) {
+      // Right-shifting an unsigned value by its size yields 0.
+      return builder.getZero(loc, vecTy);
+    }
+    // Right-shifting a signed value by its size is equivalent
+    // to a shift of size-1.
+    --shiftAmt;
+    shiftVal = builder.getConstInt(loc, vecTy.getElementType(), shiftAmt);
+  }
+  return emitCommonNeonShift(builder, loc, vecTy, shiftVec, shiftVal,
+                             /*shiftLeft=*/false);
+}
+
 static cir::VectorType getIntVecFromVecTy(CIRGenBuilderTy &builder,
                                           cir::VectorType vecTy) {
   if (!cir::isAnyFloatingPointType(vecTy.getElementType()))
@@ -238,6 +265,7 @@ static mlir::Value emitCommonNeonBuiltinExpr(
 
   // Determine the type of this overloaded NEON intrinsic.
   NeonTypeFlags neonType(neonTypeConst->getZExtValue());
+  const bool isUnsigned = neonType.isUnsigned();
   const bool hasLegalHalfType = cgf.getTarget().hasFastHalfType();
 
   // The value of allowBFloatArgsAndRet is true for AArch64, but it should
@@ -461,8 +489,13 @@ static mlir::Value emitCommonNeonBuiltinExpr(
                                /*shiftLeft=*/true);
   case NEON::BI__builtin_neon_vshll_n_v:
   case NEON::BI__builtin_neon_vshrn_n_v:
+    cgf.cgm.errorNYI(expr->getSourceRange(),
+                     std::string("unimplemented AArch64 builtin call: ") +
+                         ctx.BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vshr_n_v:
   case NEON::BI__builtin_neon_vshrq_n_v:
+    return emitNeonRShiftImm(cgf, ops[0], ops[1], vTy, isUnsigned, loc);
   case NEON::BI__builtin_neon_vst1_v:
   case NEON::BI__builtin_neon_vst1q_v:
   case NEON::BI__builtin_neon_vst2_v:
@@ -2210,8 +2243,23 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
     assert(amt && "Expected argument to be a constant");
     return builder.createShiftLeft(loc, ops[0], amt->getZExtValue());
   }
-  case NEON::BI__builtin_neon_vshrd_n_s64:
-  case NEON::BI__builtin_neon_vshrd_n_u64:
+  case NEON::BI__builtin_neon_vshrd_n_s64: {
+    std::optional<llvm::APSInt> amt =
+        expr->getArg(1)->getIntegerConstantExpr(getContext());
+    assert(amt && "Expected argument to be a constant");
+    return builder.createShiftRight(
+        loc, ops[0], std::min(static_cast<uint64_t>(63), amt->getZExtValue()));
+  }
+  case NEON::BI__builtin_neon_vshrd_n_u64: {
+    std::optional<llvm::APSInt> amt =
+        expr->getArg(1)->getIntegerConstantExpr(getContext());
+    assert(amt && "Expected argument to be a constant");
+    uint64_t shiftAmt = amt->getZExtValue();
+    // Right-shifting an unsigned value by its size yields 0.
+    if (shiftAmt == 64)
+      return builder.getConstInt(loc, builder.getUInt64Ty(), 0);
+    return builder.createShiftRight(loc, ops[0], shiftAmt);
+  }
   case NEON::BI__builtin_neon_vsrad_n_s64:
   case NEON::BI__builtin_neon_vsrad_n_u64:
   case NEON::BI__builtin_neon_vqdmlalh_lane_s16:

diff  --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c 
b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index f384d935151c4..9f3484e162cb3 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -5907,166 +5907,6 @@ float64x2_t test_vmulxq_f64(float64x2_t a, float64x2_t 
b) {
   return vmulxq_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local <8 x i8> @test_vshr_n_s8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <8 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <8 x i8> [[VSHR_N]]
-//
-int8x8_t test_vshr_n_s8(int8x8_t a) {
-  return vshr_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vshr_n_s16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <4 x i16> [[VSHR_N]]
-//
-int16x4_t test_vshr_n_s16(int16x4_t a) {
-  return vshr_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vshr_n_s32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <2 x i32> [[VSHR_N]]
-//
-int32x2_t test_vshr_n_s32(int32x2_t a) {
-  return vshr_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vshrq_n_s8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <16 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <16 x i8> [[VSHR_N]]
-//
-int8x16_t test_vshrq_n_s8(int8x16_t a) {
-  return vshrq_n_s8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vshrq_n_s16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <8 x i16> [[VSHR_N]]
-//
-int16x8_t test_vshrq_n_s16(int16x8_t a) {
-  return vshrq_n_s16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vshrq_n_s32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <4 x i32> [[VSHR_N]]
-//
-int32x4_t test_vshrq_n_s32(int32x4_t a) {
-  return vshrq_n_s32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vshrq_n_s64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 3)
-// CHECK-NEXT:    ret <2 x i64> [[VSHR_N]]
-//
-int64x2_t test_vshrq_n_s64(int64x2_t a) {
-  return vshrq_n_s64(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vshr_n_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <8 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <8 x i8> [[VSHR_N]]
-//
-uint8x8_t test_vshr_n_u8(uint8x8_t a) {
-  return vshr_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vshr_n_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <4 x i16> [[VSHR_N]]
-//
-uint16x4_t test_vshr_n_u16(uint16x4_t a) {
-  return vshr_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vshr_n_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <2 x i32> [[VSHR_N]]
-//
-uint32x2_t test_vshr_n_u32(uint32x2_t a) {
-  return vshr_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vshrq_n_u8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <16 x i8> [[A]], splat (i8 3)
-// CHECK-NEXT:    ret <16 x i8> [[VSHR_N]]
-//
-uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
-  return vshrq_n_u8(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vshrq_n_u16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3)
-// CHECK-NEXT:    ret <8 x i16> [[VSHR_N]]
-//
-uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
-  return vshrq_n_u16(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vshrq_n_u32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 3)
-// CHECK-NEXT:    ret <4 x i32> [[VSHR_N]]
-//
-uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
-  return vshrq_n_u32(a, 3);
-}
-
-// CHECK-LABEL: define dso_local <2 x i64> @test_vshrq_n_u64(
-// CHECK-SAME: <2 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 3)
-// CHECK-NEXT:    ret <2 x i64> [[VSHR_N]]
-//
-uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
-  return vshrq_n_u64(a, 3);
-}
-
 // CHECK-LABEL: define dso_local <8 x i8> @test_vsra_n_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -16608,59 +16448,6 @@ uint64_t test_vcaltd_f64(float64_t a, float64_t b) {
   return (uint64_t)vcaltd_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_s64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[SHRD_N:%.*]] = ashr i64 [[A]], 1
-// CHECK-NEXT:    ret i64 [[SHRD_N]]
-//
-int64_t test_vshrd_n_s64(int64_t a) {
-  return (int64_t)vshrd_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vshr_n_s64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], splat (i64 1)
-// CHECK-NEXT:    ret <1 x i64> [[VSHR_N]]
-//
-int64x1_t test_vshr_n_s64(int64x1_t a) {
-  return vshr_n_s64(a, 1);
-}
-
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_u64(
-// CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    ret i64 0
-//
-uint64_t test_vshrd_n_u64(uint64_t a) {
-  return (uint64_t)vshrd_n_u64(a, 64);
-}
-
-// CHECK-LABEL: define dso_local i64 @test_vshrd_n_u64_2(
-// CHECK-SAME: ) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    ret i64 0
-//
-uint64_t test_vshrd_n_u64_2() {
-  uint64_t a = UINT64_C(0xf000000000000000);
-  return vshrd_n_u64(a, 64);
-}
-
-// CHECK-LABEL: define dso_local <1 x i64> @test_vshr_n_u64(
-// CHECK-SAME: <1 x i64> noundef [[A:%.*]]) #[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
-// CHECK-NEXT:    [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], splat (i64 1)
-// CHECK-NEXT:    ret <1 x i64> [[VSHR_N]]
-//
-uint64x1_t test_vshr_n_u64(uint64x1_t a) {
-  return vshr_n_u64(a, 1);
-}
-
 // CHECK-LABEL: define dso_local i64 @test_vrshrd_n_s64(
 // CHECK-SAME: i64 noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]

diff  --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 98a8dd78f9716..415d3c660be47 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1706,6 +1706,242 @@ uint64x1_t test_vshl_n_u64(uint64x1_t a) {
  return vshl_n_u64(a, 1);
 }
 
+//===------------------------------------------------------===//
+// 2.1.3.2.1 Vector shift right
+//===------------------------------------------------------===//
+
+// ALL-LABEL: @test_vshr_n_s8( 
+int8x8_t test_vshr_n_s8(int8x8_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<8 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s8i>, {{%.*}} : 
!cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+
+// LLVM-SAME: <8 x i8> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:   [[VSHR_N:%.*]] = ashr <8 x i8> {{.*}}, splat (i8 3)
+// LLVM:   ret <8 x i8> [[VSHR_N]]
+  return vshr_n_s8(a, 3);
+}
+
+// LLVM-LABEL: @test_vshr_n_s16( 
+// CIR-LABEL: @test_vshr_n_s16( 
+int16x4_t test_vshr_n_s16(int16x4_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<4 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s16i>, {{%.*}} : 
!cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+
+// LLVM-SAME: <4 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:   [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+// LLVM:   [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// LLVM:   [[VSHR_N:%.*]] = ashr <4 x i16> [[TMP1]], splat (i16 3)
+// LLVM:   ret <4 x i16> [[VSHR_N]]
+  return vshr_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_s32( 
+int32x2_t test_vshr_n_s32(int32x2_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<2 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s32i>, {{%.*}} : 
!cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+
+// LLVM-SAME: <2 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:  [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+// LLVM:  [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// LLVM:  [[VSHR_N:%.*]] = ashr <2 x i32> [[TMP1]], splat (i32 3)
+// LLVM:  ret <2 x i32> [[VSHR_N]]
+  return vshr_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_s64( 
+int64x1_t test_vshr_n_s64(int64x1_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<1 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !s64i>, {{%.*}} : 
!cir.vector<1 x !s64i>) -> !cir.vector<1 x !s64i>
+
+// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// LLVM: [[VSHR_N:%.*]] = ashr <1 x i64> [[TMP1]], splat (i64 1)
+// LLVM: ret <1 x i64> [[VSHR_N]]
+  return vshr_n_s64(a, 1);
+}
+
+// ALL-LABEL: @test_vshrq_n_s8( 
+int8x16_t test_vshrq_n_s8(int8x16_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s8i, !cir.vector<16 x !s8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !s8i>, {{%.*}} : 
!cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+
+// LLVM-SAME: <16 x i8> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[VSHR_N:%.*]] = ashr <16 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <16 x i8> [[VSHR_N]]
+  return vshrq_n_s8(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_s16( 
+int16x8_t test_vshrq_n_s16(int16x8_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s16i, !cir.vector<8 x !s16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !s16i>, {{%.*}} : 
!cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+
+// LLVM-SAME: <8 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// LLVM: [[VSHR_N:%.*]] = ashr <8 x i16> [[TMP1]], splat (i16 3)
+// LLVM: ret <8 x i16> [[VSHR_N]]
+  return vshrq_n_s16(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_s32( 
+int32x4_t test_vshrq_n_s32(int32x4_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s32i, !cir.vector<4 x !s32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !s32i>, {{%.*}} : 
!cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+
+// LLVM-SAME: <4 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// LLVM: [[VSHR_N:%.*]] = ashr <4 x i32> [[TMP1]], splat (i32 3)
+// LLVM: ret <4 x i32> [[VSHR_N]]   
+  return vshrq_n_s32(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_s64( 
+int64x2_t test_vshrq_n_s64(int64x2_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !s64i, !cir.vector<2 x !s64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !s64i>, {{%.*}} : 
!cir.vector<2 x !s64i>) -> !cir.vector<2 x !s64i>
+
+// LLVM-SAME: <2 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// LLVM: [[VSHR_N:%.*]] = ashr <2 x i64> [[TMP1]], splat (i64 3)
+// LLVM: ret <2 x i64> [[VSHR_N]]
+  return vshrq_n_s64(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_u8( 
+uint8x8_t test_vshr_n_u8(uint8x8_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<8 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u8i>, {{%.*}} : 
!cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+
+// LLVM-SAME: <8 x i8> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[VSHR_N:%.*]] = lshr <8 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <8 x i8> [[VSHR_N]]
+  return vshr_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_u16( 
+uint16x4_t test_vshr_n_u16(uint16x4_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<4 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u16i>, {{%.*}} : 
!cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+
+// LLVM-SAME: <4 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i16> [[TMP1]], splat (i16 3)
+// LLVM: ret <4 x i16> [[VSHR_N]] 
+  return vshr_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_u32( 
+uint32x2_t test_vshr_n_u32(uint32x2_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<2 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u32i>, {{%.*}} : 
!cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+
+// LLVM-SAME: <2 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i32> [[TMP1]], splat (i32 3)
+// LLVM: ret <2 x i32> [[VSHR_N]]
+  return vshr_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vshr_n_u64( 
+uint64x1_t test_vshr_n_u64(uint64x1_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<1 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<1 x !u64i>, {{%.*}} : 
!cir.vector<1 x !u64i>) -> !cir.vector<1 x !u64i>
+
+// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
+// LLVM: [[VSHR_N:%.*]] = lshr <1 x i64> [[TMP1]], splat (i64 3)
+// LLVM: ret <1 x i64> [[VSHR_N]]
+  return vshr_n_u64(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_u8( 
+uint8x16_t test_vshrq_n_u8(uint8x16_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u8i, !cir.vector<16 x !u8i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<16 x !u8i>, {{%.*}} : 
!cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+
+// LLVM-SAME: <16 x i8> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[VSHR_N:%.*]] = lshr <16 x i8> {{.*}}, splat (i8 3)
+// LLVM: ret <16 x i8> [[VSHR_N]]
+  return vshrq_n_u8(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_u16( 
+uint16x8_t test_vshrq_n_u16(uint16x8_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u16i, !cir.vector<8 x !u16i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<8 x !u16i>, {{%.*}} : 
!cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+
+// LLVM-SAME: <8 x i16> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+// LLVM: [[VSHR_N:%.*]] = lshr <8 x i16> [[TMP1]], splat (i16 3)
+// LLVM: ret <8 x i16> [[VSHR_N]]
+  return vshrq_n_u16(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_u32( 
+uint32x4_t test_vshrq_n_u32(uint32x4_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u32i, !cir.vector<4 x !u32i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<4 x !u32i>, {{%.*}} : 
!cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+
+// LLVM-SAME: <4 x i32> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+// LLVM: [[VSHR_N:%.*]] = lshr <4 x i32> [[TMP1]], splat (i32 3)
+// LLVM: ret <4 x i32> [[VSHR_N]]
+  return vshrq_n_u32(a, 3);
+}
+
+// ALL-LABEL: @test_vshrq_n_u64( 
+uint64x2_t test_vshrq_n_u64(uint64x2_t a) {
+// CIR: {{%.*}} = cir.vec.splat {{%.*}} : !u64i, !cir.vector<2 x !u64i>
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !cir.vector<2 x !u64i>, {{%.*}} : 
!cir.vector<2 x !u64i>) -> !cir.vector<2 x !u64i>
+
+// LLVM-SAME: <2 x i64> {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM: [[TMP0:%.*]] = bitcast <2 x i64> [[A]] to <16 x i8>
+// LLVM: [[TMP1:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
+// LLVM: [[VSHR_N:%.*]] = lshr <2 x i64> [[TMP1]], splat (i64 3)
+// LLVM: ret <2 x i64> [[VSHR_N]]
+  return vshrq_n_u64(a, 3);
+}
+
+// ALL-LABEL: @test_vshrd_n_s64( 
+int64_t test_vshrd_n_s64(int64_t a) {
+// CIR: {{%.*}} = cir.shift(right, {{%.*}} : !s64i, {{%.*}} : !s64i) -> !s64i
+
+// LLVM-SAME: i64 {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:   [[SHRD_N:%.*]] = ashr i64 {{.*}}, 1
+// LLVM:   ret i64 [[SHRD_N]]
+  return (int64_t)vshrd_n_s64(a, 1);
+}
+
+// ALL-LABEL: @test_vshrd_n_u64( 
+uint64_t test_vshrd_n_u64(uint64_t a) {
+// CIR: {{.*}} = cir.const #cir.int<0> : !u64i
+// CIR: cir.return {{.*}} : !u64i
+
+// LLVM-SAME: i64 {{.*}} [[A:%.*]]) {{.*}} {
+// LLVM:   ret i64 0
+  return (uint64_t)vshrd_n_u64(a, 64);
+}
+
+// ALL-LABEL: @test_vshrd_n_u64_2( 
+uint64_t test_vshrd_n_u64_2() {
+// CIR: {{.*}} = cir.const #cir.int<0> : !u64i
+// CIR: cir.return {{.*}} : !u64i
+
+// LLVM-SAME: {{.*}} {
+// LLVM: ret i64 0
+  uint64_t a = UINT64_C(0xf000000000000000);
+  return vshrd_n_u64(a, 64);
+}
+
 //===------------------------------------------------------===//
 // 2.1.8.5 Bitwise select 
 // 
https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#markdown-toc-bitwise-select


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to