Author: Zhihui Yang
Date: 2026-04-08T16:03:25+01:00
New Revision: 9a81d3995949135935602557702abc3cf28d3a40

URL: 
https://github.com/llvm/llvm-project/commit/9a81d3995949135935602557702abc3cf28d3a40
DIFF: 
https://github.com/llvm/llvm-project/commit/9a81d3995949135935602557702abc3cf28d3a40.diff

LOG: [CIR][AArch64] Upstream NEON Minimum (#187935)

Part of https://github.com/llvm/llvm-project/issues/185382
Added intrinsics for
vmin_*
vminq_*
vminnm_*
vminnmq_*

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
    clang/test/CodeGen/AArch64/neon-intrinsics.c
    clang/test/CodeGen/AArch64/neon/intrinsics.c

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index cbdaebb157c4c..cd29c2f16c004 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2353,8 +2353,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
       intrName = "aarch64.neon.fmax";
     return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vmaxh_f16:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vmin_v:
   case NEON::BI__builtin_neon_vminq_v:
+    intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+    if (cir::isFPOrVectorOfFPType(ty))
+      intrName = "aarch64.neon.fmin";
+    return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminh_f16:
     cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +
@@ -2372,8 +2380,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned 
builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vpminq_v:
   case NEON::BI__builtin_neon_vpmax_v:
   case NEON::BI__builtin_neon_vpmaxq_v:
+    cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vminnm_v:
   case NEON::BI__builtin_neon_vminnmq_v:
+    intrName = "aarch64.neon.fminnm";
+    return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminnmh_f16:
     cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +

diff  --git a/clang/test/CodeGen/AArch64/neon-intrinsics.c 
b/clang/test/CodeGen/AArch64/neon-intrinsics.c
index 9f3484e162cb3..7c5a7336ef6b6 100644
--- a/clang/test/CodeGen/AArch64/neon-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-intrinsics.c
@@ -4762,254 +4762,6 @@ poly64x2_t test_vsliq_n_p64(poly64x2_t a, poly64x2_t b) 
{
   return vsliq_n_p64(a, b, 0);
 }
 
-// CHECK-LABEL: define dso_local <8 x i8> @test_vmin_s8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VMIN_I:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.smin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
-// CHECK-NEXT:    ret <8 x i8> [[VMIN_I]]
-//
-int8x8_t test_vmin_s8(int8x8_t a, int8x8_t b) {
-  return vmin_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vmin_s16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-// CHECK-NEXT:    ret <4 x i16> [[VMIN2_I]]
-//
-int16x4_t test_vmin_s16(int16x4_t a, int16x4_t b) {
-  return vmin_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vmin_s32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-// CHECK-NEXT:    ret <2 x i32> [[VMIN2_I]]
-//
-int32x2_t test_vmin_s32(int32x2_t a, int32x2_t b) {
-  return vmin_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i8> @test_vmin_u8(
-// CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VMIN_I:%.*]] = call <8 x i8> 
@llvm.aarch64.neon.umin.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
-// CHECK-NEXT:    ret <8 x i8> [[VMIN_I]]
-//
-uint8x8_t test_vmin_u8(uint8x8_t a, uint8x8_t b) {
-  return vmin_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i16> @test_vmin_u16(
-// CHECK-SAME: <4 x i16> noundef [[A:%.*]], <4 x i16> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-// CHECK-NEXT:    ret <4 x i16> [[VMIN2_I]]
-//
-uint16x4_t test_vmin_u16(uint16x4_t a, uint16x4_t b) {
-  return vmin_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x i32> @test_vmin_u32(
-// CHECK-SAME: <2 x i32> noundef [[A:%.*]], <2 x i32> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-// CHECK-NEXT:    ret <2 x i32> [[VMIN2_I]]
-//
-uint32x2_t test_vmin_u32(uint32x2_t a, uint32x2_t b) {
-  return vmin_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x float> @test_vmin_f32(
-// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[B]] to <2 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <2 x float> 
@llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
-// CHECK-NEXT:    ret <2 x float> [[VMIN2_I]]
-//
-float32x2_t test_vmin_f32(float32x2_t a, float32x2_t b) {
-  return vmin_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vminq_s8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VMIN_I:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.smin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
-// CHECK-NEXT:    ret <16 x i8> [[VMIN_I]]
-//
-int8x16_t test_vminq_s8(int8x16_t a, int8x16_t b) {
-  return vminq_s8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vminq_s16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-// CHECK-NEXT:    ret <8 x i16> [[VMIN2_I]]
-//
-int16x8_t test_vminq_s16(int16x8_t a, int16x8_t b) {
-  return vminq_s16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vminq_s32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-// CHECK-NEXT:    ret <4 x i32> [[VMIN2_I]]
-//
-int32x4_t test_vminq_s32(int32x4_t a, int32x4_t b) {
-  return vminq_s32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <16 x i8> @test_vminq_u8(
-// CHECK-SAME: <16 x i8> noundef [[A:%.*]], <16 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[VMIN_I:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.umin.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
-// CHECK-NEXT:    ret <16 x i8> [[VMIN_I]]
-//
-uint8x16_t test_vminq_u8(uint8x16_t a, uint8x16_t b) {
-  return vminq_u8(a, b);
-}
-
-// CHECK-LABEL: define dso_local <8 x i16> @test_vminq_u16(
-// CHECK-SAME: <8 x i16> noundef [[A:%.*]], <8 x i16> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-// CHECK-NEXT:    ret <8 x i16> [[VMIN2_I]]
-//
-uint16x8_t test_vminq_u16(uint16x8_t a, uint16x8_t b) {
-  return vminq_u16(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x i32> @test_vminq_u32(
-// CHECK-SAME: <4 x i32> noundef [[A:%.*]], <4 x i32> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A]] to <16 x i8>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[B]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-// CHECK-NEXT:    ret <4 x i32> [[VMIN2_I]]
-//
-uint32x4_t test_vminq_u32(uint32x4_t a, uint32x4_t b) {
-  return vminq_u32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x float> @test_vminq_f32(
-// CHECK-SAME: <4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B]] to <4 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <4 x float> 
@llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
-// CHECK-NEXT:    ret <4 x float> [[VMIN2_I]]
-//
-float32x4_t test_vminq_f32(float32x4_t a, float32x4_t b) {
-  return vminq_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x double> @test_vminq_f64(
-// CHECK-SAME: <2 x double> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[B]] to <2 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <2 x double> 
@llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
-// CHECK-NEXT:    ret <2 x double> [[VMIN2_I]]
-//
-float64x2_t test_vminq_f64(float64x2_t a, float64x2_t b) {
-  return vminq_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x float> @test_vminnm_f32(
-// CHECK-SAME: <2 x float> noundef [[A:%.*]], <2 x float> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[B]] to <2 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
-// CHECK-NEXT:    [[VMINNM_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
-// CHECK-NEXT:    [[VMINNM1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
-// CHECK-NEXT:    [[VMINNM2_I:%.*]] = call <2 x float> 
@llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_I]], <2 x float> 
[[VMINNM1_I]])
-// CHECK-NEXT:    ret <2 x float> [[VMINNM2_I]]
-//
-float32x2_t test_vminnm_f32(float32x2_t a, float32x2_t b) {
-  return vminnm_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <4 x float> @test_vminnmq_f32(
-// CHECK-SAME: <4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B]] to <4 x i32>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
-// CHECK-NEXT:    [[VMINNM_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
-// CHECK-NEXT:    [[VMINNM1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
-// CHECK-NEXT:    [[VMINNM2_I:%.*]] = call <4 x float> 
@llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_I]], <4 x float> 
[[VMINNM1_I]])
-// CHECK-NEXT:    ret <4 x float> [[VMINNM2_I]]
-//
-float32x4_t test_vminnmq_f32(float32x4_t a, float32x4_t b) {
-  return vminnmq_f32(a, b);
-}
-
-// CHECK-LABEL: define dso_local <2 x double> @test_vminnmq_f64(
-// CHECK-SAME: <2 x double> noundef [[A:%.*]], <2 x double> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64>
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[B]] to <2 x i64>
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
-// CHECK-NEXT:    [[VMINNM_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
-// CHECK-NEXT:    [[VMINNM1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x 
double>
-// CHECK-NEXT:    [[VMINNM2_I:%.*]] = call <2 x double> 
@llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_I]], <2 x double> 
[[VMINNM1_I]])
-// CHECK-NEXT:    ret <2 x double> [[VMINNM2_I]]
-//
-float64x2_t test_vminnmq_f64(float64x2_t a, float64x2_t b) {
-  return vminnmq_f64(a, b);
-}
-
 // CHECK-LABEL: define dso_local <8 x i8> @test_vpmax_s8(
 // CHECK-SAME: <8 x i8> noundef [[A:%.*]], <8 x i8> noundef [[B:%.*]]) 
#[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]
@@ -21615,42 +21367,6 @@ float64x1_t test_vabd_f64(float64x1_t a, float64x1_t 
b) {
   return vabd_f64(a, b);
 }
 
-// CHECK-LABEL: define dso_local <1 x double> @test_vmin_f64(
-// CHECK-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
-// CHECK-NEXT:    [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x 
i64> undef, i64 [[TMP0]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[B]] to i64
-// CHECK-NEXT:    [[__P1_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x 
i64> undef, i64 [[TMP1]], i32 0
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <1 x i64> 
[[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <1 x i64> 
[[__P1_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
-// CHECK-NEXT:    [[VMIN_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
-// CHECK-NEXT:    [[VMIN1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double>
-// CHECK-NEXT:    [[VMIN2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
-// CHECK-NEXT:    ret <1 x double> [[VMIN2_I]]
-//
-float64x1_t test_vmin_f64(float64x1_t a, float64x1_t b) {
-  return vmin_f64(a, b);
-}
-
-// CHECK-LABEL: define dso_local <1 x double> @test_vminnm_f64(
-// CHECK-SAME: <1 x double> noundef [[A:%.*]], <1 x double> noundef [[B:%.*]]) 
#[[ATTR0]] {
-// CHECK-NEXT:  [[ENTRY:.*:]]
-// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[A]] to i64
-// CHECK-NEXT:    [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x 
i64> undef, i64 [[TMP0]], i32 0
-// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[B]] to i64
-// CHECK-NEXT:    [[__P1_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x 
i64> undef, i64 [[TMP1]], i32 0
-// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <1 x i64> 
[[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
-// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <1 x i64> 
[[__P1_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
-// CHECK-NEXT:    [[VMINNM_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
-// CHECK-NEXT:    [[VMINNM1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double>
-// CHECK-NEXT:    [[VMINNM2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_I]], <1 x double> 
[[VMINNM1_I]])
-// CHECK-NEXT:    ret <1 x double> [[VMINNM2_I]]
-//
-float64x1_t test_vminnm_f64(float64x1_t a, float64x1_t b) {
-  return vminnm_f64(a, b);
-}
-
 // CHECK-LABEL: define dso_local <1 x double> @test_vabs_f64(
 // CHECK-SAME: <1 x double> noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  [[ENTRY:.*:]]

diff  --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 415d3c660be47..7d8b7c0e7b629 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1474,6 +1474,310 @@ poly16x8_t test_vmull_high_p8(poly8x16_t a, poly8x16_t 
b) {
   return vmull_high_p8(a, b);
 }
 
+//===----------------------------------------------------------------------===//
+// 2.1.8 Minimum
+// https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#minimum
+//===----------------------------------------------------------------------===//
+
+// LLVM-LABEL: @test_vmin_s8
+// CIR-LABEL: @vmin_s8(
+int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+  
+  // LLVM-SAME: <8 x i8> {{.*}} [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 
x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM:    ret <8 x i8> [[VMIN_V_I]]
+  return vmin_s8(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_s16
+// CIR-LABEL: @vmin_s16(
+int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+  
+  // LLVM-SAME: <4 x i16> {{.*}} [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+  // LLVM:    ret <4 x i16> [[VMIN_V2_I]]
+  return vmin_s16(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_s32
+// CIR-LABEL: @vmin_s32(
+int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+  
+  // LLVM-SAME: <2 x i32> {{.*}} [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+  // LLVM:    ret <2 x i32> [[VMIN_V2_I]]
+  return vmin_s32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_u8
+// CIR-LABEL: @vmin_u8(
+uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+  
+  // LLVM-SAME: <8 x i8> {{.*}} [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 
x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM:    ret <8 x i8> [[VMIN_V_I]]
+  return vmin_u8(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_u16
+// CIR-LABEL: @vmin_u16(
+uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+  
+  // LLVM-SAME: <4 x i16> {{.*}} [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <4 x i16> 
@llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+  // LLVM:    ret <4 x i16> [[VMIN_V2_I]]
+  return vmin_u16(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_u32
+// CIR-LABEL: @vmin_u32(
+uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+  
+  // LLVM-SAME: <2 x i32> {{.*}} [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <2 x i32> 
@llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+  // LLVM:    ret <2 x i32> [[VMIN_V2_I]]
+  return vmin_u32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_s8
+// CIR-LABEL: @vminq_s8(
+int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+  
+  // LLVM-SAME: <16 x i8> {{.*}} [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[VMINQ_V_I:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM:    ret <16 x i8> [[VMINQ_V_I]]
+  return vminq_s8(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_s16
+// CIR-LABEL: @vminq_s16(
+int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+  
+  // LLVM-SAME: <8 x i16> {{.*}} [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+  // LLVM:    ret <8 x i16> [[VMINQ_V2_I]]
+  return vminq_s16(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_s32
+// CIR-LABEL: @vminq_s32(
+int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+  
+  // LLVM-SAME: <4 x i32> {{.*}} [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+  // LLVM:    ret <4 x i32> [[VMINQ_V2_I]]
+  return vminq_s32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_u8
+// CIR-LABEL: @vminq_u8(
+uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+  
+  // LLVM-SAME: <16 x i8> {{.*}} [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[VMINQ_V_I:%.*]] = call <16 x i8> 
@llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM:    ret <16 x i8> [[VMINQ_V_I]]
+  return vminq_u8(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_u16
+// CIR-LABEL: @vminq_u16(
+uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+  
+  // LLVM-SAME: <8 x i16> {{.*}} [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <8 x i16> 
@llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+  // LLVM:    ret <8 x i16> [[VMINQ_V2_I]]
+
+  return vminq_u16(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_u32
+// CIR-LABEL: @vminq_u32
+uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+  
+  // LLVM-SAME: <4 x i32> {{.*}} [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <4 x i32> 
@llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+  // LLVM:    ret <4 x i32> [[VMINQ_V2_I]]
+  return vminq_u32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_f32
+// CIR-LABEL: @vmin_f32
+float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !cir.float>, !cir.vector<2 x !cir.float>) -> !cir.vector<2 x 
!cir.float>
+  
+  // LLVM-SAME: <2 x float> {{.*}} [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+  // LLVM:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <2 x float> 
@llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> 
[[VMIN_V1_I]])
+  // LLVM:    ret <2 x float> [[VMIN_V2_I]]
+  return vmin_f32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vmin_f64
+// CIR-LABEL: @vmin_f64
+float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<1 x !cir.double>, !cir.vector<1 x !cir.double>) -> !cir.vector<1 x 
!cir.double>
+
+  // LLVM-SAME: <1 x double> {{.*}} [[V1:%.*]], <1 x double> noundef 
[[V2:%.*]]) {{.*}} {
+  // LLVM:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> {{.*}} to <1 x double>
+  // LLVM:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> {{.*}} to <1 x double>
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_V_I]], <1 x double> 
[[VMIN_V1_I]])
+  // LLVM:    ret <1 x double> [[VMIN_V2_I]]
+  return vmin_f64(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_f32
+// CIR-LABEL: @vminq_f32
+float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !cir.float>, !cir.vector<4 x !cir.float>) -> !cir.vector<4 x 
!cir.float>
+  
+  // LLVM-SAME: <4 x float> {{.*}} [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) 
{{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+  // LLVM:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <4 x float> 
@llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMINQ_V_I]], <4 x float> 
[[VMINQ_V1_I]])
+  // LLVM:    ret <4 x float> [[VMINQ_V2_I]]
+  return vminq_f32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminq_f64
+// CIR-LABEL: @vminq_f64
+float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.double>, !cir.vector<2 x !cir.double>) -> !cir.vector<2 x !cir.double>
+
+  // LLVM-SAME: <2 x double> {{.*}} [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) {{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+  // LLVM:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+  // LLVM:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+  // LLVM:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
+  // LLVM:    [[VMINQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMINQ_V_I]], <2 x double> [[VMINQ_V1_I]])
+  // LLVM:    ret <2 x double> [[VMINQ_V2_I]]
+  return vminq_f64(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminnm_f32
+// CIR-LABEL: @vminnm_f32
+float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.float>, !cir.vector<2 x !cir.float>) -> !cir.vector<2 x !cir.float>
+
+  // LLVM-SAME: <2 x float> {{.*}} [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) {{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+  // LLVM:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
+  // LLVM:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
+  // LLVM:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
+  // LLVM:    [[VMINNM_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_V_I]], <2 x float> [[VMINNM_V1_I]])
+  // LLVM:    ret <2 x float> [[VMINNM_V2_I]]
+  return vminnm_f32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminnm_f64
+// CIR-LABEL: @vminnm_f64
+float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !cir.double>, !cir.vector<1 x !cir.double>) -> !cir.vector<1 x !cir.double>
+
+  // LLVM-SAME: <1 x double> {{.*}} [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) {{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to i64
+  // LLVM:    [[__P0_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP0]], i32 0
+  // LLVM:    [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to i64
+  // LLVM:    [[__P1_ADDR_I_SROA_0_0_VEC_INSERT:%.*]] = insertelement <1 x i64> undef, i64 [[TMP1]], i32 0
+  // LLVM:    [[TMP2:%.*]] = bitcast <1 x i64> [[__P0_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <1 x i64> [[__P1_ADDR_I_SROA_0_0_VEC_INSERT]] to <8 x i8>
+  // LLVM:    [[VMINNM_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <1 x double>
+  // LLVM:    [[VMINNM1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <1 x double>
+  // LLVM:    [[VMINNM2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_I]], <1 x double> [[VMINNM1_I]])
+  // LLVM:    ret <1 x double> [[VMINNM2_I]]
+  return vminnm_f64(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminnmq_f32
+// CIR-LABEL: @vminnmq_f32
+float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !cir.float>, !cir.vector<4 x !cir.float>) -> !cir.vector<4 x !cir.float>
+
+  // LLVM-SAME: <4 x float> {{.*}} [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) {{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+  // LLVM:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+  // LLVM:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+  // LLVM:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
+  // LLVM:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x float>
+  // LLVM:    [[VMINNM_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_V_I]], <4 x float> [[VMINNM_V1_I]])
+  // LLVM:    ret <4 x float> [[VMINNM_V2_I]]
+  return vminnmq_f32(v1, v2);
+}
+
+// LLVM-LABEL: @test_vminnmq_f64
+// CIR-LABEL: @vminnmq_f64
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !cir.double>, !cir.vector<2 x !cir.double>) -> !cir.vector<2 x !cir.double>
+
+  // LLVM-SAME: <2 x double> {{.*}} [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) {{.*}} {
+  // LLVM:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+  // LLVM:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+  // LLVM:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+  // LLVM:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+  // LLVM:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x double>
+  // LLVM:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x double>
+  // LLVM:    [[VMINNM_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_V_I]], <2 x double> [[VMINNM_V1_I]])
+  // LLVM:    ret <2 x double> [[VMINNM_V2_I]]
+  return vminnmq_f64(v1, v2);
+}
+
 //===------------------------------------------------------===//
 // 2.1.3.1.1. Vector Shift Left
 // 
https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#vector-shift-left


        
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to