https://github.com/YGGkk updated https://github.com/llvm/llvm-project/pull/187935

>From f14970ff206e9772d2c1fa1ba7b5ae04d5ccfd06 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Sun, 22 Mar 2026 08:18:45 -0700
Subject: [PATCH 1/8] [CIR][AArch64] Upstream NEON Minimum

---
 .../lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp  |  14 ++
 clang/test/CodeGen/AArch64/neon/intrinsics.c  | 167 ++++++++++++++++++
 2 files changed, 181 insertions(+)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 5d7b8d839fa84..aafcac244fe65 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2873,8 +2873,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vmax_v:
   case NEON::BI__builtin_neon_vmaxq_v:
   case NEON::BI__builtin_neon_vmaxh_f16:
+      cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vmin_v:
   case NEON::BI__builtin_neon_vminq_v:
+        intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+      if(cir::isFPOrVectorOfFPType(ty))
+        intrName = "aarch64.neon.fmin";
+      return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminh_f16:
     cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +
@@ -2892,8 +2900,14 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vpminq_v:
   case NEON::BI__builtin_neon_vpmax_v:
   case NEON::BI__builtin_neon_vpmaxq_v:
+        cgm.errorNYI(expr->getSourceRange(),
+                 std::string("unimplemented AArch64 builtin call: ") +
+                     getContext().BuiltinInfo.getName(builtinID));
+    return mlir::Value{};
   case NEON::BI__builtin_neon_vminnm_v:
   case NEON::BI__builtin_neon_vminnmq_v:
+      intrName = "aarch64.neon.fminnm";
+    return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminnmh_f16:
   case NEON::BI__builtin_neon_vmaxnm_v:
   case NEON::BI__builtin_neon_vmaxnmq_v:
diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index bf8e62feda8da..fcc0b14c88695 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -982,3 +982,170 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
   return (int64_t)vshld_u64(a, b);
 }
 
+//===----------------------------------------------------------------------===//
+// 2.1.8 Minimum
+// https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#minimum
+//===----------------------------------------------------------------------===//
+
+// ALL-LABEL: @test_vmin_s8
+int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
+  // LLVM:    [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM:    ret <8 x i8> [[VMIN_I]]
+  return vmin_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s16
+int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN_I]]
+  return vmin_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_s32
+int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN_I]]
+  return vmin_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u8
+uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
+  // LLVM:    [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM-NEXT:    ret <8 x i8> [[VMIN_I]]
+  return vmin_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u16
+uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN_I]]
+  return vmin_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_u32
+uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN_I]]
+  return vmin_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s8
+int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
+  // LLVM:    [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMIN_I]]
+  return vminq_s8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s16
+int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
+  // LLVM:    [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMIN_I]]
+  return vminq_s16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_s32
+int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMIN_I]]
+  return vminq_s32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u8
+uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
+  // LLVM:    [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMIN_I]]
+  return vminq_u8(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u16
+uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
+  // LLVM:    [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMIN_I]]
+  return vminq_u16(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_u32
+uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMIN_I]]
+  return vminq_u32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f32
+float32x4_t test_vmin_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  return vmin_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vmin_f64
+float64x2_t test_vmin_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  return vmin_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f32
+float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  return vminq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminq_f64
+float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  return vminq_f64(v1, v2);
+}
+ // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  return vminnm_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnm_f64
+float64x2_t test_vminnm_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  return vminnm_f64(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f32
+float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  return vminnmq_f32(v1, v2);
+}
+
+// ALL-LABEL: @test_vminnmq_f64
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  return vminnmq_f64(v1, v2);
+}
+// ALL-LABEL: @test_vminnm_f32
+float32x4_t test_vminnm_f32(float32x4_t v1, float32x4_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  return vminnm_f32(v1, v2);
+}
\ No newline at end of file
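
For context, the vmin family is lane-wise: each output lane is the minimum of the corresponding input lanes. A minimal usage sketch (hand-written, not taken from the test file; the comment reflects the lowering this patch adds):

  #include <arm_neon.h>

  // out[i] = (v1[i] < v2[i]) ? v1[i] : v2[i] for each of the 8 lanes; per
  // this patch, CIR lowers the call to llvm.aarch64.neon.smin.v8i8.
  int8x8_t pick_smaller_lanes(int8x8_t v1, int8x8_t v2) {
    return vmin_s8(v1, v2);
  }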

>From f6b222ccd532444abc6e46887189295716db0111 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Sun, 22 Mar 2026 08:20:11 -0700
Subject: [PATCH 2/8] [NEON] Fix indentation and formatting in AArch64 builtin
 expression handling

---
 clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index aafcac244fe65..140a6e893a394 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2873,16 +2873,16 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vmax_v:
   case NEON::BI__builtin_neon_vmaxq_v:
   case NEON::BI__builtin_neon_vmaxh_f16:
-      cgm.errorNYI(expr->getSourceRange(),
+    cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +
                      getContext().BuiltinInfo.getName(builtinID));
     return mlir::Value{};
   case NEON::BI__builtin_neon_vmin_v:
   case NEON::BI__builtin_neon_vminq_v:
-        intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
-      if(cir::isFPOrVectorOfFPType(ty))
-        intrName = "aarch64.neon.fmin";
-      return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
+    intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
+    if (cir::isFPOrVectorOfFPType(ty))
+      intrName = "aarch64.neon.fmin";
+    return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminh_f16:
     cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +
@@ -2900,13 +2900,13 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr,
   case NEON::BI__builtin_neon_vpminq_v:
   case NEON::BI__builtin_neon_vpmax_v:
   case NEON::BI__builtin_neon_vpmaxq_v:
-        cgm.errorNYI(expr->getSourceRange(),
+    cgm.errorNYI(expr->getSourceRange(),
                  std::string("unimplemented AArch64 builtin call: ") +
                      getContext().BuiltinInfo.getName(builtinID));
     return mlir::Value{};
   case NEON::BI__builtin_neon_vminnm_v:
   case NEON::BI__builtin_neon_vminnmq_v:
-      intrName = "aarch64.neon.fminnm";
+    intrName = "aarch64.neon.fminnm";
     return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
   case NEON::BI__builtin_neon_vminnmh_f16:
   case NEON::BI__builtin_neon_vmaxnm_v:
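
After this cleanup the dispatch reads as grouped case labels that fall through to one shared lowering per intrinsic family; condensed from the hunk above (a sketch, not a verbatim excerpt):

  switch (builtinID) {
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // Integer element types pick signed vs. unsigned; FP vectors override
    // the choice to the floating-point minimum intrinsic.
    intrName = usgn ? "aarch64.neon.umin" : "aarch64.neon.smin";
    if (cir::isFPOrVectorOfFPType(ty))
      intrName = "aarch64.neon.fmin";
    return emitNeonCall(cgm, builder, {ty, ty}, ops, intrName, ty, loc);
  }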

>From a3128e160b005a524816f4da31205ab31ceb797c Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Mon, 23 Mar 2026 07:31:25 -0700
Subject: [PATCH 3/8] [AArch64][NEON] Correct the vector types in the vmin and
 vminnm intrinsic tests to match their ACLE signatures

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 47 +++++++++-----------
 1 file changed, 22 insertions(+), 25 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index fcc0b14c88695..c7cf41d950a29 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1084,18 +1084,18 @@ uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
 }
 
 // ALL-LABEL: @test_vmin_f32
-float32x4_t test_vmin_f32(float32x4_t v1, float32x4_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+  // LLVM-NEXT:    ret <2 x float> [[VMIN_I]]
   return vmin_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_f64
-float64x2_t test_vmin_f64(float64x2_t v1, float64x2_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
   return vmin_f64(v1, v2);
 }
 
@@ -1114,16 +1114,20 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
   // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
   return vminq_f64(v1, v2);
 }
- // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+
+// ALL-LABEL: @test_vminnm_f32
+float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
+  // LLVM-NEXT:    ret <2 x float> [[VMIN_I]]
   return vminnm_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnm_f64
-float64x2_t test_vminnm_f64(float64x2_t v1, float64x2_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
   return vminnm_f64(v1, v2);
 }
 
@@ -1136,16 +1140,9 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
 }
 
 // ALL-LABEL: @test_vminnmq_f64
-float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+float64x1_t test_vminnmq_f64(float64x1_t v1, float64x1_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
   return vminnmq_f64(v1, v2);
-}
-// ALL-LABEL: @test_vminnm_f32
-float32x4_t test_vminnm_f32(float32x4_t v1, float32x4_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
-  return vminnm_f32(v1, v2);
 }
\ No newline at end of file
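
For reference, the ACLE signatures these tests now follow (the 64-bit "d" forms take 2-lane float32 / 1-lane float64 vectors, the 128-bit "q" forms take 4-lane / 2-lane):

  float32x2_t vmin_f32(float32x2_t a, float32x2_t b);    // <2 x float>
  float64x1_t vmin_f64(float64x1_t a, float64x1_t b);    // <1 x double>
  float32x4_t vminq_f32(float32x4_t a, float32x4_t b);   // <4 x float>
  float64x2_t vminq_f64(float64x2_t a, float64x2_t b);   // <2 x double>

Note that vminnmq_f64 should likewise stay float64x2_t; the next patch restores that.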

>From 981e4303a3953ecac6b57e48ce62ba5d851e1106 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Mon, 23 Mar 2026 07:55:08 -0700
Subject: [PATCH 4/8] [NEON] Restore float64x2_t in test_vminnmq_f64 to match
 its ACLE signature

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index c7cf41d950a29..e6cc2bc374966 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1140,9 +1140,9 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
 }
 
 // ALL-LABEL: @test_vminnmq_f64
-float64x1_t test_vminnmq_f64(float64x1_t v1, float64x1_t v2) {
-  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
+float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
+  // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
+  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
   return vminnmq_f64(v1, v2);
 }
\ No newline at end of file
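
A short illustration of why vminnm gets its own tests: FMINNM implements IEEE 754 minNum, which returns the numeric operand when the other is a quiet NaN, whereas plain FMIN propagates the NaN (hand-written example, not from the test file):

  #include <arm_neon.h>

  // With a = {NaN, 1.0} and b = {2.0, NaN}:
  //   vminq_f64(a, b)   yields {NaN, NaN}  (FMIN propagates NaNs)
  //   vminnmq_f64(a, b) yields {2.0, 1.0}  (FMINNM prefers the number)
  float64x2_t min_ignoring_nans(float64x2_t a, float64x2_t b) {
    return vminnmq_f64(a, b);
  }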

>From 1f4238642225a3cc08848e05b5929e4e8049a95d Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Mon, 23 Mar 2026 08:34:59 -0700
Subject: [PATCH 5/8] [NEON] Update the vmin intrinsic tests to check the
 intermediate SSA values

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 80 ++++++++++----------
 1 file changed, 40 insertions(+), 40 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index e6cc2bc374966..43ba8c8774346 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -990,159 +990,159 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
 // ALL-LABEL: @test_vmin_s8
 int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
-  // LLVM:    [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
-  // LLVM:    ret <8 x i8> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
+  // LLVM:    ret <8 x i8> [[VMIN2_I]]
   return vmin_s8(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_s16
 int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
-  // LLVM-NEXT:    ret <4 x i16> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN2_I]]
   return vmin_s16(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_s32
 int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
-  // LLVM-NEXT:    ret <2 x i32> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN2_I]]
   return vmin_s32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u8
 uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
-  // LLVM:    [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
-  // LLVM-NEXT:    ret <8 x i8> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <8 x i8> [[VMIN2_I]]
   return vmin_u8(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u16
 uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[V1]], <4 x i16> [[V2]])
-  // LLVM-NEXT:    ret <4 x i16> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN2_I]]
   return vmin_u16(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u32
 uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[V1]], <2 x i32> [[V2]])
-  // LLVM-NEXT:    ret <2 x i32> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN2_I]]
   return vmin_u32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s8
 int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
-  // LLVM:    [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
-  // LLVM-NEXT:    ret <16 x i8> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMIN2_I]]
   return vminq_s8(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s16
 int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
-  // LLVM:    [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
-  // LLVM-NEXT:    ret <8 x i16> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMIN2_I]]
   return vminq_s16(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s32
 int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
-  // LLVM-NEXT:    ret <4 x i32> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMIN2_I]]
   return vminq_s32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u8
 uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
-  // LLVM:    [[VMIN_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
-  // LLVM-NEXT:    ret <16 x i8> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMIN2_I]]
   return vminq_u8(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u16
 uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
-  // LLVM:    [[VMIN_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[V1]], <8 x i16> [[V2]])
-  // LLVM-NEXT:    ret <8 x i16> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMIN2_I]]
   return vminq_u16(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u32
 uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[V1]], <4 x i32> [[V2]])
-  // LLVM-NEXT:    ret <4 x i32> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMIN2_I]]
   return vminq_u32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_f32
 float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
-  // LLVM-NEXT:    ret <2 x float> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x float> [[VMIN2_I]]
   return vmin_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_f64
 float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN2_I]]
   return vmin_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_f32
 float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN2_I]]
   return vminq_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_f64
 float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN2_I]]
   return vminq_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnm_f32
 float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[V1]], <2 x float> [[V2]])
-  // LLVM-NEXT:    ret <2 x float> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x float> [[VMIN2_I]]
   return vminnm_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnm_f64
 float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[V1]], <1 x double> [[V2]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN2_I]]
   return vminnm_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnmq_f32
 float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[V1]], <4 x float> [[V2]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <4 x float> [[VMIN2_I]]
   return vminnmq_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnmq_f64
 float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[V1]], <2 x double> [[V2]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN_I]]
+  // LLVM:    [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
+  // LLVM-NEXT:    ret <2 x double> [[VMIN2_I]]
   return vminnmq_f64(v1, v2);
 }
\ No newline at end of file
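
One caveat on this revision: in FileCheck syntax, [[NAME:%.*]] captures a value and a bare [[NAME]] only reuses an earlier capture, so checks like the ones above that read [[VMIN_I]] and [[VMIN1_I]] without any defining line will fail with an undefined-variable error. The usual shape is (sketch):

  // LLVM: [[VMIN_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8
  // LLVM: ret <8 x i8> [[VMIN_I]]

The next patch replaces these expectations wholesale.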

>From 4ee8c8db4feea2866e8f7beba4c672286201bd0d Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Tue, 24 Mar 2026 11:20:24 +0800
Subject: [PATCH 6/8] Update the expected LLVM output for the vmin tests

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 187 +++++++++++++++----
 1 file changed, 146 insertions(+), 41 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 43ba8c8774346..8d7ada09a5aa0 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -990,159 +990,264 @@ int64_t test_vshld_u64(int64_t a,int64_t b) {
 // ALL-LABEL: @test_vmin_s8
 int8x8_t test_vmin_s8(int8x8_t v1, int8x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s8i>, !cir.vector<8 x !s8i>) -> !cir.vector<8 x !s8i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
-  // LLVM:    ret <8 x i8> [[VMIN2_I]]
+  // LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.smin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM-NEXT:    ret <8 x i8> [[VMIN_V_I]]
   return vmin_s8(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_s16
 int16x4_t test_vmin_s16(int16x4_t v1, int16x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s16i>, !cir.vector<4 x !s16i>) -> !cir.vector<4 x !s16i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x i16> [[VMIN2_I]]
+  // LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN_V2_I]]
   return vmin_s16(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_s32
 int32x2_t test_vmin_s32(int32x2_t v1, int32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !s32i>, !cir.vector<2 x !s32i>) -> !cir.vector<2 x !s32i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x i32> [[VMIN2_I]]
+  // LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.smin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN_V2_I]]
   return vmin_s32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u8
 uint8x8_t test_vmin_u8(uint8x8_t v1, uint8x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u8i>, !cir.vector<8 x !u8i>) -> !cir.vector<8 x !u8i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[VMIN_I]], <8 x i8> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <8 x i8> [[VMIN2_I]]
+  // LLVM-SAME: <8 x i8> noundef [[V1:%.*]], <8 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = call <8 x i8> @llvm.aarch64.neon.umin.v8i8(<8 x i8> [[V1]], <8 x i8> [[V2]])
+  // LLVM-NEXT:    ret <8 x i8> [[VMIN_V_I]]
   return vmin_u8(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u16
 uint16x4_t test_vmin_u16(uint16x4_t v1, uint16x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u16i>, !cir.vector<4 x !u16i>) -> !cir.vector<4 x !u16i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_I]], <4 x i16> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x i16> [[VMIN2_I]]
+  // LLVM-SAME: <4 x i16> noundef [[V1:%.*]], <4 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x i16> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <4 x i16> @llvm.aarch64.neon.umin.v4i16(<4 x i16> [[VMIN_V_I]], <4 x i16> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <4 x i16> [[VMIN_V2_I]]
   return vmin_u16(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_u32
 uint32x2_t test_vmin_u32(uint32x2_t v1, uint32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !u32i>, !cir.vector<2 x !u32i>) -> !cir.vector<2 x !u32i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_I]], <2 x i32> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x i32> [[VMIN2_I]]
+  // LLVM-SAME: <2 x i32> noundef [[V1:%.*]], <2 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.umin.v2i32(<2 x i32> [[VMIN_V_I]], <2 x i32> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <2 x i32> [[VMIN_V2_I]]
   return vmin_u32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s8
 int8x16_t test_vminq_s8(int8x16_t v1, int8x16_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !s8i>, !cir.vector<16 x !s8i>) -> !cir.vector<16 x !s8i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <16 x i8> [[VMIN2_I]]
+  // LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.smin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMINQ_V_I]]
   return vminq_s8(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s16
 int16x8_t test_vminq_s16(int16x8_t v1, int16x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !s16i>, !cir.vector<8 x !s16i>) -> !cir.vector<8 x !s16i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <8 x i16> [[VMIN2_I]]
+  // LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.smin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMINQ_V2_I]]
   return vminq_s16(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_s32
 int32x4_t test_vminq_s32(int32x4_t v1, int32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.smin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !s32i>, !cir.vector<4 x !s32i>) -> !cir.vector<4 x !s32i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x i32> [[VMIN2_I]]
+  // LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.smin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMINQ_V2_I]]
   return vminq_s32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u8
 uint8x16_t test_vminq_u8(uint8x16_t v1, uint8x16_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<16 x !u8i>, !cir.vector<16 x !u8i>) -> !cir.vector<16 x !u8i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[VMIN_I]], <16 x i8> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <16 x i8> [[VMIN2_I]]
+  // LLVM-SAME: <16 x i8> noundef [[V1:%.*]], <16 x i8> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = call <16 x i8> @llvm.aarch64.neon.umin.v16i8(<16 x i8> [[V1]], <16 x i8> [[V2]])
+  // LLVM-NEXT:    ret <16 x i8> [[VMINQ_V_I]]
   return vminq_u8(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u16
 uint16x8_t test_vminq_u16(uint16x8_t v1, uint16x8_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<8 x !u16i>, !cir.vector<8 x !u16i>) -> !cir.vector<8 x !u16i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMIN_I]], <8 x i16> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <8 x i16> [[VMIN2_I]]
+  // LLVM-SAME: <8 x i16> noundef [[V1:%.*]], <8 x i16> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <8 x i16> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <8 x i16> @llvm.aarch64.neon.umin.v8i16(<8 x i16> [[VMINQ_V_I]], <8 x i16> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <8 x i16> [[VMINQ_V2_I]]
+
   return vminq_u16(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_u32
 uint32x4_t test_vminq_u32(uint32x4_t v1, uint32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.umin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !u32i>, !cir.vector<4 x !u32i>) -> !cir.vector<4 x !u32i>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMIN_I]], <4 x i32> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x i32> [[VMIN2_I]]
+  // LLVM-SAME: <4 x i32> noundef [[V1:%.*]], <4 x i32> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.umin.v4i32(<4 x i32> [[VMINQ_V_I]], <4 x i32> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <4 x i32> [[VMINQ_V2_I]]
   return vminq_u32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_f32
 float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x float> [[VMIN2_I]]
+  // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <2 x float> [[VMIN_V2_I]]
   return vmin_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vmin_f64
 float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM:    [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN2_I]]
+  // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_V_I]], <1 x double> [[VMIN_V1_I]])
+  // LLVM-NEXT:    ret <1 x double> [[VMIN_V2_I]]
   return vmin_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_f32
 float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN2_I]]
+  // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMINQ_V_I]], <4 x float> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <4 x float> [[VMINQ_V2_I]]
   return vminq_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminq_f64
 float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN2_I]]
+  // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+  // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMINQ_V_I]], <2 x double> [[VMINQ_V1_I]])
+  // LLVM-NEXT:    ret <2 x double> [[VMINQ_V2_I]]
   return vminq_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnm_f32
 float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMIN_I]], <2 x float> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x float> [[VMIN2_I]]
+  // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_V_I]], <2 x float> [[VMINNM_V1_I]])
+  // LLVM-NEXT:    ret <2 x float> [[VMINNM_V2_I]]
   return vminnm_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnm_f64
 float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM:    [[VMIN2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMIN_I]], <1 x double> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN2_I]]
+  // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
+  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_V_I]], <1 x double> [[VMINNM_V1_I]])
+  // LLVM-NEXT:    ret <1 x double> [[VMINNM_V2_I]]
   return vminnm_f64(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnmq_f32
 float32x4_t test_vminnmq_f32(float32x4_t v1, float32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
-  // LLVM:    [[VMIN2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMIN_I]], <4 x float> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <4 x float> [[VMIN2_I]]
+  // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
+  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <4 x float> @llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_V_I]], <4 x float> [[VMINNM_V1_I]])
+  // LLVM-NEXT:    ret <4 x float> [[VMINNM_V2_I]]
   return vminnmq_f32(v1, v2);
 }
 
 // ALL-LABEL: @test_vminnmq_f64
 float64x2_t test_vminnmq_f64(float64x2_t v1, float64x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
-  // LLVM:    [[VMIN2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMIN_I]], <2 x double> [[VMIN1_I]])
-  // LLVM-NEXT:    ret <2 x double> [[VMIN2_I]]
+  // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef [[V2:%.*]]) #[[ATTR0]] {
+  // LLVM-NEXT:  [[ENTRY:.*:]]
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
+  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <2 x double> @llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_V_I]], <2 x double> [[VMINNM_V1_I]])
+  // LLVM-NEXT:    ret <2 x double> [[VMINNM_V2_I]]
   return vminnmq_f64(v1, v2);
-}
\ No newline at end of file
+}
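
The paired bitcasts in the updated expectations are the long-standing NEON builtin convention of funnelling operands through the canonical byte container (<8 x i8> for 64-bit vectors, <16 x i8> for 128-bit ones) and back to the lane type before calling the target intrinsic. Each test still reduces to one lane-wise call; a sketch with the expected IR shape summarized in comments:

  #include <arm_neon.h>

  // Expected IR per the checks above (64-bit vector case):
  //   %0 = bitcast <4 x i16> %v1 to <8 x i8>   ; into the byte container
  //   %2 = bitcast <8 x i8> %0 to <4 x i16>    ; back to the lane type
  //   %m = call <4 x i16> @llvm.aarch64.neon.smin.v4i16(<4 x i16> %2, ...)
  int16x4_t lanewise_smin(int16x4_t v1, int16x4_t v2) {
    return vmin_s16(v1, v2);
  }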

>From f5257371c0516cd36ec6248dd57f7fec1e006fb1 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Tue, 24 Mar 2026 13:56:01 +0800
Subject: [PATCH 7/8] Update the expected bitcast sequences in the vmin float tests

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 78 ++++++++++----------
 1 file changed, 39 insertions(+), 39 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index 8d7ada09a5aa0..ae851ac674443 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1145,10 +1145,12 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : (!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
   // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef [[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
-  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
-  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
+  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
+  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
   // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <2 x float> @llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> [[VMIN_V1_I]])
   // LLVM-NEXT:    ret <2 x float> [[VMIN_V2_I]]
   return vmin_f32(v1, v2);
@@ -1157,14 +1159,8 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t 
v2) {
 // ALL-LABEL: @test_vmin_f64
 float64x1_t test_vmin_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef 
[[V2:%.*]]) #[[ATTR0]] {
-  // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
-  // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
-  // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x 
double>
-  // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fmin.v1f64(<1 x double> [[VMIN_V_I]], <1 x double> 
[[VMIN_V1_I]])
-  // LLVM-NEXT:    ret <1 x double> [[VMIN_V2_I]]
+  // LLVM:    [[VMIN_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fmin.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
+  // LLVM:    ret <1 x double> [[VMIN_V2_I]]
   return vmin_f64(v1, v2);
 }
 
@@ -1173,10 +1169,12 @@ float32x4_t test_vminq_f32(float32x4_t v1, float32x4_t 
v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
   // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef 
[[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
-  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x 
float>
-  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x 
float>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x 
float>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x 
float>
   // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <4 x float> 
@llvm.aarch64.neon.fmin.v4f32(<4 x float> [[VMINQ_V_I]], <4 x float> 
[[VMINQ_V1_I]])
   // LLVM-NEXT:    ret <4 x float> [[VMINQ_V2_I]]
   return vminq_f32(v1, v2);
@@ -1187,10 +1185,12 @@ float64x2_t test_vminq_f64(float64x2_t v1, float64x2_t 
v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fmin" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
   // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef 
[[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
-  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x 
double>
-  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x 
double>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINQ_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x 
double>
+  // LLVM-NEXT:    [[VMINQ_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x 
double>
   // LLVM-NEXT:    [[VMINQ_V2_I:%.*]] = call <2 x double> 
@llvm.aarch64.neon.fmin.v2f64(<2 x double> [[VMINQ_V_I]], <2 x double> 
[[VMINQ_V1_I]])
   // LLVM-NEXT:    ret <2 x double> [[VMINQ_V2_I]]
   return vminq_f64(v1, v2);
@@ -1201,10 +1201,12 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t 
v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !f32>, !cir.vector<2 x !f32>) -> !cir.vector<2 x !f32>
   // LLVM-SAME: <2 x float> noundef [[V1:%.*]], <2 x float> noundef 
[[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <8 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <8 x i8>
-  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x 
float>
-  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x 
float>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x 
float>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x 
float>
   // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <2 x float> 
@llvm.aarch64.neon.fminnm.v2f32(<2 x float> [[VMINNM_V_I]], <2 x float> 
[[VMINNM_V1_I]])
   // LLVM-NEXT:    ret <2 x float> [[VMINNM_V2_I]]
   return vminnm_f32(v1, v2);
@@ -1213,13 +1215,7 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t 
v2) {
 // ALL-LABEL: @test_vminnm_f64
 float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : 
(!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM-SAME: <1 x double> noundef [[V1:%.*]], <1 x double> noundef 
[[V2:%.*]]) #[[ATTR0]] {
-  // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[V1]] to <8 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <1 x double> [[V2]] to <8 x i8>
-  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x 
double>
-  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x 
double>
-  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fminnm.v1f64(<1 x double> [[VMINNM_V_I]], <1 x double> 
[[VMINNM_V1_I]])
+  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
   // LLVM-NEXT:    ret <1 x double> [[VMINNM_V2_I]]
   return vminnm_f64(v1, v2);
 }
@@ -1229,10 +1225,12 @@ float32x4_t test_vminnmq_f32(float32x4_t v1, 
float32x4_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : 
(!cir.vector<4 x !f32>, !cir.vector<4 x !f32>) -> !cir.vector<4 x !f32>
   // LLVM-SAME: <4 x float> noundef [[V1:%.*]], <4 x float> noundef 
[[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <16 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <16 x i8>
-  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x 
float>
-  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x 
float>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[V1]] to <4 x i32>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[V2]] to <4 x i32>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <4 x i32> [[TMP1]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x 
float>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <4 x 
float>
   // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <4 x float> 
@llvm.aarch64.neon.fminnm.v4f32(<4 x float> [[VMINNM_V_I]], <4 x float> 
[[VMINNM_V1_I]])
   // LLVM-NEXT:    ret <4 x float> [[VMINNM_V2_I]]
   return vminnmq_f32(v1, v2);
@@ -1243,10 +1241,12 @@ float64x2_t test_vminnmq_f64(float64x2_t v1, 
float64x2_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : 
(!cir.vector<2 x !f64>, !cir.vector<2 x !f64>) -> !cir.vector<2 x !f64>
   // LLVM-SAME: <2 x double> noundef [[V1:%.*]], <2 x double> noundef 
[[V2:%.*]]) #[[ATTR0]] {
   // LLVM-NEXT:  [[ENTRY:.*:]]
-  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <16 x i8>
-  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <16 x i8>
-  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x 
double>
-  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x 
double>
+  // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[V1]] to <2 x i64>
+  // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x double> [[V2]] to <2 x i64>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x i64> [[TMP1]] to <16 x i8>
+  // LLVM-NEXT:    [[VMINNM_V_I:%.*]] = bitcast <16 x i8> [[TMP2]] to <2 x 
double>
+  // LLVM-NEXT:    [[VMINNM_V1_I:%.*]] = bitcast <16 x i8> [[TMP3]] to <2 x 
double>
   // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <2 x double> 
@llvm.aarch64.neon.fminnm.v2f64(<2 x double> [[VMINNM_V_I]], <2 x double> 
[[VMINNM_V1_I]])
   // LLVM-NEXT:    ret <2 x double> [[VMINNM_V2_I]]
   return vminnmq_f64(v1, v2);

>From 765d54e3c6f6c75350ecc0ad6f21aeb67348b7f8 Mon Sep 17 00:00:00 2001
From: Zhihui Yang <[email protected]>
Date: Tue, 24 Mar 2026 14:17:15 +0800
Subject: [PATCH 8/8] Fix bitcast types in LLVM IR comments

---
 clang/test/CodeGen/AArch64/neon/intrinsics.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c 
b/clang/test/CodeGen/AArch64/neon/intrinsics.c
index ae851ac674443..69c3dd42c90b2 100644
--- a/clang/test/CodeGen/AArch64/neon/intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c
@@ -1147,8 +1147,8 @@ float32x2_t test_vmin_f32(float32x2_t v1, float32x2_t v2) 
{
   // LLVM-NEXT:  [[ENTRY:.*:]]
   // LLVM-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[V1]] to <2 x i32>
   // LLVM-NEXT:    [[TMP1:%.*]] = bitcast <2 x float> [[V2]] to <2 x i32>
-  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x float> [[TMP0]] to <8 x i8>
-  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
+  // LLVM-NEXT:    [[TMP3:%.*]] = bitcast <2 x i32> [[TMP1]] to <8 x i8>
   // LLVM-NEXT:    [[VMIN_V_I:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
   // LLVM-NEXT:    [[VMIN_V1_I:%.*]] = bitcast <8 x i8> [[TMP3]] to <2 x float>
   // LLVM-NEXT:    [[VMIN_V2_I:%.*]] = call <2 x float> 
@llvm.aarch64.neon.fmin.v2f32(<2 x float> [[VMIN_V_I]], <2 x float> 
[[VMIN_V1_I]])
@@ -1215,8 +1215,8 @@ float32x2_t test_vminnm_f32(float32x2_t v1, float32x2_t 
v2) {
 // ALL-LABEL: @test_vminnm_f64
 float64x1_t test_vminnm_f64(float64x1_t v1, float64x1_t v2) {
   // CIR: cir.call_llvm_intrinsic "aarch64.neon.fminnm" %{{.*}}, %{{.*}} : 
(!cir.vector<1 x !f64>, !cir.vector<1 x !f64>) -> !cir.vector<1 x !f64>
-  // LLVM-NEXT:    [[VMINNM_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
-  // LLVM-NEXT:    ret <1 x double> [[VMINNM_V2_I]]
+  // LLVM:    [[VMINNM_V2_I:%.*]] = call <1 x double> 
@llvm.aarch64.neon.fminnm.v1f64(<1 x double> %{{.*}}, <1 x double> %{{.*}})
+  // LLVM:    ret <1 x double> [[VMINNM_V2_I]]
   return vminnm_f64(v1, v2);
 }
 
