Author: Jonathan Thackray
Date: 2025-11-07T14:56:29Z
New Revision: 9a8781b86f06bbcd7edab97b5a6957f7039e357d

URL: https://github.com/llvm/llvm-project/commit/9a8781b86f06bbcd7edab97b5a6957f7039e357d
DIFF: https://github.com/llvm/llvm-project/commit/9a8781b86f06bbcd7edab97b5a6957f7039e357d.diff

LOG: [AArch64][llvm] Add support for new vcvt* intrinsics (#163572)

Add support for these new vcvt* intrinsics:

```
  int64_t  vcvts_s64_f32(float32_t);
  uint64_t vcvts_u64_f32(float32_t);
  int32_t  vcvtd_s32_f64(float64_t);
  uint32_t vcvtd_u32_f64(float64_t);

  int64_t  vcvtns_s64_f32(float32_t);
  uint64_t vcvtns_u64_f32(float32_t);
  int32_t  vcvtnd_s32_f64(float64_t);
  uint32_t vcvtnd_u32_f64(float64_t);

  int64_t  vcvtms_s64_f32(float32_t);
  uint64_t vcvtms_u64_f32(float32_t);
  int32_t  vcvtmd_s32_f64(float64_t);
  uint32_t vcvtmd_u32_f64(float64_t);

  int64_t  vcvtps_s64_f32(float32_t);
  uint64_t vcvtps_u64_f32(float32_t);
  int32_t  vcvtpd_s32_f64(float64_t);
  uint32_t vcvtpd_u32_f64(float64_t);

  int64_t  vcvtas_s64_f32(float32_t);
  uint64_t vcvtas_u64_f32(float32_t);
  int32_t  vcvtad_s32_f64(float64_t);
  uint32_t vcvtad_u32_f64(float64_t);
```
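
For reference, a minimal usage sketch of two of the new scalar conversions follows (a hedged example, not part of the commit; it assumes <arm_neon.h> and an AArch64 target, and the wrapper names are illustrative only). Per the AArch64SISDIntrinsicMap entries below, these map to the aarch64.neon.fcvtns and aarch64.neon.fcvtzu intrinsics, corresponding to the FCVTNS and FCVTZU instructions.

```
#include <arm_neon.h>
#include <stdint.h>

/* float32 -> int64, rounding to nearest with ties to even (FCVTNS). */
static inline int64_t round_to_nearest_s64(float32_t x) {
  return vcvtns_s64_f32(x);
}

/* float64 -> uint32, rounding toward zero (FCVTZU). */
static inline uint32_t truncate_to_u32(float64_t x) {
  return vcvtd_u32_f64(x);
}
```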

Added: 
    

Modified: 
    clang/docs/ReleaseNotes.rst
    clang/include/clang/Basic/arm_neon.td
    clang/lib/CodeGen/TargetBuiltins/ARM.cpp
    clang/test/CodeGen/AArch64/neon-fcvt-intrinsics.c

Removed: 
    


################################################################################
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index a59f6bd50fc9e..1b0a8d71a2cf8 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -572,6 +572,8 @@ X86 Support
 
 Arm and AArch64 Support
 ^^^^^^^^^^^^^^^^^^^^^^^
+- More intrinsics for the following AArch64 instructions:
+  FCVTZ[US], FCVTN[US], FCVTM[US], FCVTP[US], FCVTA[US]
 
 Android Support
 ^^^^^^^^^^^^^^^

diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td
index ef196103035e8..65cf5ee4af6cd 100644
--- a/clang/include/clang/Basic/arm_neon.td
+++ b/clang/include/clang/Basic/arm_neon.td
@@ -1466,26 +1466,51 @@ def SCALAR_UCVTFD : SInst<"vcvt_f64", "(1F)(1!)", "SUl">;
 
////////////////////////////////////////////////////////////////////////////////
 // Scalar Floating-point Converts
 def SCALAR_FCVTXN  : IInst<"vcvtx_f32", "(1F<)(1!)", "Sd">;
-def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "(1S)1", "Sf">;
-def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "(1U)1", "Sf">;
-def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "(1S)1", "Sd">;
-def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "(1U)1", "Sd">;
-def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "(1S)1", "Sf">;
-def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "(1U)1", "Sf">;
-def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "(1S)1", "Sd">;
-def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "(1U)1", "Sd">;
-def SCALAR_FCVTASS : SInst<"vcvta_s32", "(1S)1", "Sf">;
-def SCALAR_FCVTAUS : SInst<"vcvta_u32", "(1U)1", "Sf">;
-def SCALAR_FCVTASD : SInst<"vcvta_s64", "(1S)1", "Sd">;
-def SCALAR_FCVTAUD : SInst<"vcvta_u64", "(1U)1", "Sd">;
-def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "(1S)1", "Sf">;
-def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "(1U)1", "Sf">;
-def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "(1S)1", "Sd">;
-def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "(1U)1", "Sd">;
-def SCALAR_FCVTZSS : SInst<"vcvt_s32", "(1S)1", "Sf">;
-def SCALAR_FCVTZUS : SInst<"vcvt_u32", "(1U)1", "Sf">;
-def SCALAR_FCVTZSD : SInst<"vcvt_s64", "(1S)1", "Sd">;
-def SCALAR_FCVTZUD : SInst<"vcvt_u64", "(1U)1", "Sd">;
+
+def SCALAR_FCVTN_F32toSS  : SInst<"vcvtn_s32", "(1S)1", "Sf">;
+def SCALAR_FCVTN_F32toUS  : SInst<"vcvtn_u32", "(1U)1", "Sf">;
+def SCALAR_FCVTN_F64toSS  : SInst<"vcvtn_s32", "(1S<)1", "Sd">;
+def SCALAR_FCVTN_F64toUS  : SInst<"vcvtn_u32", "(1U<)1", "Sd">;
+def SCALAR_FCVTN_F32toSD  : SInst<"vcvtn_s64", "(1S>)1", "Sf">;
+def SCALAR_FCVTN_F32toUD  : SInst<"vcvtn_u64", "(1U>)1", "Sf">;
+def SCALAR_FCVTN_F64toSD  : SInst<"vcvtn_s64", "(1S)1", "Sd">;
+def SCALAR_FCVTN_F64toUD  : SInst<"vcvtn_u64", "(1U)1", "Sd">;
+
+def SCALAR_FCVTM_F32toSS  : SInst<"vcvtm_s32", "(1S)1", "Sf">;
+def SCALAR_FCVTM_F32toUS  : SInst<"vcvtm_u32", "(1U)1", "Sf">;
+def SCALAR_FCVTM_F64toSS  : SInst<"vcvtm_s32", "(1S<)1", "Sd">;
+def SCALAR_FCVTM_F64toUS  : SInst<"vcvtm_u32", "(1U<)1", "Sd">;
+def SCALAR_FCVTM_F32toSD  : SInst<"vcvtm_s64", "(1S>)1", "Sf">;
+def SCALAR_FCVTM_F32toUD  : SInst<"vcvtm_u64", "(1U>)1", "Sf">;
+def SCALAR_FCVTM_F64toSD  : SInst<"vcvtm_s64", "(1S)1", "Sd">;
+def SCALAR_FCVTM_F64toUD  : SInst<"vcvtm_u64", "(1U)1", "Sd">;
+
+def SCALAR_FCVTA_F32toSS  : SInst<"vcvta_s32", "(1S)1", "Sf">;
+def SCALAR_FCVTA_F32toUS  : SInst<"vcvta_u32", "(1U)1", "Sf">;
+def SCALAR_FCVTA_F64toSS  : SInst<"vcvta_s32", "(1S<)1", "Sd">;
+def SCALAR_FCVTA_F64toUS  : SInst<"vcvta_u32", "(1U<)1", "Sd">;
+def SCALAR_FCVTA_F32toSD  : SInst<"vcvta_s64", "(1S>)1", "Sf">;
+def SCALAR_FCVTA_F32toUD  : SInst<"vcvta_u64", "(1U>)1", "Sf">;
+def SCALAR_FCVTA_F64toSD  : SInst<"vcvta_s64", "(1S)1", "Sd">;
+def SCALAR_FCVTA_F64toUD  : SInst<"vcvta_u64", "(1U)1", "Sd">;
+
+def SCALAR_FCVTP_F32toSS  : SInst<"vcvtp_s32", "(1S)1", "Sf">;
+def SCALAR_FCVTP_F32toUS  : SInst<"vcvtp_u32", "(1U)1", "Sf">;
+def SCALAR_FCVTP_F64toSS  : SInst<"vcvtp_s32", "(1S<)1", "Sd">;
+def SCALAR_FCVTP_F64toUS  : SInst<"vcvtp_u32", "(1U<)1", "Sd">;
+def SCALAR_FCVTP_F32toSD  : SInst<"vcvtp_s64", "(1S>)1", "Sf">;
+def SCALAR_FCVTP_F32toUD  : SInst<"vcvtp_u64", "(1U>)1", "Sf">;
+def SCALAR_FCVTP_F64toSD  : SInst<"vcvtp_s64", "(1S)1", "Sd">;
+def SCALAR_FCVTP_F64toUD  : SInst<"vcvtp_u64", "(1U)1", "Sd">;
+
+def SCALAR_FCVTZ_F32toSS  : SInst<"vcvt_s32", "(1S)1", "Sf">;
+def SCALAR_FCVTZ_F32toUS  : SInst<"vcvt_u32", "(1U)1", "Sf">;
+def SCALAR_FCVTZ_F64toSS  : SInst<"vcvt_s32", "(1S<)1", "Sd">;
+def SCALAR_FCVTZ_F64toUS  : SInst<"vcvt_u32", "(1U<)1", "Sd">;
+def SCALAR_FCVTZ_F32toSD  : SInst<"vcvt_s64", "(1S>)1", "Sf">;
+def SCALAR_FCVTZ_F32toUD  : SInst<"vcvt_u64", "(1U>)1", "Sf">;
+def SCALAR_FCVTZ_F64toSD  : SInst<"vcvt_s64", "(1S)1", "Sd">;
+def SCALAR_FCVTZ_F64toUD  : SInst<"vcvt_u64", "(1U)1", "Sd">;
 
 
////////////////////////////////////////////////////////////////////////////////
 // Scalar Floating-point Reciprocal Estimate

diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 15fa78ddba715..961e72f7eadda 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -590,6 +590,7 @@ struct ARMVectorIntrinsicInfo {
       Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
       TypeModifier }
 
+// clang-format off
 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
   NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
   NEONMAP0(splat_lane_v),
@@ -1217,35 +1218,55 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
   NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
   NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
   NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_s32_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
   NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtad_u32_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
   NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
   NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_s64_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
   NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtas_u64_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_s32_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtd_u32_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
   NEONMAP0(vcvth_bf16_f32),
+  NEONMAP1(vcvtmd_s32_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
   NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtmd_u32_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_s64_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
   NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtms_u64_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_s32_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
   NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtnd_u32_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_s64_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
   NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtns_u64_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_s32_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
   NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtpd_u32_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_s64_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
   NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvtps_u64_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_s64_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
   NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
+  NEONMAP1(vcvts_u64_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
   NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
   NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
   NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
@@ -1446,6 +1467,7 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
   NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
   NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
 };
+// clang-format on
 
 // Some intrinsics are equivalent for codegen.
 static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {

diff --git a/clang/test/CodeGen/AArch64/neon-fcvt-intrinsics.c b/clang/test/CodeGen/AArch64/neon-fcvt-intrinsics.c
index 670b65070289d..929df94aa60ef 100644
--- a/clang/test/CodeGen/AArch64/neon-fcvt-intrinsics.c
+++ b/clang/test/CodeGen/AArch64/neon-fcvt-intrinsics.c
@@ -26,16 +26,36 @@ int32_t test_vcvtas_s32_f32(float32_t a) {
   return (int32_t)vcvtas_s32_f32(a);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@test_test_vcvtad_s64_f64
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtad_s64_f64
 // CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[VCVTAD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double [[A]])
 // CHECK-NEXT:    ret i64 [[VCVTAD_S64_F64_I]]
 //
-int64_t test_test_vcvtad_s64_f64(float64_t a) {
+int64_t test_vcvtad_s64_f64(float64_t a) {
   return (int64_t)vcvtad_s64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtas_s64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTAS_S64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTAS_S64_F32_I]]
+//
+int64_t test_vcvtas_s64_f32(float32_t a) {
+  return (int64_t)vcvtas_s64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtad_s32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTAD_S32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTAD_S32_F64_I]]
+//
+int32_t test_vcvtad_s32_f64(float64_t a) {
+  return (int32_t)vcvtad_s32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtas_u32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -56,6 +76,26 @@ uint64_t test_vcvtad_u64_f64(float64_t a) {
   return (uint64_t)vcvtad_u64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtas_u64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTAS_U64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTAS_U64_F32_I]]
+//
+uint64_t test_vcvtas_u64_f32(float32_t a) {
+  return (uint64_t)vcvtas_u64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtad_u32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTAD_U32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTAD_U32_F64_I]]
+//
+uint32_t test_vcvtad_u32_f64(float64_t a) {
+  return (uint32_t)vcvtad_u32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtms_s32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -76,6 +116,26 @@ int64_t test_vcvtmd_s64_f64(float64_t a) {
   return (int64_t)vcvtmd_s64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtms_s64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTMS_S64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTMS_S64_F32_I]]
+//
+int64_t test_vcvtms_s64_f32(float32_t a) {
+  return (int64_t)vcvtms_s64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtmd_s32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTMD_S32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTMD_S32_F64_I]]
+//
+int32_t test_vcvtmd_s32_f64(float64_t a) {
+  return (int32_t)vcvtmd_s32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtms_u32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -96,6 +156,26 @@ uint64_t test_vcvtmd_u64_f64(float64_t a) {
   return (uint64_t)vcvtmd_u64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtms_u64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTMS_U64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTMS_U64_F32_I]]
+//
+uint64_t test_vcvtms_u64_f32(float32_t a) {
+  return (uint64_t)vcvtms_u64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtmd_u32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTMD_U32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTMD_U32_F64_I]]
+//
+uint32_t test_vcvtmd_u32_f64(float64_t a) {
+  return (uint32_t)vcvtmd_u32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtns_s32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -116,6 +196,26 @@ int64_t test_vcvtnd_s64_f64(float64_t a) {
   return (int64_t)vcvtnd_s64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtns_s64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTNS_S64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTNS_S64_F32_I]]
+//
+int64_t test_vcvtns_s64_f32(float32_t a) {
+  return (int64_t)vcvtns_s64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtnd_s32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTND_S32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTND_S32_F64_I]]
+//
+int32_t test_vcvtnd_s32_f64(float64_t a) {
+  return (int32_t)vcvtnd_s32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtns_u32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -136,6 +236,26 @@ uint64_t test_vcvtnd_u64_f64(float64_t a) {
   return (uint64_t)vcvtnd_u64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtns_u64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTNS_U64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTNS_U64_F32_I]]
+//
+uint64_t test_vcvtns_u64_f32(float32_t a) {
+  return (uint64_t)vcvtns_u64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtnd_u32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTND_U32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTND_U32_F64_I]]
+//
+uint32_t test_vcvtnd_u32_f64(float64_t a) {
+  return (uint32_t)vcvtnd_u32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtps_s32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -156,6 +276,26 @@ int64_t test_vcvtpd_s64_f64(float64_t a) {
   return (int64_t)vcvtpd_s64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtps_s64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTPS_S64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTPS_S64_F32_I]]
+//
+int64_t test_vcvtps_s64_f32(float32_t a) {
+  return (int64_t)vcvtps_s64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtpd_s32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTPD_S32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTPD_S32_F64_I]]
+//
+int32_t test_vcvtpd_s32_f64(float64_t a) {
+  return (int32_t)vcvtpd_s32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvtps_u32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -176,6 +316,26 @@ uint64_t test_vcvtpd_u64_f64(float64_t a) {
   return (uint64_t)vcvtpd_u64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtps_u64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTPS_U64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTPS_U64_F32_I]]
+//
+uint64_t test_vcvtps_u64_f32(float32_t a) {
+  return (uint64_t)vcvtps_u64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtpd_u32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTPD_U32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTPD_U32_F64_I]]
+//
+uint32_t test_vcvtpd_u32_f64(float64_t a) {
+  return (uint32_t)vcvtpd_u32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvts_s32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -196,6 +356,26 @@ int64_t test_vcvtd_s64_f64(float64_t a) {
   return (int64_t)vcvtd_s64_f64(a);
 }
 
+// CHECK-LABEL: define {{[^@]+}}@test_vcvts_s64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTS_S64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtzs.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTS_S64_F32_I]]
+//
+int64_t test_vcvts_s64_f32(float32_t a) {
+  return (int64_t)vcvts_s64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtd_s32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTD_S32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtzs.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTD_S32_F64_I]]
+//
+int32_t test_vcvtd_s32_f64(float64_t a) {
+  return (int32_t)vcvtd_s32_f64(a);
+}
+
 // CHECK-LABEL: define {{[^@]+}}@test_vcvts_u32_f32
 // CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
 // CHECK-NEXT:  entry:
@@ -215,3 +395,24 @@ uint32_t test_vcvts_u32_f32(float32_t a) {
 uint64_t test_vcvtd_u64_f64(float64_t a) {
   return (uint64_t)vcvtd_u64_f64(a);
 }
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvts_u64_f32
+// CHECK-SAME: (float noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTS_U64_F32_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtzu.i64.f32(float [[A]])
+// CHECK-NEXT:    ret i64 [[VCVTS_U64_F32_I]]
+//
+uint64_t test_vcvts_u64_f32(float32_t a) {
+  return (uint64_t)vcvts_u64_f32(a);
+}
+
+// CHECK-LABEL: define {{[^@]+}}@test_vcvtd_u32_f64
+// CHECK-SAME: (double noundef [[A:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VCVTD_U32_F64_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtzu.i32.f64(double [[A]])
+// CHECK-NEXT:    ret i32 [[VCVTD_U32_F64_I]]
+//
+uint32_t test_vcvtd_u32_f64(float64_t a) {
+  return (uint32_t)vcvtd_u32_f64(a);
+}
+


        