leonardchan created this revision.
leonardchan added reviewers: chandlerc, echristo, phosek, serge-sans-paille.
leonardchan added a project: clang.
Herald added subscribers: kristof.beyls, javed.absar.

This is a patch that should go on top of D58375 
<https://reviews.llvm.org/D58375> which addresses/fixes tests that fail under 
new PM with -O0.

This is still ongoing; I will ask for reviews once all of them are fixed. 
These are currently the latest ones I have updated so far.

Current reasons for fixes:

- Some aarch64 tests specified attribute numbers explicitly, whereas the new PM 
uses the same attributes but different numbers.
- `CodeGen/aarch64-neon-perm.c` failed because new PM would insert lifetime 
start/end intrinsics even under -O0.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D62225

Files:
  clang/lib/CodeGen/BackendUtil.cpp
  clang/test/CodeGen/aarch64-neon-across.c
  clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c
  clang/test/CodeGen/aarch64-neon-fma.c
  clang/test/CodeGen/aarch64-neon-perm.c

Index: clang/test/CodeGen/aarch64-neon-perm.c
===================================================================
--- clang/test/CodeGen/aarch64-neon-perm.c
+++ clang/test/CodeGen/aarch64-neon-perm.c
@@ -1,5 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
 // RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
 
Index: clang/test/CodeGen/aarch64-neon-fma.c
===================================================================
--- clang/test/CodeGen/aarch64-neon-fma.c
+++ clang/test/CodeGen/aarch64-neon-fma.c
@@ -1,5 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon -S -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
 
 // Test new aarch64 intrinsics and types
@@ -226,7 +224,7 @@
 // CHECK:   [[SUB_I:%.*]] = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %b
 // CHECK:   [[VECINIT_I:%.*]] = insertelement <2 x double> undef, double %c, i32 0
 // CHECK:   [[VECINIT1_I:%.*]] = insertelement <2 x double> [[VECINIT_I]], double %c, i32 1
-// CHECK:   [[TMP6:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> [[VECINIT1_I]], <2 x double> %a) #3
+// CHECK:   [[TMP6:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> [[VECINIT1_I]], <2 x double> %a) [[NOUNWIND_ATTR:#[0-9]+]]
 // CHECK:   ret <2 x double> [[TMP6]]
 float64x2_t test_vfmsq_n_f64(float64x2_t a, float64x2_t b, float64_t c) {
   return vfmsq_n_f64(a, b, c);
@@ -234,3 +232,4 @@
 
 // CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64"
 // CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128"
+// CHECK: attributes [[NOUNWIND_ATTR]] = { nounwind }
Index: clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c
===================================================================
--- clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c
+++ clang/test/CodeGen/aarch64-neon-fcvt-intrinsics.c
@@ -1,5 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
 // RUN:  -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
 
@@ -8,119 +6,119 @@
 #include <arm_neon.h>
 
 // CHECK-LABEL: define float @test_vcvtxd_f32_f64(double %a) #0 {
-// CHECK:   [[VCVTXD_F32_F64_I:%.*]] = call float @llvm.aarch64.sisd.fcvtxn(double %a) #2
+// CHECK:   [[VCVTXD_F32_F64_I:%.*]] = call float @llvm.aarch64.sisd.fcvtxn(double %a) [[NOUNWIND_ATTR:#[0-9]+]]
 // CHECK:   ret float [[VCVTXD_F32_F64_I]]
 float32_t test_vcvtxd_f32_f64(float64_t a) {
   return (float32_t)vcvtxd_f32_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtas_s32_f32(float %a) #0 {
-// CHECK:   [[VCVTAS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %a) #2
+// CHECK:   [[VCVTAS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtas.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTAS_S32_F32_I]]
 int32_t test_vcvtas_s32_f32(float32_t a) {
   return (int32_t)vcvtas_s32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_test_vcvtad_s64_f64(double %a) #0 {
-// CHECK:   [[VCVTAD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a) #2
+// CHECK:   [[VCVTAD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTAD_S64_F64_I]]
 int64_t test_test_vcvtad_s64_f64(float64_t a) {
   return (int64_t)vcvtad_s64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtas_u32_f32(float %a) #0 {
-// CHECK:   [[VCVTAS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %a) #2
+// CHECK:   [[VCVTAS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtau.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTAS_U32_F32_I]]
 uint32_t test_vcvtas_u32_f32(float32_t a) {
   return (uint32_t)vcvtas_u32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtad_u64_f64(double %a) #0 {
-// CHECK:   [[VCVTAD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a) #2
+// CHECK:   [[VCVTAD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTAD_U64_F64_I]]
 uint64_t test_vcvtad_u64_f64(float64_t a) {
   return (uint64_t)vcvtad_u64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtms_s32_f32(float %a) #0 {
-// CHECK:   [[VCVTMS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %a) #2
+// CHECK:   [[VCVTMS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtms.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTMS_S32_F32_I]]
 int32_t test_vcvtms_s32_f32(float32_t a) {
   return (int32_t)vcvtms_s32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtmd_s64_f64(double %a) #0 {
-// CHECK:   [[VCVTMD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a) #2
+// CHECK:   [[VCVTMD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTMD_S64_F64_I]]
 int64_t test_vcvtmd_s64_f64(float64_t a) {
   return (int64_t)vcvtmd_s64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtms_u32_f32(float %a) #0 {
-// CHECK:   [[VCVTMS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %a) #2
+// CHECK:   [[VCVTMS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtmu.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTMS_U32_F32_I]]
 uint32_t test_vcvtms_u32_f32(float32_t a) {
   return (uint32_t)vcvtms_u32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtmd_u64_f64(double %a) #0 {
-// CHECK:   [[VCVTMD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a) #2
+// CHECK:   [[VCVTMD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtmu.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTMD_U64_F64_I]]
 uint64_t test_vcvtmd_u64_f64(float64_t a) {
   return (uint64_t)vcvtmd_u64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtns_s32_f32(float %a) #0 {
-// CHECK:   [[VCVTNS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %a) #2
+// CHECK:   [[VCVTNS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtns.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTNS_S32_F32_I]]
 int32_t test_vcvtns_s32_f32(float32_t a) {
   return (int32_t)vcvtns_s32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtnd_s64_f64(double %a) #0 {
-// CHECK:   [[VCVTND_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a) #2
+// CHECK:   [[VCVTND_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtns.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTND_S64_F64_I]]
 int64_t test_vcvtnd_s64_f64(float64_t a) {
   return (int64_t)vcvtnd_s64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtns_u32_f32(float %a) #0 {
-// CHECK:   [[VCVTNS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %a) #2
+// CHECK:   [[VCVTNS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtnu.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTNS_U32_F32_I]]
 uint32_t test_vcvtns_u32_f32(float32_t a) {
   return (uint32_t)vcvtns_u32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtnd_u64_f64(double %a) #0 {
-// CHECK:   [[VCVTND_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a) #2
+// CHECK:   [[VCVTND_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtnu.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTND_U64_F64_I]]
 uint64_t test_vcvtnd_u64_f64(float64_t a) {
   return (uint64_t)vcvtnd_u64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtps_s32_f32(float %a) #0 {
-// CHECK:   [[VCVTPS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %a) #2
+// CHECK:   [[VCVTPS_S32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtps.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTPS_S32_F32_I]]
 int32_t test_vcvtps_s32_f32(float32_t a) {
   return (int32_t)vcvtps_s32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtpd_s64_f64(double %a) #0 {
-// CHECK:   [[VCVTPD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a) #2
+// CHECK:   [[VCVTPD_S64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtps.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTPD_S64_F64_I]]
 int64_t test_vcvtpd_s64_f64(float64_t a) {
   return (int64_t)vcvtpd_s64_f64(a);
 }
 
 // CHECK-LABEL: define i32 @test_vcvtps_u32_f32(float %a) #0 {
-// CHECK:   [[VCVTPS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %a) #2
+// CHECK:   [[VCVTPS_U32_F32_I:%.*]] = call i32 @llvm.aarch64.neon.fcvtpu.i32.f32(float %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VCVTPS_U32_F32_I]]
 uint32_t test_vcvtps_u32_f32(float32_t a) {
   return (uint32_t)vcvtps_u32_f32(a);
 }
 
 // CHECK-LABEL: define i64 @test_vcvtpd_u64_f64(double %a) #0 {
-// CHECK:   [[VCVTPD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a) #2
+// CHECK:   [[VCVTPD_U64_F64_I:%.*]] = call i64 @llvm.aarch64.neon.fcvtpu.i64.f64(double %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VCVTPD_U64_F64_I]]
 uint64_t test_vcvtpd_u64_f64(float64_t a) {
   return (uint64_t)vcvtpd_u64_f64(a);
@@ -153,3 +151,5 @@
 uint64_t test_vcvtd_u64_f64(float64_t a) {
   return (uint64_t)vcvtd_u64_f64(a);
 }
+
+// CHECK: attributes [[NOUNWIND_ATTR]] = { nounwind }
Index: clang/test/CodeGen/aarch64-neon-across.c
===================================================================
--- clang/test/CodeGen/aarch64-neon-across.c
+++ clang/test/CodeGen/aarch64-neon-across.c
@@ -1,5 +1,3 @@
-// UNSUPPORTED: experimental-new-pass-manager
-
 // RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
 // RUN:  -disable-O0-optnone -emit-llvm -o - %s | opt -S -mem2reg | FileCheck %s
 
@@ -8,7 +6,7 @@
 #include <arm_neon.h>
 
 // CHECK-LABEL: define i16 @test_vaddlv_s8(<8 x i8> %a) #0 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR:#[0-9]+]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
 // CHECK:   ret i16 [[TMP0]]
 int16_t test_vaddlv_s8(int8x8_t a) {
@@ -16,14 +14,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddlv_s16(<4 x i16> %a) #0 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDLV_I]]
 int32_t test_vaddlv_s16(int16x4_t a) {
   return vaddlv_s16(a);
 }
 
 // CHECK-LABEL: define i16 @test_vaddlv_u8(<8 x i8> %a) #0 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
 // CHECK:   ret i16 [[TMP0]]
 uint16_t test_vaddlv_u8(uint8x8_t a) {
@@ -31,14 +29,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddlv_u16(<4 x i16> %a) #0 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDLV_I]]
 uint32_t test_vaddlv_u16(uint16x4_t a) {
   return vaddlv_u16(a);
 }
 
 // CHECK-LABEL: define i16 @test_vaddlvq_s8(<16 x i8> %a) #1 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
 // CHECK:   ret i16 [[TMP0]]
 int16_t test_vaddlvq_s8(int8x16_t a) {
@@ -46,21 +44,21 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddlvq_s16(<8 x i16> %a) #1 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDLV_I]]
 int32_t test_vaddlvq_s16(int16x8_t a) {
   return vaddlvq_s16(a);
 }
 
 // CHECK-LABEL: define i64 @test_vaddlvq_s32(<4 x i32> %a) #1 {
-// CHECK:   [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VADDLVQ_S32_I:%.*]] = call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VADDLVQ_S32_I]]
 int64_t test_vaddlvq_s32(int32x4_t a) {
   return vaddlvq_s32(a);
 }
 
 // CHECK-LABEL: define i16 @test_vaddlvq_u8(<16 x i8> %a) #1 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDLV_I]] to i16
 // CHECK:   ret i16 [[TMP0]]
 uint16_t test_vaddlvq_u8(uint8x16_t a) {
@@ -68,21 +66,21 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddlvq_u16(<8 x i16> %a) #1 {
-// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VADDLV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDLV_I]]
 uint32_t test_vaddlvq_u16(uint16x8_t a) {
   return vaddlvq_u16(a);
 }
 
 // CHECK-LABEL: define i64 @test_vaddlvq_u32(<4 x i32> %a) #1 {
-// CHECK:   [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VADDLVQ_U32_I:%.*]] = call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i64 [[VADDLVQ_U32_I]]
 uint64_t test_vaddlvq_u32(uint32x4_t a) {
   return vaddlvq_u32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vmaxv_s8(<8 x i8> %a) #0 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vmaxv_s8(int8x8_t a) {
@@ -90,7 +88,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vmaxv_s16(<4 x i16> %a) #0 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vmaxv_s16(int16x4_t a) {
@@ -98,7 +96,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vmaxv_u8(<8 x i8> %a) #0 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vmaxv_u8(uint8x8_t a) {
@@ -106,7 +104,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vmaxv_u16(<4 x i16> %a) #0 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vmaxv_u16(uint16x4_t a) {
@@ -114,7 +112,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vmaxvq_s8(<16 x i8> %a) #1 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vmaxvq_s8(int8x16_t a) {
@@ -122,7 +120,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vmaxvq_s16(<8 x i16> %a) #1 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vmaxvq_s16(int16x8_t a) {
@@ -130,14 +128,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vmaxvq_s32(<4 x i32> %a) #1 {
-// CHECK:   [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VMAXVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VMAXVQ_S32_I]]
 int32_t test_vmaxvq_s32(int32x4_t a) {
   return vmaxvq_s32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vmaxvq_u8(<16 x i8> %a) #1 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMAXV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vmaxvq_u8(uint8x16_t a) {
@@ -145,7 +143,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vmaxvq_u16(<8 x i16> %a) #1 {
-// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VMAXV_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMAXV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vmaxvq_u16(uint16x8_t a) {
@@ -153,14 +151,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vmaxvq_u32(<4 x i32> %a) #1 {
-// CHECK:   [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VMAXVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VMAXVQ_U32_I]]
 uint32_t test_vmaxvq_u32(uint32x4_t a) {
   return vmaxvq_u32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vminv_s8(<8 x i8> %a) #0 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vminv_s8(int8x8_t a) {
@@ -168,7 +166,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vminv_s16(<4 x i16> %a) #0 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vminv_s16(int16x4_t a) {
@@ -176,7 +174,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vminv_u8(<8 x i8> %a) #0 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vminv_u8(uint8x8_t a) {
@@ -184,7 +182,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vminv_u16(<4 x i16> %a) #0 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vminv_u16(uint16x4_t a) {
@@ -192,7 +190,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vminvq_s8(<16 x i8> %a) #1 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vminvq_s8(int8x16_t a) {
@@ -200,7 +198,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vminvq_s16(<8 x i16> %a) #1 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vminvq_s16(int16x8_t a) {
@@ -208,14 +206,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vminvq_s32(<4 x i32> %a) #1 {
-// CHECK:   [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VMINVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VMINVQ_S32_I]]
 int32_t test_vminvq_s32(int32x4_t a) {
   return vminvq_s32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vminvq_u8(<16 x i8> %a) #1 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VMINV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vminvq_u8(uint8x16_t a) {
@@ -223,7 +221,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vminvq_u16(<8 x i16> %a) #1 {
-// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VMINV_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VMINV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vminvq_u16(uint16x8_t a) {
@@ -231,14 +229,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vminvq_u32(<4 x i32> %a) #1 {
-// CHECK:   [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VMINVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VMINVQ_U32_I]]
 uint32_t test_vminvq_u32(uint32x4_t a) {
   return vminvq_u32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vaddv_s8(<8 x i8> %a) #0 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vaddv_s8(int8x8_t a) {
@@ -246,7 +244,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vaddv_s16(<4 x i16> %a) #0 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vaddv_s16(int16x4_t a) {
@@ -254,7 +252,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vaddv_u8(<8 x i8> %a) #0 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vaddv_u8(uint8x8_t a) {
@@ -262,7 +260,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vaddv_u16(<4 x i16> %a) #0 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i16(<4 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vaddv_u16(uint16x4_t a) {
@@ -270,7 +268,7 @@
 }
 
 // CHECK-LABEL: define i8 @test_vaddvq_s8(<16 x i8> %a) #1 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 int8_t test_vaddvq_s8(int8x16_t a) {
@@ -278,7 +276,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vaddvq_s16(<8 x i16> %a) #1 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 int16_t test_vaddvq_s16(int16x8_t a) {
@@ -286,14 +284,14 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddvq_s32(<4 x i32> %a) #1 {
-// CHECK:   [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VADDVQ_S32_I:%.*]] = call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDVQ_S32_I]]
 int32_t test_vaddvq_s32(int32x4_t a) {
   return vaddvq_s32(a);
 }
 
 // CHECK-LABEL: define i8 @test_vaddvq_u8(<16 x i8> %a) #1 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v16i8(<16 x i8> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP0:%.*]] = trunc i32 [[VADDV_I]] to i8
 // CHECK:   ret i8 [[TMP0]]
 uint8_t test_vaddvq_u8(uint8x16_t a) {
@@ -301,7 +299,7 @@
 }
 
 // CHECK-LABEL: define i16 @test_vaddvq_u16(<8 x i16> %a) #1 {
-// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a) #3
+// CHECK:   [[VADDV_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v8i16(<8 x i16> %a) [[NOUNWIND_ATTR]]
 // CHECK:   [[TMP2:%.*]] = trunc i32 [[VADDV_I]] to i16
 // CHECK:   ret i16 [[TMP2]]
 uint16_t test_vaddvq_u16(uint16x8_t a) {
@@ -309,35 +307,35 @@
 }
 
 // CHECK-LABEL: define i32 @test_vaddvq_u32(<4 x i32> %a) #1 {
-// CHECK:   [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a) #3
+// CHECK:   [[VADDVQ_U32_I:%.*]] = call i32 @llvm.aarch64.neon.uaddv.i32.v4i32(<4 x i32> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret i32 [[VADDVQ_U32_I]]
 uint32_t test_vaddvq_u32(uint32x4_t a) {
   return vaddvq_u32(a);
 }
 
 // CHECK-LABEL: define float @test_vmaxvq_f32(<4 x float> %a) #1 {
-// CHECK:   [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a) #3
+// CHECK:   [[VMAXVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret float [[VMAXVQ_F32_I]]
 float32_t test_vmaxvq_f32(float32x4_t a) {
   return vmaxvq_f32(a);
 }
 
 // CHECK-LABEL: define float @test_vminvq_f32(<4 x float> %a) #1 {
-// CHECK:   [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a) #3
+// CHECK:   [[VMINVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret float [[VMINVQ_F32_I]]
 float32_t test_vminvq_f32(float32x4_t a) {
   return vminvq_f32(a);
 }
 
 // CHECK-LABEL: define float @test_vmaxnmvq_f32(<4 x float> %a) #1 {
-// CHECK:   [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) #3
+// CHECK:   [[VMAXNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret float [[VMAXNMVQ_F32_I]]
 float32_t test_vmaxnmvq_f32(float32x4_t a) {
   return vmaxnmvq_f32(a);
 }
 
 // CHECK-LABEL: define float @test_vminnmvq_f32(<4 x float> %a) #1 {
-// CHECK:   [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a) #3
+// CHECK:   [[VMINNMVQ_F32_I:%.*]] = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a) [[NOUNWIND_ATTR]]
 // CHECK:   ret float [[VMINNMVQ_F32_I]]
 float32_t test_vminnmvq_f32(float32x4_t a) {
   return vminnmvq_f32(a);
@@ -345,3 +343,4 @@
 
 // CHECK: attributes #0 ={{.*}}"min-legal-vector-width"="64"
 // CHECK: attributes #1 ={{.*}}"min-legal-vector-width"="128"
+// CHECK: attributes [[NOUNWIND_ATTR]] = { nounwind }
Index: clang/lib/CodeGen/BackendUtil.cpp
===================================================================
--- clang/lib/CodeGen/BackendUtil.cpp
+++ clang/lib/CodeGen/BackendUtil.cpp
@@ -1101,7 +1101,9 @@
 
       // Build a minimal pipeline based on the semantics required by Clang,
       // which is just that always inlining occurs.
-      MPM.addPass(AlwaysInlinerPass());
+      // We always pass false here since according to the legacy PM logic for
+      // enabling lifetime intrinsics, we should not be compiling with O0.
+      MPM.addPass(AlwaysInlinerPass(/*InsertLifetimeIntrinsics=*/false));
 
       // At -O0 we directly run necessary sanitizer passes.
       if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to