https://github.com/YeonguChoe updated 
https://github.com/llvm/llvm-project/pull/183430

>From 23bffc7431bac4542845aab1cb66c8ca3a657e33 Mon Sep 17 00:00:00 2001
From: YeonguChoe <[email protected]>
Date: Wed, 25 Feb 2026 20:29:01 -0500
Subject: [PATCH 1/2] [Clang][CodeGen] Lower __builtin_fpclassify with
 llvm.is.fpclass

- Make __builtin_fpclassify classify floating-point numbers using the
llvm.is.fpclass LLVM IR intrinsic instead of a chain of fcmp comparisons
- Reference: https://llvm.org/docs/LangRef.html#floating-point-test-intrinsics
---
 clang/lib/CodeGen/CGBuiltin.cpp        | 78 ++++++++++++--------------
 clang/test/CodeGen/strictfp_builtins.c | 27 +++++----
 2 files changed, 50 insertions(+), 55 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 38010cad75244..cee874730a3c3 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4414,60 +4414,56 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
 
   case Builtin::BI__builtin_fpclassify: {
     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
-    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
     Value *V = EmitScalarExpr(E->getArg(5));
-    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
-
-    // Create Result
-    BasicBlock *Begin = Builder.GetInsertBlock();
-    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
-    Builder.SetInsertPoint(End);
-    PHINode *Result =
-      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
-                        "fpclassify_result");
 
-    // if (V==0) return FP_ZERO
-    Builder.SetInsertPoint(Begin);
-    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
-                                          "iszero");
+    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+    Value *NormalLiteral = EmitScalarExpr(E->getArg(2));
+    Value *SubnormalLiteral = EmitScalarExpr(E->getArg(3));
     Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
-    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
-    Builder.CreateCondBr(IsZero, End, NotZero);
-    Result->addIncoming(ZeroLiteral, Begin);
 
-    // if (V != V) return FP_NAN
-    Builder.SetInsertPoint(NotZero);
-    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
-    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
-    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+    Value *IsNan = Builder.createIsFPClass(V, 0b0000000011);
+    Value *IsInf = Builder.createIsFPClass(V, 0b1000000100);
+    Value *IsNormal = Builder.createIsFPClass(V, 0b0100001000);
+    Value *IsSubnormal = Builder.createIsFPClass(V, 0b0010010000);
+
+    BasicBlock *Entry = Builder.GetInsertBlock();
+
+    BasicBlock *End = createBasicBlock("fpclassify_end", CurFn);
+    Builder.SetInsertPoint(End);
+    PHINode *Result = Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 
5,
+                                        "fpclassify_result");
+
+    // Check if V is NaN
+    Builder.SetInsertPoint(Entry);
+    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", CurFn);
     Builder.CreateCondBr(IsNan, End, NotNan);
-    Result->addIncoming(NanLiteral, NotZero);
+    Result->addIncoming(NanLiteral, Entry);
 
-    // if (fabs(V) == infinity) return FP_INFINITY
+    // Check if V is infinity
     Builder.SetInsertPoint(NotNan);
-    Value *VAbs = EmitFAbs(*this, V);
-    Value *IsInf =
-      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
-                            "isinf");
-    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
-    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", CurFn);
     Builder.CreateCondBr(IsInf, End, NotInf);
     Result->addIncoming(InfLiteral, NotNan);
 
-    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+    // Check if V is normal
     Builder.SetInsertPoint(NotInf);
-    APFloat Smallest = APFloat::getSmallestNormalized(
-        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
-    Value *IsNormal =
-      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
-                            "isnormal");
-    Value *NormalResult =
-      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
-                           EmitScalarExpr(E->getArg(3)));
+    BasicBlock *NotNormal = createBasicBlock("fpclassify_not_normal", CurFn);
+    Builder.CreateCondBr(IsNormal, End, NotNormal);
+    Result->addIncoming(NormalLiteral, NotInf);
+
+    // Check if V is subnormal
+    Builder.SetInsertPoint(NotNormal);
+    BasicBlock *NotSubnormal =
+        createBasicBlock("fpclassify_not_subnormal", CurFn);
+    Builder.CreateCondBr(IsSubnormal, End, NotSubnormal);
+    Result->addIncoming(SubnormalLiteral, NotNormal);
+
+    // If V is not one of the above, it is zero
+    Builder.SetInsertPoint(NotSubnormal);
     Builder.CreateBr(End);
-    Result->addIncoming(NormalResult, NotInf);
+    Result->addIncoming(ZeroLiteral, NotSubnormal);
 
-    // return Result
     Builder.SetInsertPoint(End);
     return RValue::get(Result);
   }
diff --git a/clang/test/CodeGen/strictfp_builtins.c 
b/clang/test/CodeGen/strictfp_builtins.c
index 58815c7de4fa9..d965767be095b 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -3,7 +3,6 @@
 
 // Test that the constrained intrinsics are picking up the exception
 // metadata from the AST instead of the global default from the command line.
-// FIXME: these functions shouldn't trap on SNaN.
 
 #pragma float_control(except, on)
 
@@ -31,22 +30,22 @@ void p(char *str, int x) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[ISZERO:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, 
metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
-// CHECK-NEXT:    br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label 
[[FPCLASSIFY_NOT_ZERO:%.*]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], 
i32 3) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], 
i32 516) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP3:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], 
i32 264) #[[ATTR4]]
+// CHECK-NEXT:    [[TMP4:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], 
i32 144) #[[ATTR4]]
+// CHECK-NEXT:    br i1 [[TMP1]], label [[FPCLASSIFY_END:%.*]], label 
[[FPCLASSIFY_NOT_NAN:%.*]]
 // CHECK:       fpclassify_end:
-// CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 
0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ 
[[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
+// CHECK-NEXT:    [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 
1, [[FPCLASSIFY_NOT_NAN]] ], [ 2, [[FPCLASSIFY_NOT_INF:%.*]] ], [ 3, 
[[FPCLASSIFY_NOT_NORMAL:%.*]] ], [ 4, [[FPCLASSIFY_NOT_SUBNORMAL:%.*]] ]
 // CHECK-NEXT:    call void @p(ptr noundef @.str.1, i32 noundef 
[[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
-// CHECK:       fpclassify_not_zero:
-// CHECK-NEXT:    [[CMP:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], 
metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
-// CHECK-NEXT:    br i1 [[CMP]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_NAN]]
 // CHECK:       fpclassify_not_nan:
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) 
#[[ATTR5:[0-9]+]]
-// CHECK-NEXT:    [[ISINF:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 
0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
-// CHECK-NEXT:    br i1 [[ISINF]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_INF]]
+// CHECK-NEXT:    br i1 [[TMP2]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_INF:%.*]]
 // CHECK:       fpclassify_not_inf:
-// CHECK-NEXT:    [[ISNORMAL:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 
0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
-// CHECK-NEXT:    [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
+// CHECK-NEXT:    br i1 [[TMP3]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_NORMAL:%.*]]
+// CHECK:       fpclassify_not_normal:
+// CHECK-NEXT:    br i1 [[TMP4]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_SUBNORMAL:%.*]]
+// CHECK:       fpclassify_not_subnormal:
 // CHECK-NEXT:    br label [[FPCLASSIFY_END]]
 //
 void test_fpclassify(double d) {
@@ -156,7 +155,7 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) 
#[[ATTR5]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #5
 // CHECK-NEXT:    [[ISINF:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 
0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
@@ -233,4 +232,4 @@ void test_isnormal(double d) {
   P(isnormal, (d));
 
   return;
-}
+}
\ No newline at end of file

>From 24c0dfa180e0f040c5444df1a6e4e416f9cabb12 Mon Sep 17 00:00:00 2001
From: YeonguChoe <[email protected]>
Date: Wed, 25 Feb 2026 23:53:02 -0500
Subject: [PATCH 2/2] Fix nvptx_device_math_macro test for the llvm.is.fpclass lowering

---
 clang/lib/CodeGen/CGBuiltin.cpp               | 514 +++++++++---------
 clang/test/CodeGen/strictfp_builtins.c        |   8 +-
 .../test/Headers/nvptx_device_math_macro.cpp  |   9 +-
 3 files changed, 265 insertions(+), 266 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index cee874730a3c3..35cae2fde31d0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -244,15 +244,15 @@ llvm::Constant 
*CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
   }
 
   llvm::FunctionType *Ty =
-    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
 
   return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
 }
 
 /// Emit the conversions required to turn the given value into an
 /// integer of the given size.
-Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
-                        QualType T, llvm::IntegerType *IntType) {
+Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T,
+                 llvm::IntegerType *IntType) {
   V = CGF.EmitToMemory(V, T);
 
   if (V->getType()->isPointerTy())
@@ -262,8 +262,8 @@ Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
   return V;
 }
 
-Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
-                          QualType T, llvm::Type *ResultType) {
+Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T,
+                   llvm::Type *ResultType) {
   V = CGF.EmitFromMemory(V, T);
 
   if (ResultType->isPointerTy())
@@ -292,14 +292,14 @@ Address CheckAtomicAlignment(CodeGenFunction &CGF, const 
CallExpr *E) {
 
 /// Utility to insert an atomic instruction based on Intrinsic::ID
 /// and the expression node.
-Value *MakeBinaryAtomicValue(
-    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
-    AtomicOrdering Ordering) {
+Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
+                             llvm::AtomicRMWInst::BinOp Kind, const CallExpr 
*E,
+                             AtomicOrdering Ordering) {
 
   QualType T = E->getType();
   assert(E->getArg(0)->getType()->isPointerType());
-  assert(CGF.getContext().hasSameUnqualifiedType(T,
-                                  E->getArg(0)->getType()->getPointeeType()));
+  assert(CGF.getContext().hasSameUnqualifiedType(
+      T, E->getArg(0)->getType()->getPointeeType()));
   assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
 
   Address DestAddr = CheckAtomicAlignment(CGF, E);
@@ -346,13 +346,12 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
 /// operation.
 static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                    llvm::AtomicRMWInst::BinOp Kind,
-                                   const CallExpr *E,
-                                   Instruction::BinaryOps Op,
+                                   const CallExpr *E, Instruction::BinaryOps 
Op,
                                    bool Invert = false) {
   QualType T = E->getType();
   assert(E->getArg(0)->getType()->isPointerType());
-  assert(CGF.getContext().hasSameUnqualifiedType(T,
-                                  E->getArg(0)->getType()->getPointeeType()));
+  assert(CGF.getContext().hasSameUnqualifiedType(
+      T, E->getArg(0)->getType()->getPointeeType()));
   assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
 
   Address DestAddr = CheckAtomicAlignment(CGF, E);
@@ -390,7 +389,7 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
 /// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
 /// invoke the function EmitAtomicCmpXchgForMSIntrin.
 Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
-                                     bool ReturnBool) {
+                              bool ReturnBool) {
   QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
   Address DestAddr = CheckAtomicAlignment(CGF, E);
 
@@ -428,8 +427,8 @@ Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const 
CallExpr *E,
 /// function MakeAtomicCmpXchgValue since it expects the arguments to be
 /// already swapped.
 
-static
-Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
+static Value *EmitAtomicCmpXchgForMSIntrin(
+    CodeGenFunction &CGF, const CallExpr *E,
     AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
   assert(E->getArg(0)->getType()->isPointerType());
   assert(CGF.getContext().hasSameUnqualifiedType(
@@ -452,9 +451,9 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, 
const CallExpr *E,
   }
 
   // For Release ordering, the failure ordering should be Monotonic.
-  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
-                         AtomicOrdering::Monotonic :
-                         SuccessOrdering;
+  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
+                             ? AtomicOrdering::Monotonic
+                             : SuccessOrdering;
 
   // The atomic instruction is marked volatile for consistency with MSVC. This
   // blocks the few atomics optimizations that LLVM has. If we want to optimize
@@ -537,7 +536,8 @@ static Value 
*EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
   return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
 }
 
-static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
+static Value *EmitAtomicIncrementValue(
+    CodeGenFunction &CGF, const CallExpr *E,
     AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
   assert(E->getArg(0)->getType()->isPointerType());
 
@@ -590,14 +590,15 @@ static Value *EmitISOVolatileStore(CodeGenFunction &CGF, 
const CallExpr *E) {
 // matching the argument type. Depending on mode, this may be a constrained
 // floating-point intrinsic.
 Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                const CallExpr *E, unsigned IntrinsicID,
-                                unsigned ConstrainedIntrinsicID) {
+                                          const CallExpr *E,
+                                          unsigned IntrinsicID,
+                                          unsigned ConstrainedIntrinsicID) {
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
 
   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
   if (CGF.Builder.getIsFPConstrained()) {
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, 
Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
   } else {
     Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
     return CGF.Builder.CreateCall(F, Src0);
@@ -606,19 +607,20 @@ Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction 
&CGF,
 
 // Emit an intrinsic that has 2 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                const CallExpr *E, unsigned IntrinsicID,
-                                unsigned ConstrainedIntrinsicID) {
+static Value *
+emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                    unsigned IntrinsicID,
+                                    unsigned ConstrainedIntrinsicID) {
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
   llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
 
   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
   if (CGF.Builder.getIsFPConstrained()) {
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, 
Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
   } else {
     Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
-    return CGF.Builder.CreateCall(F, { Src0, Src1 });
+    return CGF.Builder.CreateCall(F, {Src0, Src1});
   }
 }
 
@@ -644,9 +646,10 @@ emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction 
&CGF, const CallExpr *E,
 
 // Emit an intrinsic that has 3 operands of the same type as its result.
 // Depending on mode, this may be a constrained floating-point intrinsic.
-static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
-                                 const CallExpr *E, unsigned IntrinsicID,
-                                 unsigned ConstrainedIntrinsicID) {
+static Value *
+emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+                                     unsigned IntrinsicID,
+                                     unsigned ConstrainedIntrinsicID) {
   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
   llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
   llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
@@ -654,10 +657,10 @@ static Value 
*emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
   if (CGF.Builder.getIsFPConstrained()) {
     Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, 
Src0->getType());
-    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
+    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1, Src2});
   } else {
     Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
-    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
+    return CGF.Builder.CreateCall(F, {Src0, Src1, Src2});
   }
 }
 
@@ -858,11 +861,11 @@ llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
 }
 
 namespace {
-  struct WidthAndSignedness {
-    unsigned Width;
-    bool Signed;
-  };
-}
+struct WidthAndSignedness {
+  unsigned Width;
+  bool Signed;
+};
+} // namespace
 
 static WidthAndSignedness
 getIntegerWidthAndSignedness(const clang::ASTContext &context,
@@ -921,11 +924,9 @@ getDefaultBuiltinObjectSizeResult(unsigned Type, 
llvm::IntegerType *ResType) {
   return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
 }
 
-llvm::Value *
-CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
-                                                 llvm::IntegerType *ResType,
-                                                 llvm::Value *EmittedE,
-                                                 bool IsDynamic) {
+llvm::Value *CodeGenFunction::evaluateOrEmitBuiltinObjectSize(
+    const Expr *E, unsigned Type, llvm::IntegerType *ResType,
+    llvm::Value *EmittedE, bool IsDynamic) {
   if (std::optional<uint64_t> ObjectSize =
           E->tryEvaluateObjectSize(getContext(), Type))
     return ConstantInt::get(ResType, *ObjectSize, /*isSigned=*/true);
@@ -1505,10 +1506,11 @@ llvm::Value *CodeGenFunction::emitCountedByMemberSize(
 /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
 /// and we wouldn't otherwise try to reference a pass_object_size parameter,
 /// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
-llvm::Value *
-CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
-                                       llvm::IntegerType *ResType,
-                                       llvm::Value *EmittedE, bool IsDynamic) {
+llvm::Value *CodeGenFunction::emitBuiltinObjectSize(const Expr *E,
+                                                    unsigned Type,
+                                                    llvm::IntegerType *ResType,
+                                                    llvm::Value *EmittedE,
+                                                    bool IsDynamic) {
   // We need to reference an argument if the pointer is a parameter with the
   // pass_object_size attribute.
   if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
@@ -1637,16 +1639,19 @@ BitTest BitTest::decodeBitTestBuiltin(unsigned 
BuiltinID) {
 
 static char bitActionToX86BTCode(BitTest::ActionKind A) {
   switch (A) {
-  case BitTest::TestOnly:   return '\0';
-  case BitTest::Complement: return 'c';
-  case BitTest::Reset:      return 'r';
-  case BitTest::Set:        return 's';
+  case BitTest::TestOnly:
+    return '\0';
+  case BitTest::Complement:
+    return 'c';
+  case BitTest::Reset:
+    return 'r';
+  case BitTest::Set:
+    return 's';
   }
   llvm_unreachable("invalid action");
 }
 
-static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
-                                            BitTest BT,
+static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT,
                                             const CallExpr *E, Value *BitBase,
                                             Value *BitPos) {
   char Action = bitActionToX86BTCode(BT.Action);
@@ -1683,11 +1688,16 @@ static llvm::Value 
*EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
 static llvm::AtomicOrdering
 getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
   switch (I) {
-  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
-  case BitTest::Sequential: return 
llvm::AtomicOrdering::SequentiallyConsistent;
-  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
-  case BitTest::Release:    return llvm::AtomicOrdering::Release;
-  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
+  case BitTest::Unlocked:
+    return llvm::AtomicOrdering::NotAtomic;
+  case BitTest::Sequential:
+    return llvm::AtomicOrdering::SequentiallyConsistent;
+  case BitTest::Acquire:
+    return llvm::AtomicOrdering::Acquire;
+  case BitTest::Release:
+    return llvm::AtomicOrdering::Release;
+  case BitTest::NoFence:
+    return llvm::AtomicOrdering::Monotonic;
   }
   llvm_unreachable("invalid interlocking");
 }
@@ -1789,11 +1799,7 @@ static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction 
&CGF,
 }
 
 namespace {
-enum class MSVCSetJmpKind {
-  _setjmpex,
-  _setjmp3,
-  _setjmp
-};
+enum class MSVCSetJmpKind { _setjmpex, _setjmp3, _setjmp };
 }
 
 /// MSVC handles setjmp a bit differently on different platforms. On every
@@ -2029,7 +2035,7 @@ struct CallObjCArcUse final : EHScopeStack::Cleanup {
     CGF.EmitARCIntrinsicUse(object);
   }
 };
-}
+} // namespace
 
 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                  BuiltinCheckKind Kind) {
@@ -2291,8 +2297,8 @@ RValue CodeGenFunction::emitBuiltinOSLogFormat(const 
CallExpr &E) {
 
     unsigned ArgValSize =
         CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
-    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
-                                                     ArgValSize);
+    llvm::IntegerType *IntTy =
+        llvm::Type::getIntNTy(getLLVMContext(), ArgValSize);
     ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
     CanQualType ArgTy = getOSLogArgType(Ctx, Size);
     // If ArgVal has type x86_fp80, zero-extend ArgVal.
@@ -2427,8 +2433,7 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const 
clang::Expr *Op1,
         IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
     Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
     if (ResultInfo.Width < OpWidth) {
-      auto IntMax =
-          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
+      auto IntMax = llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
       llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
           UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
       Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
@@ -2548,69 +2553,69 @@ RValue CodeGenFunction::emitRotate(const CallExpr *E, 
bool IsRotateRight) {
 // Map math builtins for long-double to f128 version.
 static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
   switch (BuiltinID) {
-#define MUTATE_LDBL(func) \
-  case Builtin::BI__builtin_##func##l: \
+#define MUTATE_LDBL(func)                                                      
\
+  case Builtin::BI__builtin_##func##l:                                         
\
     return Builtin::BI__builtin_##func##f128;
-  MUTATE_LDBL(sqrt)
-  MUTATE_LDBL(cbrt)
-  MUTATE_LDBL(fabs)
-  MUTATE_LDBL(log)
-  MUTATE_LDBL(log2)
-  MUTATE_LDBL(log10)
-  MUTATE_LDBL(log1p)
-  MUTATE_LDBL(logb)
-  MUTATE_LDBL(exp)
-  MUTATE_LDBL(exp2)
-  MUTATE_LDBL(expm1)
-  MUTATE_LDBL(fdim)
-  MUTATE_LDBL(hypot)
-  MUTATE_LDBL(ilogb)
-  MUTATE_LDBL(pow)
-  MUTATE_LDBL(fmin)
-  MUTATE_LDBL(fmax)
-  MUTATE_LDBL(ceil)
-  MUTATE_LDBL(trunc)
-  MUTATE_LDBL(rint)
-  MUTATE_LDBL(nearbyint)
-  MUTATE_LDBL(round)
-  MUTATE_LDBL(floor)
-  MUTATE_LDBL(lround)
-  MUTATE_LDBL(llround)
-  MUTATE_LDBL(lrint)
-  MUTATE_LDBL(llrint)
-  MUTATE_LDBL(fmod)
-  MUTATE_LDBL(modf)
-  MUTATE_LDBL(nan)
-  MUTATE_LDBL(nans)
-  MUTATE_LDBL(inf)
-  MUTATE_LDBL(fma)
-  MUTATE_LDBL(sin)
-  MUTATE_LDBL(cos)
-  MUTATE_LDBL(tan)
-  MUTATE_LDBL(sinh)
-  MUTATE_LDBL(cosh)
-  MUTATE_LDBL(tanh)
-  MUTATE_LDBL(asin)
-  MUTATE_LDBL(acos)
-  MUTATE_LDBL(atan)
-  MUTATE_LDBL(asinh)
-  MUTATE_LDBL(acosh)
-  MUTATE_LDBL(atanh)
-  MUTATE_LDBL(atan2)
-  MUTATE_LDBL(erf)
-  MUTATE_LDBL(erfc)
-  MUTATE_LDBL(ldexp)
-  MUTATE_LDBL(frexp)
-  MUTATE_LDBL(huge_val)
-  MUTATE_LDBL(copysign)
-  MUTATE_LDBL(nextafter)
-  MUTATE_LDBL(nexttoward)
-  MUTATE_LDBL(remainder)
-  MUTATE_LDBL(remquo)
-  MUTATE_LDBL(scalbln)
-  MUTATE_LDBL(scalbn)
-  MUTATE_LDBL(tgamma)
-  MUTATE_LDBL(lgamma)
+    MUTATE_LDBL(sqrt)
+    MUTATE_LDBL(cbrt)
+    MUTATE_LDBL(fabs)
+    MUTATE_LDBL(log)
+    MUTATE_LDBL(log2)
+    MUTATE_LDBL(log10)
+    MUTATE_LDBL(log1p)
+    MUTATE_LDBL(logb)
+    MUTATE_LDBL(exp)
+    MUTATE_LDBL(exp2)
+    MUTATE_LDBL(expm1)
+    MUTATE_LDBL(fdim)
+    MUTATE_LDBL(hypot)
+    MUTATE_LDBL(ilogb)
+    MUTATE_LDBL(pow)
+    MUTATE_LDBL(fmin)
+    MUTATE_LDBL(fmax)
+    MUTATE_LDBL(ceil)
+    MUTATE_LDBL(trunc)
+    MUTATE_LDBL(rint)
+    MUTATE_LDBL(nearbyint)
+    MUTATE_LDBL(round)
+    MUTATE_LDBL(floor)
+    MUTATE_LDBL(lround)
+    MUTATE_LDBL(llround)
+    MUTATE_LDBL(lrint)
+    MUTATE_LDBL(llrint)
+    MUTATE_LDBL(fmod)
+    MUTATE_LDBL(modf)
+    MUTATE_LDBL(nan)
+    MUTATE_LDBL(nans)
+    MUTATE_LDBL(inf)
+    MUTATE_LDBL(fma)
+    MUTATE_LDBL(sin)
+    MUTATE_LDBL(cos)
+    MUTATE_LDBL(tan)
+    MUTATE_LDBL(sinh)
+    MUTATE_LDBL(cosh)
+    MUTATE_LDBL(tanh)
+    MUTATE_LDBL(asin)
+    MUTATE_LDBL(acos)
+    MUTATE_LDBL(atan)
+    MUTATE_LDBL(asinh)
+    MUTATE_LDBL(acosh)
+    MUTATE_LDBL(atanh)
+    MUTATE_LDBL(atan2)
+    MUTATE_LDBL(erf)
+    MUTATE_LDBL(erfc)
+    MUTATE_LDBL(ldexp)
+    MUTATE_LDBL(frexp)
+    MUTATE_LDBL(huge_val)
+    MUTATE_LDBL(copysign)
+    MUTATE_LDBL(nextafter)
+    MUTATE_LDBL(nexttoward)
+    MUTATE_LDBL(remainder)
+    MUTATE_LDBL(remquo)
+    MUTATE_LDBL(scalbln)
+    MUTATE_LDBL(scalbn)
+    MUTATE_LDBL(tgamma)
+    MUTATE_LDBL(lgamma)
 #undef MUTATE_LDBL
   default:
     return BuiltinID;
@@ -2654,11 +2659,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
   if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
       !Result.hasSideEffects()) {
     if (Result.Val.isInt())
-      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
-                                                Result.Val.getInt()));
+      return RValue::get(
+          llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()));
     if (Result.Val.isFloat())
-      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
-                                               Result.Val.getFloat()));
+      return RValue::get(
+          llvm::ConstantFP::get(getLLVMContext(), Result.Val.getFloat()));
   }
 
   // If current long-double semantics is IEEE 128-bit, replace math builtins
@@ -2756,9 +2761,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_ceill:
     case Builtin::BI__builtin_ceilf128:
     case Builtin::BI__builtin_elementwise_ceil:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::ceil,
-                                   Intrinsic::experimental_constrained_ceil));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::ceil, 
Intrinsic::experimental_constrained_ceil));
 
     case Builtin::BIcopysign:
     case Builtin::BIcopysignf:
@@ -2780,9 +2784,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_cosl:
     case Builtin::BI__builtin_cosf128:
     case Builtin::BI__builtin_elementwise_cos:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::cos,
-                                   Intrinsic::experimental_constrained_cos));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::cos, Intrinsic::experimental_constrained_cos));
 
     case Builtin::BIcosh:
     case Builtin::BIcoshf:
@@ -2805,9 +2808,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_expl:
     case Builtin::BI__builtin_expf128:
     case Builtin::BI__builtin_elementwise_exp:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::exp,
-                                   Intrinsic::experimental_constrained_exp));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::exp, Intrinsic::experimental_constrained_exp));
 
     case Builtin::BIexp2:
     case Builtin::BIexp2f:
@@ -2818,9 +2820,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_exp2l:
     case Builtin::BI__builtin_exp2f128:
     case Builtin::BI__builtin_elementwise_exp2:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::exp2,
-                                   Intrinsic::experimental_constrained_exp2));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::exp2, 
Intrinsic::experimental_constrained_exp2));
     case Builtin::BI__builtin_exp10:
     case Builtin::BI__builtin_exp10f:
     case Builtin::BI__builtin_exp10f16:
@@ -2853,9 +2854,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_floorl:
     case Builtin::BI__builtin_floorf128:
     case Builtin::BI__builtin_elementwise_floor:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::floor,
-                                   Intrinsic::experimental_constrained_floor));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::floor,
+          Intrinsic::experimental_constrained_floor));
 
     case Builtin::BIfma:
     case Builtin::BIfmaf:
@@ -2866,9 +2867,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_fmal:
     case Builtin::BI__builtin_fmaf128:
     case Builtin::BI__builtin_elementwise_fma:
-      return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::fma,
-                                   Intrinsic::experimental_constrained_fma));
+      return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::fma, Intrinsic::experimental_constrained_fma));
 
     case Builtin::BIfmax:
     case Builtin::BIfmaxf:
@@ -2954,9 +2954,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_logl:
     case Builtin::BI__builtin_logf128:
     case Builtin::BI__builtin_elementwise_log:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::log,
-                                   Intrinsic::experimental_constrained_log));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::log, Intrinsic::experimental_constrained_log));
 
     case Builtin::BIlog10:
     case Builtin::BIlog10f:
@@ -2967,9 +2966,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_log10l:
     case Builtin::BI__builtin_log10f128:
     case Builtin::BI__builtin_elementwise_log10:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::log10,
-                                   Intrinsic::experimental_constrained_log10));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::log10,
+          Intrinsic::experimental_constrained_log10));
 
     case Builtin::BIlog2:
     case Builtin::BIlog2f:
@@ -2980,9 +2979,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_log2l:
     case Builtin::BI__builtin_log2f128:
     case Builtin::BI__builtin_elementwise_log2:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::log2,
-                                   Intrinsic::experimental_constrained_log2));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::log2, Intrinsic::experimental_constrained_log2));
 
     case Builtin::BInearbyint:
     case Builtin::BInearbyintf:
@@ -2992,9 +2990,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_nearbyintl:
     case Builtin::BI__builtin_nearbyintf128:
     case Builtin::BI__builtin_elementwise_nearbyint:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                Intrinsic::nearbyint,
-                                
Intrinsic::experimental_constrained_nearbyint));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::nearbyint,
+          Intrinsic::experimental_constrained_nearbyint));
 
     case Builtin::BIpow:
     case Builtin::BIpowf:
@@ -3005,9 +3003,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_powl:
     case Builtin::BI__builtin_powf128:
     case Builtin::BI__builtin_elementwise_pow:
-      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::pow,
-                                   Intrinsic::experimental_constrained_pow));
+      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::pow, Intrinsic::experimental_constrained_pow));
 
     case Builtin::BIrint:
     case Builtin::BIrintf:
@@ -3018,9 +3015,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_rintl:
     case Builtin::BI__builtin_rintf128:
     case Builtin::BI__builtin_elementwise_rint:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::rint,
-                                   Intrinsic::experimental_constrained_rint));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::rint, Intrinsic::experimental_constrained_rint));
 
     case Builtin::BIround:
     case Builtin::BIroundf:
@@ -3031,9 +3027,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_roundl:
     case Builtin::BI__builtin_roundf128:
     case Builtin::BI__builtin_elementwise_round:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::round,
-                                   Intrinsic::experimental_constrained_round));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::round,
+          Intrinsic::experimental_constrained_round));
 
     case Builtin::BIroundeven:
     case Builtin::BIroundevenf:
@@ -3044,9 +3040,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_roundevenl:
     case Builtin::BI__builtin_roundevenf128:
     case Builtin::BI__builtin_elementwise_roundeven:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::roundeven,
-                                   
Intrinsic::experimental_constrained_roundeven));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::roundeven,
+          Intrinsic::experimental_constrained_roundeven));
 
     case Builtin::BIsin:
     case Builtin::BIsinf:
@@ -3057,9 +3053,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_sinl:
     case Builtin::BI__builtin_sinf128:
     case Builtin::BI__builtin_elementwise_sin:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::sin,
-                                   Intrinsic::experimental_constrained_sin));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::sin, Intrinsic::experimental_constrained_sin));
 
     case Builtin::BIsinh:
     case Builtin::BIsinhf:
@@ -3142,9 +3137,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     case Builtin::BI__builtin_truncl:
     case Builtin::BI__builtin_truncf128:
     case Builtin::BI__builtin_elementwise_trunc:
-      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
-                                   Intrinsic::trunc,
-                                   Intrinsic::experimental_constrained_trunc));
+      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+          *this, E, Intrinsic::trunc,
+          Intrinsic::experimental_constrained_trunc));
 
     case Builtin::BIlround:
     case Builtin::BIlroundf:
@@ -3225,7 +3220,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
   };
 
   switch (BuiltinIDIfNoAsmLabel) {
-  default: break;
+  default:
+    break;
   case Builtin::BI__builtin___CFStringMakeConstantString:
   case Builtin::BI__builtin___NSStringMakeConstantString:
     return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
@@ -3304,13 +3300,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
     // is available as debuginfo is needed to preserve user-level
     // access pattern.
     if (!getDebugInfo()) {
-      CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() 
without -g");
+      CGM.Error(E->getExprLoc(),
+                "using builtin_preserve_access_index() without -g");
       return RValue::get(EmitScalarExpr(E->getArg(0)));
     }
 
     // Nested builtin_preserve_access_index() not supported
     if (IsInPreservedAIRegion) {
-      CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not 
supported");
+      CGM.Error(E->getExprLoc(),
+                "nested builtin_preserve_access_index() not supported");
       return RValue::get(EmitScalarExpr(E->getArg(0)));
     }
 
@@ -3347,8 +3345,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
     Value *Result =
         Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
-    Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
-                                   "cast");
+    Result =
+        Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_ctzs:
@@ -3442,8 +3440,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
     Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
     if (Result->getType() != ResultType)
-      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
-                                     "cast");
+      Result =
+          Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_parity:
@@ -3459,8 +3457,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     Value *Tmp = Builder.CreateCall(F, ArgValue);
     Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
     if (Result->getType() != ResultType)
-      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
-                                     "cast");
+      Result =
+          Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
     return RValue::get(Result);
   }
   case Builtin::BI__lzcnt16:
@@ -3474,8 +3472,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     llvm::Type *ResultType = ConvertType(E->getType());
     Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
     if (Result->getType() != ResultType)
-      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
-                                     "cast");
+      Result =
+          Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
     return RValue::get(Result);
   }
   case Builtin::BI__popcnt16:
@@ -3550,7 +3548,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     const Expr *Ptr = E->getArg(0);
     Value *PtrValue = EmitScalarExpr(Ptr);
     Value *OffsetValue =
-      (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
+        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
 
     Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
     ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
@@ -3761,7 +3759,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
         CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
     Value *Result = Builder.CreateCall(F, ArgValue);
     if (Result->getType() != ResultType)
-      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false);
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_dynamic_object_size:
@@ -3808,10 +3806,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
   case Builtin::BI__builtin_prefetch: {
     Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
     // FIXME: Technically these constants should of type 'int', yes?
-    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
-      llvm::ConstantInt::get(Int32Ty, 0);
-    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
-      llvm::ConstantInt::get(Int32Ty, 3);
+    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1))
+                               : llvm::ConstantInt::get(Int32Ty, 0);
+    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2))
+                                     : llvm::ConstantInt::get(Int32Ty, 3);
     Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
     Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
     Builder.CreateCall(F, {Address, RW, Locality, Data});
@@ -3870,12 +3868,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
       Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
                                      Src0->getType());
-      return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
+      return RValue::get(Builder.CreateConstrainedFPCall(F, {Src0, Src1}));
     }
 
-    Function *F = CGM.getIntrinsic(Intrinsic::powi,
-                                   { Src0->getType(), Src1->getType() });
-    return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
+    Function *F =
+        CGM.getIntrinsic(Intrinsic::powi, {Src0->getType(), Src1->getType()});
+    return RValue::get(Builder.CreateCall(F, {Src0, Src1}));
   }
   case Builtin::BI__builtin_frexpl: {
     // Linux PPC will not be adding additional PPCDoubleDouble support.
@@ -3912,7 +3910,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     Value *RHS = EmitScalarExpr(E->getArg(1));
 
     switch (BuiltinID) {
-    default: llvm_unreachable("Unknown ordered comparison");
+    default:
+      llvm_unreachable("Unknown ordered comparison");
     case Builtin::BI__builtin_isgreater:
       LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
       break;
@@ -4399,8 +4398,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     llvm::Type *ResultType = ConvertType(E->getType());
     Value *Result = Builder.CreateCall(F);
     if (Result->getType() != ResultType)
-      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
-                                     "cast");
+      Result =
+          Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast");
     return RValue::get(Result);
   }
 
@@ -4416,16 +4415,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
     Value *V = EmitScalarExpr(E->getArg(5));
 
-    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
-    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
-    Value *NormalLiteral = EmitScalarExpr(E->getArg(2));
-    Value *SubnormalLiteral = EmitScalarExpr(E->getArg(3));
-    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
-
-    Value *IsNan = Builder.createIsFPClass(V, 0b0000000011);
-    Value *IsInf = Builder.createIsFPClass(V, 0b1000000100);
-    Value *IsNormal = Builder.createIsFPClass(V, 0b0100001000);
-    Value *IsSubnormal = Builder.createIsFPClass(V, 0b0010010000);
+    Value *IsNan = Builder.createIsFPClass(V, FPClassTest::fcNan);
+    Value *IsInf = Builder.createIsFPClass(V, FPClassTest::fcInf);
+    Value *IsNormal = Builder.createIsFPClass(V, FPClassTest::fcNormal);
+    Value *IsSubnormal = Builder.createIsFPClass(V, FPClassTest::fcSubnormal);
 
     BasicBlock *Entry = Builder.GetInsertBlock();
 
@@ -4438,18 +4431,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
     Builder.SetInsertPoint(Entry);
     BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", CurFn);
     Builder.CreateCondBr(IsNan, End, NotNan);
+    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
     Result->addIncoming(NanLiteral, Entry);
 
     // Check if V is infinity
     Builder.SetInsertPoint(NotNan);
     BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", CurFn);
     Builder.CreateCondBr(IsInf, End, NotInf);
+    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
     Result->addIncoming(InfLiteral, NotNan);
 
     // Check if V is normal
     Builder.SetInsertPoint(NotInf);
     BasicBlock *NotNormal = createBasicBlock("fpclassify_not_normal", CurFn);
     Builder.CreateCondBr(IsNormal, End, NotNormal);
+    Value *NormalLiteral = EmitScalarExpr(E->getArg(2));
     Result->addIncoming(NormalLiteral, NotInf);
 
     // Check if V is subnormal
@@ -4457,11 +4453,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
     BasicBlock *NotSubnormal =
         createBasicBlock("fpclassify_not_subnormal", CurFn);
     Builder.CreateCondBr(IsSubnormal, End, NotSubnormal);
+    Value *SubnormalLiteral = EmitScalarExpr(E->getArg(3));
     Result->addIncoming(SubnormalLiteral, NotNormal);
 
     // If V is not one of the above, it is zero
     Builder.SetInsertPoint(NotSubnormal);
     Builder.CreateBr(End);
+    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
     Result->addIncoming(ZeroLiteral, NotSubnormal);
 
     Builder.SetInsertPoint(End);
@@ -4612,8 +4610,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
     Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
-    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
-                                                  DestAddr, SrcAddr, SizeVal);
+    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestAddr, SrcAddr,
+                                                  SizeVal);
     return RValue::get(DestAddr, *this);
   }
 
@@ -4658,8 +4656,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
   case Builtin::BImemset:
   case Builtin::BI__builtin_memset: {
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
-    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
-                                         Builder.getInt8Ty());
+    Value *ByteVal =
+        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
                         E->getArg(0)->getExprLoc(), FD, 0);
@@ -4691,8 +4689,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     if (Size.ugt(DstSize))
       break;
     Address Dest = EmitPointerWithAlignment(E->getArg(0));
-    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
-                                         Builder.getInt8Ty());
+    Value *ByteVal =
+        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
     Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
     auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
     addInstToNewSourceAtom(I, nullptr);
@@ -4813,12 +4811,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
     int32_t Offset = 0;
 
     Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
-    return RValue::get(Builder.CreateCall(F,
-                                      llvm::ConstantInt::get(Int32Ty, 
Offset)));
+    return RValue::get(
+        Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, Offset)));
   }
   case Builtin::BI__builtin_return_address: {
-    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
-                                                   getContext().UnsignedIntTy);
+    Value *Depth = ConstantEmitter(*this).emitAbstract(
+        E->getArg(0), getContext().UnsignedIntTy);
     Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
     return RValue::get(Builder.CreateCall(F, Depth));
   }
@@ -4827,8 +4825,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
   }
   case Builtin::BI__builtin_frame_address: {
-    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
-                                                   getContext().UnsignedIntTy);
+    Value *Depth = ConstantEmitter(*this).emitAbstract(
+        E->getArg(0), getContext().UnsignedIntTy);
     Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
     return RValue::get(Builder.CreateCall(F, Depth));
   }
@@ -4847,8 +4845,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     return RValue::get(Result);
   }
   case Builtin::BI__builtin_dwarf_sp_column: {
-    llvm::IntegerType *Ty
-      = cast<llvm::IntegerType>(ConvertType(E->getType()));
+    llvm::IntegerType *Ty = cast<llvm::IntegerType>(ConvertType(E->getType()));
     int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
     if (Column == -1) {
       CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
@@ -5149,8 +5146,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
         CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
     llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
-    return EmitCall(FuncInfo, CGCallee::forDirect(Func),
-                    ReturnValueSlot(), Args);
+    return EmitCall(FuncInfo, CGCallee::forDirect(Func), ReturnValueSlot(),
+                    Args);
   }
 
   case Builtin::BI__atomic_thread_fence:
@@ -5170,17 +5167,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
       case 0:  // memory_order_relaxed
       default: // invalid order
         break;
-      case 1:  // memory_order_consume
-      case 2:  // memory_order_acquire
+      case 1: // memory_order_consume
+      case 2: // memory_order_acquire
         Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
         break;
-      case 3:  // memory_order_release
+      case 3: // memory_order_release
         Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
         break;
-      case 4:  // memory_order_acq_rel
+      case 4: // memory_order_acq_rel
         Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
         break;
-      case 5:  // memory_order_seq_cst
+      case 5: // memory_order_seq_cst
         Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, 
SSID);
         break;
       }
@@ -5432,7 +5429,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
     Intrinsic::ID IntrinsicId;
     switch (BuiltinID) {
-    default: llvm_unreachable("Unknown multiprecision builtin id.");
+    default:
+      llvm_unreachable("Unknown multiprecision builtin id.");
     case Builtin::BI__builtin_addcb:
     case Builtin::BI__builtin_addcs:
     case Builtin::BI__builtin_addc:
@@ -5451,13 +5449,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const 
GlobalDecl GD, unsigned BuiltinID,
 
     // Construct our resulting LLVM IR expression.
     llvm::Value *Carry1;
-    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
-                                              X, Y, Carry1);
+    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry1);
     llvm::Value *Carry2;
-    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
-                                              Sum1, Carryin, Carry2);
-    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
-                                               X->getType());
+    llvm::Value *Sum2 =
+        EmitOverflowIntrinsic(*this, IntrinsicId, Sum1, Carryin, Carry2);
+    llvm::Value *CarryOut =
+        Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), X->getType());
     Builder.CreateStore(CarryOut, CarryOutPtr);
     return RValue::get(Sum2);
   }
@@ -5548,7 +5545,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
 
     // Finally, store the result using the pointer.
     bool isVolatile =
-      ResultArg->getType()->getPointeeType().isVolatileQualified();
+        ResultArg->getType()->getPointeeType().isVolatileQualified();
     Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, 
isVolatile);
 
     return RValue::get(Overflow);
@@ -5583,7 +5580,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     // Decide which of the overflow intrinsics we are lowering to:
     Intrinsic::ID IntrinsicId;
     switch (BuiltinID) {
-    default: llvm_unreachable("Unknown overflow builtin id.");
+    default:
+      llvm_unreachable("Unknown overflow builtin id.");
     case Builtin::BI__builtin_uadd_overflow:
     case Builtin::BI__builtin_uaddl_overflow:
     case Builtin::BI__builtin_uaddll_overflow:
@@ -5616,7 +5614,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
       break;
     }
 
-
     llvm::Value *Carry;
     llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
     Builder.CreateStore(Sum, SumOutPtr);
@@ -5651,9 +5648,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
   case Builtin::BI__builtin_call_with_static_chain: {
     const CallExpr *Call = cast<CallExpr>(E->getArg(0));
     const Expr *Chain = E->getArg(1);
-    return EmitCall(Call->getCallee()->getType(),
-                    EmitCallee(Call->getCallee()), Call, ReturnValue,
-                    EmitScalarExpr(Chain));
+    return EmitCall(Call->getCallee()->getType(), EmitCallee(Call->getCallee()),
+                    Call, ReturnValue, EmitScalarExpr(Chain));
   }
   case Builtin::BI_InterlockedExchange8:
   case Builtin::BI_InterlockedExchange16:
@@ -5933,8 +5929,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
       const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
                                                              : 
"__write_pipe_4";
 
-      llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
-                              Int32Ty, Int32Ty};
+      llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty,
+                              I8PTy,           Int32Ty,         Int32Ty};
       Value *Arg2 = EmitScalarExpr(E->getArg(2)),
             *Arg3 = EmitScalarExpr(E->getArg(3));
       llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, 
false);
@@ -6071,8 +6067,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
     auto NewCall =
         EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
-    return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
-      ConvertType(E->getType())));
+    return RValue::get(
+        Builder.CreateBitOrPointerCast(NewCall, ConvertType(E->getType())));
   }
 
   // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
@@ -6495,7 +6491,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
   // can move this up to the beginning of the function.
   checkTargetFeatures(E, FD);
 
-  if (unsigned VectorWidth = 
getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
+  if (unsigned VectorWidth =
+          getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
     LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
 
   // See if we have a target specific intrinsic.
@@ -6516,7 +6513,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
   }
 
   if (IntrinsicID != Intrinsic::not_intrinsic) {
-    SmallVector<Value*, 16> Args;
+    SmallVector<Value *, 16> Args;
 
     // Find out if any arguments are required to be integer constant
     // expressions.
@@ -6566,7 +6563,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl 
GD, unsigned BuiltinID,
     if (RetTy != V->getType()) {
       // XXX - vector of pointers?
       if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
-        if (PtrTy->getAddressSpace() != 
V->getType()->getPointerAddressSpace()) {
+        if (PtrTy->getAddressSpace() !=
+            V->getType()->getPointerAddressSpace()) {
           V = Builder.CreateAddrSpaceCast(
               V, llvm::PointerType::get(getLLVMContext(),
                                         PtrTy->getAddressSpace()));
diff --git a/clang/test/CodeGen/strictfp_builtins.c 
b/clang/test/CodeGen/strictfp_builtins.c
index d965767be095b..4f826d62344fe 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -40,11 +40,11 @@ void p(char *str, int x) {
 // CHECK-NEXT:    call void @p(ptr noundef @.str.1, i32 noundef 
[[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT:    ret void
 // CHECK:       fpclassify_not_nan:
-// CHECK-NEXT:    br i1 [[TMP2]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_INF:%.*]]
+// CHECK-NEXT:    br i1 [[TMP2]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_INF]]
 // CHECK:       fpclassify_not_inf:
-// CHECK-NEXT:    br i1 [[TMP3]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_NORMAL:%.*]]
+// CHECK-NEXT:    br i1 [[TMP3]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_NORMAL]]
 // CHECK:       fpclassify_not_normal:
-// CHECK-NEXT:    br i1 [[TMP4]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_SUBNORMAL:%.*]]
+// CHECK-NEXT:    br i1 [[TMP4]], label [[FPCLASSIFY_END]], label 
[[FPCLASSIFY_NOT_SUBNORMAL]]
 // CHECK:       fpclassify_not_subnormal:
 // CHECK-NEXT:    br label [[FPCLASSIFY_END]]
 //
@@ -155,7 +155,7 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT:    [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT:    store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #5
+// CHECK-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) 
#[[ATTR5:[0-9]+]]
 // CHECK-NEXT:    [[ISINF:%.*]] = call i1 
@llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 
0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
diff --git a/clang/test/Headers/nvptx_device_math_macro.cpp 
b/clang/test/Headers/nvptx_device_math_macro.cpp
index 3faf527daf113..11380ba3f7bd3 100644
--- a/clang/test/Headers/nvptx_device_math_macro.cpp
+++ b/clang/test/Headers/nvptx_device_math_macro.cpp
@@ -8,10 +8,11 @@
 #pragma omp declare target
 int use_macro() {
   double a(0);
-// CHECK-NOT:  call {{.*}}
-// CHECK:  call double @llvm.fabs.f64(double
-// CHECK-NOT:  call {{.*}}
-// CHECK:      ret i32 %conv
+// CHECK: call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 3)
+// CHECK: call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 516)
+// CHECK: call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 264)
+// CHECK: call i1 @llvm.is.fpclass.f64(double {{.*}}, i32 144)
+// CHECK: ret i32
   return (std::fpclassify(a) != FP_ZERO);
 }
 #pragma omp end declare target

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to