https://github.com/Serosh-commits updated 
https://github.com/llvm/llvm-project/pull/194327

>From f2bc4a25927216f4b6981fe14b567e55b67af6ee Mon Sep 17 00:00:00 2001
From: Serosh-commits <[email protected]>
Date: Wed, 6 May 2026 12:55:20 +0530
Subject: [PATCH] address reviewer feedback

---
 clang/include/clang/Basic/Builtins.td         |  77 ++-
 clang/lib/AST/ByteCode/InterpBuiltin.cpp      | 456 ++++++++++++++++++
 clang/lib/AST/ExprConstant.cpp                | 328 +++++++++++++
 clang/test/CodeGen/aix-builtin-mapping.c      |  13 +-
 clang/test/CodeGen/logb_scalbn.c              | 286 ++++++++---
 .../test/SemaCXX/constexpr-cmath-builtins.cpp | 261 ++++++++++
 6 files changed, 1327 insertions(+), 94 deletions(-)
 create mode 100644 clang/test/SemaCXX/constexpr-cmath-builtins.cpp

diff --git a/clang/include/clang/Basic/Builtins.td 
b/clang/include/clang/Basic/Builtins.td
index 4a7eaeb3d353e..0ff15569cd4cb 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -138,7 +138,7 @@ def CbrtF128 : Builtin {
 
 def CeilF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_ceil"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
 }
 
@@ -192,19 +192,19 @@ def Expm1F128 : Builtin {
 
 def FdimF128 : Builtin {
   let Spellings = ["__builtin_fdimf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, __float128)";
 }
 
 def FloorF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_floor"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
 }
 
 def FmaF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_fma"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T, T)";
 }
 
@@ -264,13 +264,13 @@ def FabsF128 : Builtin {
 
 def FmodF16F128 : F16F128MathTemplate, Builtin {
   let Spellings = ["__builtin_fmod"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T)";
 }
 
 def FrexpF16F128 : F16F128MathTemplate, Builtin {
   let Spellings = ["__builtin_frexp"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Constexpr];
   let Prototype = "T(T, int*)";
 }
 
@@ -300,13 +300,13 @@ def InfF16 : Builtin {
 
 def LdexpF16F128 : F16F128MathTemplate, Builtin {
   let Spellings = ["__builtin_ldexp"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, int)";
 }
 
 def ModfF128 : Builtin {
   let Spellings = ["__builtin_modff128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Constexpr];
   let Prototype = "__float128(__float128, __float128*)";
 }
 
@@ -352,7 +352,7 @@ def HypotF128 : Builtin {
 
 def ILogbF128 : Builtin {
   let Spellings = ["__builtin_ilogbf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "int(__float128)";
 }
 
@@ -364,13 +364,13 @@ def LgammaF128 : Builtin {
 
 def LLrintF128 : Builtin {
   let Spellings = ["__builtin_llrintf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long long int(__float128)";
 }
 
 def LLroundF128 : Builtin {
   let Spellings = ["__builtin_llroundf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long long int(__float128)";
 }
 
@@ -406,55 +406,61 @@ def LogF16F128 : Builtin, F16F128MathTemplate {
 
 def LrintF128 : Builtin {
   let Spellings = ["__builtin_lrintf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long int(__float128)";
 }
 
 def LroundF128 : Builtin {
   let Spellings = ["__builtin_lroundf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long int(__float128)";
 }
 
+def NearbyintF16 : Builtin {
+  let Spellings = ["__builtin_nearbyintf16"];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
+  let Prototype = "_Float16(_Float16)";
+}
+
 def NearbyintF128 : Builtin {
   let Spellings = ["__builtin_nearbyintf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "__float128(__float128)";
 }
 
 def NextafterF128 : Builtin {
   let Spellings = ["__builtin_nextafterf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, __float128)";
 }
 
 def NexttowardF128 : Builtin {
   let Spellings = ["__builtin_nexttowardf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, __float128)";
 }
 
 def RemainderF128 : Builtin {
   let Spellings = ["__builtin_remainderf128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, 
ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, __float128)";
 }
 
 def RemquoF128 : Builtin {
   let Spellings = ["__builtin_remquof128"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Constexpr];
   let Prototype = "__float128(__float128, __float128, int*)";
 }
 
 def RintF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_rint"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
 }
 
 def RoundF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_round"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
 }
 
@@ -467,14 +473,14 @@ def RoundevenF16F128 : Builtin, F16F128MathTemplate {
 def ScanlblnF128 : Builtin {
   let Spellings = ["__builtin_scalblnf128"];
   let Attributes = [FunctionWithBuiltinPrefix, NoThrow,
-                    ConstIgnoringErrnoAndExceptions];
+                    ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, long int)";
 }
 
 def ScanlbnF128 : Builtin {
   let Spellings = ["__builtin_scalbnf128"];
   let Attributes = [FunctionWithBuiltinPrefix, NoThrow,
-                    ConstIgnoringErrnoAndExceptions];
+                    ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "__float128(__float128, int)";
 }
 
@@ -522,7 +528,7 @@ def TgammaF128 : Builtin {
 
 def TruncF16F128 : Builtin, F16F128MathTemplate {
   let Spellings = ["__builtin_trunc"];
-  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const];
+  let Attributes = [FunctionWithBuiltinPrefix, NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
 }
 
@@ -4059,6 +4065,7 @@ def Fmod : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Frexp : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4066,6 +4073,7 @@ def Frexp : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow];
+  let Attributes = [NoThrow, Constexpr];
   let Prototype = "T(T, int*)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Sincos : FPMathTemplate, GNULibBuiltin<"math.h"> {
@@ -4086,6 +4094,7 @@ def Ldexp : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, int)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Modf : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4093,6 +4102,7 @@ def Modf : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow];
+  let Attributes = [NoThrow, Constexpr];
   let Prototype = "T(T, T*)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Nan : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4164,6 +4174,7 @@ def Ceil : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, Const];
+  let Attributes = [NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Cos : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4228,6 +4239,7 @@ def Fdim : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Floor : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4235,6 +4247,7 @@ def Floor : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, Const];
+  let Attributes = [NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Fma : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4242,6 +4255,7 @@ def Fma : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T, T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Fmax : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4288,6 +4302,7 @@ def Ilogb : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "int(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Lgamma : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4302,6 +4317,7 @@ def Llrint : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long long int(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Llround : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4309,6 +4325,7 @@ def Llround : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long long int(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Log : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4351,6 +4368,7 @@ def Lrint : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long int(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Lround : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4358,13 +4376,15 @@ def Lround : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "long int(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Nearbyint : FPMathTemplate, LibBuiltin<"math.h"> {
   let Spellings = ["nearbyint"];
-  let Attributes = [NoThrow, Const];
+  let Attributes = [NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Nextafter : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4372,6 +4392,7 @@ def Nextafter : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Nexttoward : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4379,6 +4400,7 @@ def Nexttoward : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, long double)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Remainder : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4386,6 +4408,7 @@ def Remainder : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Remquo : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4393,6 +4416,7 @@ def Remquo : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow];
+  let Attributes = [NoThrow, Constexpr];
   let Prototype = "T(T, T, int*)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Rint : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4400,6 +4424,7 @@ def Rint : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringExceptions];
+  let Attributes = [NoThrow, ConstIgnoringExceptions, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Round : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4407,6 +4432,7 @@ def Round : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, Const];
+  let Attributes = [NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def RoundEven : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4421,6 +4447,7 @@ def Scalbln : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, long int)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Scalbn : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4428,6 +4455,7 @@ def Scalbn : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions];
+  let Attributes = [NoThrow, ConstIgnoringErrnoAndExceptions, Constexpr];
   let Prototype = "T(T, int)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Sin : FPMathTemplate, LibBuiltin<"math.h"> {
@@ -4477,6 +4505,7 @@ def Trunc : FPMathTemplate, LibBuiltin<"math.h"> {
-  let Attributes = [NoThrow, Const];
+  let Attributes = [NoThrow, Const, Constexpr];
   let Prototype = "T(T)";
   let AddBuiltinPrefixedAlias = 1;
+  let OnlyBuiltinPrefixedAliasIsConstexpr = 1;
 }
 
 def Cabs : FPMathTemplate, LibBuiltin<"complex.h"> {
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp 
b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 77ea83605cc16..4f6b3ac525438 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -717,6 +717,335 @@ static inline Floating abs(InterpState &S, const Floating 
&In) {
   return Output;
 }
 
+static bool interp__builtin_ceil(InterpState &S, CodePtr OpPC,
+                                 const InterpFrame *Frame, const CallExpr 
*Call,
+                                 unsigned BuiltinOp) {
+  const Floating &Val = S.Stk.pop<Floating>();
+  Floating Result = S.allocFloat(Val.getSemantics());
+  APFloat F = Val.getAPFloat();
+
+  llvm::RoundingMode RM;
+  switch (BuiltinOp) {
+  case Builtin::BI__builtin_ceil:
+  case Builtin::BI__builtin_ceilf:
+  case Builtin::BI__builtin_ceill:
+  case Builtin::BI__builtin_ceilf16:
+  case Builtin::BI__builtin_ceilf128:
+    RM = llvm::RoundingMode::TowardPositive;
+    break;
+  case Builtin::BI__builtin_floor:
+  case Builtin::BI__builtin_floorf:
+  case Builtin::BI__builtin_floorl:
+  case Builtin::BI__builtin_floorf16:
+  case Builtin::BI__builtin_floorf128:
+    RM = llvm::RoundingMode::TowardNegative;
+    break;
+  case Builtin::BI__builtin_trunc:
+  case Builtin::BI__builtin_truncf:
+  case Builtin::BI__builtin_truncl:
+  case Builtin::BI__builtin_truncf16:
+  case Builtin::BI__builtin_truncf128:
+    RM = llvm::RoundingMode::TowardZero;
+    break;
+  default:
+    llvm_unreachable("invalid builtin ID");
+  }
+
+  F.roundToIntegral(RM);
+  Result.copy(F);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_fdim(InterpState &S, CodePtr OpPC,
+                                 const InterpFrame *Frame,
+                                 const CallExpr *Call) {
+  const Floating &RHS = S.Stk.pop<Floating>();
+  const Floating &LHS = S.Stk.pop<Floating>();
+  APFloat L = LHS.getAPFloat();
+  APFloat R = RHS.getAPFloat();
+  APFloat Result(L.getSemantics());
+
+  if (L.isNaN() || R.isNaN()) {
+    Result = APFloat::getNaN(L.getSemantics());
+  } else if (L.compare(R) == APFloat::cmpGreaterThan) {
+    L.subtract(R, APFloat::rmNearestTiesToEven);
+    Result = L;
+  } else {
+    Result = APFloat::getZero(L.getSemantics());
+  }
+
+  Floating F = S.allocFloat(Result.getSemantics());
+  F.copy(Result);
+  S.Stk.push<Floating>(F);
+  return true;
+}
+
+static bool interp__builtin_fma(InterpState &S, CodePtr OpPC,
+                                const InterpFrame *Frame,
+                                const CallExpr *Call) {
+  const Floating &Z = S.Stk.pop<Floating>();
+  const Floating &Y = S.Stk.pop<Floating>();
+  const Floating &X = S.Stk.pop<Floating>();
+  APFloat Result = X.getAPFloat();
+
+  llvm::RoundingMode RM =
+      getRoundingMode(Call->getFPFeaturesInEffect(S.getLangOpts()));
+
+  Result.fusedMultiplyAdd(Y.getAPFloat(), Z.getAPFloat(), RM);
+  Floating F = S.allocFloat(Result.getSemantics());
+  F.copy(Result);
+  S.Stk.push<Floating>(F);
+  return true;
+}
+
+static bool interp__builtin_frexp(InterpState &S, CodePtr OpPC,
+                                  const InterpFrame *Frame,
+                                  const CallExpr *Call) {
+  const Pointer &Ptr = S.Stk.pop<Pointer>();
+  const Floating &Val = S.Stk.pop<Floating>();
+
+  int Exp = 0;
+  llvm::RoundingMode RM =
+      getRoundingMode(Call->getFPFeaturesInEffect(S.getLangOpts()));
+
+  APFloat F = frexp(Val.getAPFloat(), Exp, RM);
+
+  if (!Ptr.isDummy()) {
+    QualType ExpType = Call->getArg(1)->getType()->getPointeeType();
+    PrimType ExpT = *S.getContext().classify(ExpType);
+    assignIntegral(S, Ptr, ExpT, APSInt::get(Exp));
+    Ptr.initialize();
+  }
+
+  Floating Result = S.allocFloat(F.getSemantics());
+  Result.copy(F);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_modf(InterpState &S, CodePtr OpPC,
+                                 const InterpFrame *Frame,
+                                 const CallExpr *Call) {
+  const Pointer &Ptr = S.Stk.pop<Pointer>();
+  const Floating &Val = S.Stk.pop<Floating>();
+  const APFloat &F = Val.getAPFloat();
+
+  APFloat Integral = F;
+  Integral.roundToIntegral(APFloat::rmTowardZero);
+
+  if (!Ptr.isDummy()) {
+    Ptr.deref<Floating>().copy(Integral);
+    Ptr.initialize();
+  }
+
+  if (F.isInfinity()) {
+    Floating Fraction = S.allocFloat(F.getSemantics());
+    Fraction.copy(APFloat::getZero(F.getSemantics(), F.isNegative()));
+    S.Stk.push<Floating>(Fraction);
+    return true;
+  }
+
+  APFloat Fraction = F;
+  Fraction.subtract(Integral, APFloat::rmNearestTiesToEven);
+
+  Floating Result = S.allocFloat(Fraction.getSemantics());
+  Result.copy(Fraction);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_fmod(InterpState &S, CodePtr OpPC,
+                                 const InterpFrame *Frame, const CallExpr 
*Call,
+                                 unsigned BuiltinOp) {
+  const Floating &RHS = S.Stk.pop<Floating>();
+  const Floating &LHS = S.Stk.pop<Floating>();
+  const APFloat &L = LHS.getAPFloat();
+  const APFloat &R = RHS.getAPFloat();
+  APFloat ResF = L;
+
+  if (BuiltinOp == Builtin::BI__builtin_remainder ||
+      BuiltinOp == Builtin::BI__builtin_remainderf ||
+      BuiltinOp == Builtin::BI__builtin_remainderl ||
+      BuiltinOp == Builtin::BI__builtin_remainderf128)
+    ResF.remainder(R);
+  else
+    ResF.mod(R);
+
+  Floating F = S.allocFloat(ResF.getSemantics());
+  F.copy(ResF);
+  S.Stk.push<Floating>(F);
+  return true;
+}
+
+static bool interp__builtin_nextafter(InterpState &S, CodePtr OpPC,
+                                      const InterpFrame *Frame,
+                                      const CallExpr *Call) {
+  const Floating &RHS = S.Stk.pop<Floating>();
+  const Floating &LHS = S.Stk.pop<Floating>();
+  const APFloat &L = LHS.getAPFloat();
+  const APFloat &R = RHS.getAPFloat();
+
+  if (L.isNaN()) {
+    S.Stk.push<Floating>(LHS);
+    return true;
+  }
+
+  if (R.isNaN()) {
+    bool LoseInfo = false;
+    APFloat NaN = R;
+    NaN.convert(L.getSemantics(), APFloat::rmNearestTiesToEven, &LoseInfo);
+    Floating Result = S.allocFloat(NaN.getSemantics());
+    Result.copy(NaN);
+    S.Stk.push<Floating>(Result);
+    return true;
+  }
+
+  APFloat LCopy = L;
+  bool LoseInfo = false;
+  LCopy.convert(R.getSemantics(), APFloat::rmNearestTiesToEven, &LoseInfo);
+  APFloat::cmpResult Res = LCopy.compare(R);
+
+  APFloat Next = L;
+  if (Res == APFloat::cmpEqual) {
+    // C Annex F (F.10.8.3): nextafter(x, y) returns y when x == y; this
+    // preserves the sign of zero, e.g. nextafter(+0.0, -0.0) == -0.0.
+    Next = R;
+    Next.convert(L.getSemantics(), APFloat::rmNearestTiesToEven, &LoseInfo);
+  } else {
+    Next.next(Res == APFloat::cmpGreaterThan);
+  }
+
+  Floating Result = S.allocFloat(Next.getSemantics());
+  Result.copy(Next);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_scalbn(InterpState &S, CodePtr OpPC,
+                                   const InterpFrame *Frame,
+                                   const CallExpr *Call) {
+  PrimType ExpT = *S.getContext().classify(Call->getArg(1)->getType());
+  APSInt Exp;
+  if (!popToAPSInt(S.Stk, ExpT, Exp))
+    return false;
+  const Floating &Val = S.Stk.pop<Floating>();
+
+  llvm::RoundingMode RM =
+      getRoundingMode(Call->getFPFeaturesInEffect(S.getLangOpts()));
+
+  Floating Result = S.allocFloat(Val.getSemantics());
+  Result.copy(scalbn(Val.getAPFloat(), (int)Exp.getExtValue(), RM));
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_ilogb(InterpState &S, CodePtr OpPC,
+                                  const InterpFrame *Frame,
+                                  const CallExpr *Call) {
+  const Floating &Val = S.Stk.pop<Floating>();
+  pushInteger(S, ilogb(Val.getAPFloat()), Call->getType());
+  return true;
+}
+
+static bool interp__builtin_remquo(InterpState &S, CodePtr OpPC,
+                                   const InterpFrame *Frame,
+                                   const CallExpr *Call) {
+  const Pointer &Ptr = S.Stk.pop<Pointer>();
+  const Floating &RHS = S.Stk.pop<Floating>();
+  const Floating &LHS = S.Stk.pop<Floating>();
+
+  APFloat Q = LHS.getAPFloat();
+  if (Q.divide(RHS.getAPFloat(), APFloat::rmNearestTiesToEven) &
+      APFloat::opInvalidOp)
+    Q = APFloat::getZero(Q.getSemantics());
+  else
+    Q.roundToIntegral(APFloat::rmNearestTiesToEven);
+
+  if (!Ptr.isDummy()) {
+    QualType QuoType = Call->getArg(2)->getType()->getPointeeType();
+    APSInt QuoInt(S.getASTContext().getTypeSize(QuoType), false);
+    bool IsExact = false;
+    Q.convertToInteger(QuoInt, APFloat::rmTowardZero, &IsExact);
+
+    PrimType QuoT = *S.getContext().classify(QuoType);
+    assignIntegral(S, Ptr, QuoT, QuoInt);
+    Ptr.initialize();
+  }
+
+  APFloat R = LHS.getAPFloat();
+  R.remainder(RHS.getAPFloat());
+  Floating Result = S.allocFloat(R.getSemantics());
+  Result.copy(R);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_round(InterpState &S, CodePtr OpPC,
+                                  const InterpFrame *Frame,
+                                  const CallExpr *Call) {
+  const Floating &Val = S.Stk.pop<Floating>();
+  Floating Result = S.allocFloat(Val.getSemantics());
+  APFloat F = Val.getAPFloat();
+
+  F.roundToIntegral(llvm::RoundingMode::NearestTiesToAway);
+  Result.copy(F);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
+static bool interp__builtin_lrint(InterpState &S, CodePtr OpPC,
+                                  const InterpFrame *Frame,
+                                  const CallExpr *Call, unsigned BuiltinOp) {
+  const Floating &Val = S.Stk.pop<Floating>();
+  APFloat F = Val.getAPFloat();
+
+  llvm::RoundingMode RM;
+  switch (BuiltinOp) {
+  case Builtin::BI__builtin_lround:
+  case Builtin::BI__builtin_lroundf:
+  case Builtin::BI__builtin_lroundl:
+  case Builtin::BI__builtin_lroundf128:
+  case Builtin::BI__builtin_llround:
+  case Builtin::BI__builtin_llroundf:
+  case Builtin::BI__builtin_llroundl:
+  case Builtin::BI__builtin_llroundf128:
+    RM = llvm::RoundingMode::NearestTiesToAway;
+    break;
+  default:
+    RM = getRoundingMode(Call->getFPFeaturesInEffect(S.getLangOpts()));
+    break;
+  }
+
+  F.roundToIntegral(RM);
+
+  APSInt IntVal(S.getASTContext().getTypeSize(Call->getType()),
+                Call->getType()->isUnsignedIntegerOrEnumerationType());
+  bool IsExact = false;
+  APFloat::opStatus Status = F.convertToInteger(IntVal, RM, &IsExact);
+
+  if (Status & APFloat::opInvalidOp) {
+    if (S.diagnosing()) {
+      auto Loc = S.Current->getSource(OpPC);
+      S.CCEDiag(Loc, diag::note_constexpr_float_arithmetic)
+          << (F.isNaN() ? 1 : 0);
+    }
+  }
+
+  pushInteger(S, IntVal, Call->getType());
+  return true;
+}
+
+static bool interp__builtin_nearbyint(InterpState &S, CodePtr OpPC,
+                                      const InterpFrame *Frame,
+                                      const CallExpr *Call) {
+  const Floating &Val = S.Stk.pop<Floating>();
+  Floating Result = S.allocFloat(Val.getSemantics());
+  APFloat F = Val.getAPFloat();
+
+  llvm::RoundingMode RM =
+      getRoundingMode(Call->getFPFeaturesInEffect(S.getLangOpts()));
+
+  F.roundToIntegral(RM);
+  Result.copy(F);
+  S.Stk.push<Floating>(Result);
+  return true;
+}
+
 // The C standard says "fabs raises no floating-point exceptions,
 // even if x is a signaling NaN. The returned value is independent of
 // the current rounding direction mode."  Therefore constant folding can
@@ -4532,6 +4861,133 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, 
const CallExpr *Call,
   case Builtin::BI__builtin_issubnormal:
     return interp__builtin_issubnormal(S, OpPC, Frame, Call);
 
+  case Builtin::BI__builtin_nearbyint:
+  case Builtin::BI__builtin_nearbyintf:
+  case Builtin::BI__builtin_nearbyintl:
+  case Builtin::BI__builtin_nearbyintf16:
+  case Builtin::BI__builtin_nearbyintf128:
+  case Builtin::BI__builtin_rint:
+  case Builtin::BI__builtin_rintf:
+  case Builtin::BI__builtin_rintl:
+  case Builtin::BI__builtin_rintf16:
+  case Builtin::BI__builtin_rintf128:
+    return interp__builtin_nearbyint(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_lrint:
+  case Builtin::BI__builtin_lrintf:
+  case Builtin::BI__builtin_lrintl:
+  case Builtin::BI__builtin_lrintf128:
+  case Builtin::BI__builtin_llrint:
+  case Builtin::BI__builtin_llrintf:
+  case Builtin::BI__builtin_llrintl:
+  case Builtin::BI__builtin_llrintf128:
+  case Builtin::BI__builtin_lround:
+  case Builtin::BI__builtin_lroundf:
+  case Builtin::BI__builtin_lroundl:
+  case Builtin::BI__builtin_lroundf128:
+  case Builtin::BI__builtin_llround:
+  case Builtin::BI__builtin_llroundf:
+  case Builtin::BI__builtin_llroundl:
+  case Builtin::BI__builtin_llroundf128:
+    return interp__builtin_lrint(S, OpPC, Frame, Call, BuiltinID);
+
+  case Builtin::BI__builtin_ceil:
+  case Builtin::BI__builtin_ceilf:
+  case Builtin::BI__builtin_ceill:
+  case Builtin::BI__builtin_ceilf16:
+  case Builtin::BI__builtin_ceilf128:
+  case Builtin::BI__builtin_floor:
+  case Builtin::BI__builtin_floorf:
+  case Builtin::BI__builtin_floorl:
+  case Builtin::BI__builtin_floorf16:
+  case Builtin::BI__builtin_floorf128:
+  case Builtin::BI__builtin_trunc:
+  case Builtin::BI__builtin_truncf:
+  case Builtin::BI__builtin_truncl:
+  case Builtin::BI__builtin_truncf16:
+  case Builtin::BI__builtin_truncf128:
+    return interp__builtin_ceil(S, OpPC, Frame, Call, BuiltinID);
+
+  case Builtin::BI__builtin_fdim:
+  case Builtin::BI__builtin_fdimf:
+  case Builtin::BI__builtin_fdiml:
+  case Builtin::BI__builtin_fdimf128:
+    return interp__builtin_fdim(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_frexp:
+  case Builtin::BI__builtin_frexpf:
+  case Builtin::BI__builtin_frexpl:
+  case Builtin::BI__builtin_frexpf16:
+  case Builtin::BI__builtin_frexpf128:
+    return interp__builtin_frexp(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_modf:
+  case Builtin::BI__builtin_modff:
+  case Builtin::BI__builtin_modfl:
+  case Builtin::BI__builtin_modff128:
+    return interp__builtin_modf(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_fma:
+  case Builtin::BI__builtin_fmaf:
+  case Builtin::BI__builtin_fmal:
+  case Builtin::BI__builtin_fmaf16:
+  case Builtin::BI__builtin_fmaf128:
+    return interp__builtin_fma(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_fmod:
+  case Builtin::BI__builtin_fmodf:
+  case Builtin::BI__builtin_fmodl:
+  case Builtin::BI__builtin_fmodf16:
+  case Builtin::BI__builtin_fmodf128:
+  case Builtin::BI__builtin_remainder:
+  case Builtin::BI__builtin_remainderf:
+  case Builtin::BI__builtin_remainderl:
+  case Builtin::BI__builtin_remainderf128:
+    return interp__builtin_fmod(S, OpPC, Frame, Call, BuiltinID);
+
+  case Builtin::BI__builtin_nextafter:
+  case Builtin::BI__builtin_nextafterf:
+  case Builtin::BI__builtin_nextafterl:
+  case Builtin::BI__builtin_nextafterf128:
+  case Builtin::BI__builtin_nexttoward:
+  case Builtin::BI__builtin_nexttowardf:
+  case Builtin::BI__builtin_nexttowardl:
+  case Builtin::BI__builtin_nexttowardf128:
+    return interp__builtin_nextafter(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_scalbn:
+  case Builtin::BI__builtin_scalbnf:
+  case Builtin::BI__builtin_scalbnl:
+  case Builtin::BI__builtin_scalbnf128:
+  case Builtin::BI__builtin_scalbln:
+  case Builtin::BI__builtin_scalblnf:
+  case Builtin::BI__builtin_scalblnl:
+  case Builtin::BI__builtin_scalblnf128:
+  case Builtin::BI__builtin_ldexp:
+  case Builtin::BI__builtin_ldexpf:
+  case Builtin::BI__builtin_ldexpl:
+  case Builtin::BI__builtin_ldexpf16:
+  case Builtin::BI__builtin_ldexpf128:
+    return interp__builtin_scalbn(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_ilogb:
+  case Builtin::BI__builtin_ilogbf:
+  case Builtin::BI__builtin_ilogbl:
+  case Builtin::BI__builtin_ilogbf128:
+    return interp__builtin_ilogb(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_remquo:
+  case Builtin::BI__builtin_remquof:
+  case Builtin::BI__builtin_remquol:
+  case Builtin::BI__builtin_remquof128:
+    return interp__builtin_remquo(S, OpPC, Frame, Call);
+
+  case Builtin::BI__builtin_round:
+  case Builtin::BI__builtin_roundf:
+  case Builtin::BI__builtin_roundl:
+  case Builtin::BI__builtin_roundf16:
+  case Builtin::BI__builtin_roundf128:
+    return interp__builtin_round(S, OpPC, Frame, Call);
+
   case Builtin::BI__builtin_iszero:
     return interp__builtin_iszero(S, OpPC, Frame, Call);
 
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 1a4c962801077..f39b9e0d149a1 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -16356,6 +16356,57 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
   };
 
   switch (BuiltinOp) {
+  case Builtin::BI__builtin_lrint:
+  case Builtin::BI__builtin_lrintf:
+  case Builtin::BI__builtin_lrintl:
+  case Builtin::BI__builtin_lrintf128:
+  case Builtin::BI__builtin_llrint:
+  case Builtin::BI__builtin_llrintf:
+  case Builtin::BI__builtin_llrintl:
+  case Builtin::BI__builtin_llrintf128:
+  case Builtin::BI__builtin_lround:
+  case Builtin::BI__builtin_lroundf:
+  case Builtin::BI__builtin_lroundl:
+  case Builtin::BI__builtin_lroundf128:
+  case Builtin::BI__builtin_llround:
+  case Builtin::BI__builtin_llroundf:
+  case Builtin::BI__builtin_llroundl:
+  case Builtin::BI__builtin_llroundf128: {
+    APFloat FloatVal(0.0);
+    if (!EvaluateFloat(E->getArg(0), FloatVal, Info))
+      return false;
+
+    llvm::RoundingMode RM;
+    switch (BuiltinOp) {
+    case Builtin::BI__builtin_lround:
+    case Builtin::BI__builtin_lroundf:
+    case Builtin::BI__builtin_lroundl:
+    case Builtin::BI__builtin_lroundf128:
+    case Builtin::BI__builtin_llround:
+    case Builtin::BI__builtin_llroundf:
+    case Builtin::BI__builtin_llroundl:
+    case Builtin::BI__builtin_llroundf128:
+      RM = llvm::RoundingMode::NearestTiesToAway;
+      break;
+    default:
+      RM = getActiveRoundingMode(Info, E);
+      break;
+    }
+
+    FloatVal.roundToIntegral(RM);
+
+    APSInt IntVal(Info.Ctx.getTypeSize(E->getType()),
+                  E->getType()->isUnsignedIntegerOrEnumerationType());
+    bool IsExact = false;
+    APFloat::opStatus Status = FloatVal.convertToInteger(IntVal, RM, &IsExact);
+
+    if (Status & APFloat::opInvalidOp)
+      Info.CCEDiag(E, diag::note_constexpr_float_arithmetic)
+          << (FloatVal.isNaN() ? 1 : 0);
+
+    return Success(IntVal, E);
+  }
+
   default:
     return false;
 
@@ -16825,6 +16876,17 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
     return Success(Val.popcount() % 2, E);
   }
 
+  case Builtin::BI__builtin_ilogb:
+  case Builtin::BI__builtin_ilogbf:
+  case Builtin::BI__builtin_ilogbl:
+  case Builtin::BI__builtin_ilogbf128: {
+    APFloat FloatVal(0.0);
+    if (!EvaluateFloat(E->getArg(0), FloatVal, Info))
+      return false;
+
+    return Success(ilogb(FloatVal), E);
+  }
+
   case Builtin::BI__builtin_abs:
   case Builtin::BI__builtin_labs:
   case Builtin::BI__builtin_llabs: {
@@ -19962,6 +20024,272 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return true;
   }
 
+  case Builtin::BI__builtin_nearbyint:
+  case Builtin::BI__builtin_nearbyintf:
+  case Builtin::BI__builtin_nearbyintl:
+  case Builtin::BI__builtin_nearbyintf16:
+  case Builtin::BI__builtin_nearbyintf128:
+  case Builtin::BI__builtin_rint:
+  case Builtin::BI__builtin_rintf:
+  case Builtin::BI__builtin_rintl:
+  case Builtin::BI__builtin_rintf16:
+  case Builtin::BI__builtin_rintf128: {
+    if (!EvaluateFloat(E->getArg(0), Result, Info))
+      return false;
+    llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+    Result.roundToIntegral(RM);
+    return true;
+  }
+
+  case Builtin::BI__builtin_round:
+  case Builtin::BI__builtin_roundf:
+  case Builtin::BI__builtin_roundl:
+  case Builtin::BI__builtin_roundf16:
+  case Builtin::BI__builtin_roundf128: {
+    if (!EvaluateFloat(E->getArg(0), Result, Info))
+      return false;
+    Result.roundToIntegral(llvm::RoundingMode::NearestTiesToAway);
+    return true;
+  }
+
+  case Builtin::BI__builtin_ceil:
+  case Builtin::BI__builtin_ceilf:
+  case Builtin::BI__builtin_ceill:
+  case Builtin::BI__builtin_ceilf16:
+  case Builtin::BI__builtin_ceilf128:
+  case Builtin::BI__builtin_floor:
+  case Builtin::BI__builtin_floorf:
+  case Builtin::BI__builtin_floorl:
+  case Builtin::BI__builtin_floorf16:
+  case Builtin::BI__builtin_floorf128:
+  case Builtin::BI__builtin_trunc:
+  case Builtin::BI__builtin_truncf:
+  case Builtin::BI__builtin_truncl:
+  case Builtin::BI__builtin_truncf16:
+  case Builtin::BI__builtin_truncf128: {
+    if (!EvaluateFloat(E->getArg(0), Result, Info))
+      return false;
+    llvm::RoundingMode RM;
+    switch (E->getBuiltinCallee()) {
+    case Builtin::BI__builtin_ceil:
+    case Builtin::BI__builtin_ceilf:
+    case Builtin::BI__builtin_ceill:
+    case Builtin::BI__builtin_ceilf16:
+    case Builtin::BI__builtin_ceilf128:
+      RM = llvm::RoundingMode::TowardPositive;
+      break;
+    case Builtin::BI__builtin_floor:
+    case Builtin::BI__builtin_floorf:
+    case Builtin::BI__builtin_floorl:
+    case Builtin::BI__builtin_floorf16:
+    case Builtin::BI__builtin_floorf128:
+      RM = llvm::RoundingMode::TowardNegative;
+      break;
+    default:
+      RM = llvm::RoundingMode::TowardZero;
+      break;
+    }
+    Result.roundToIntegral(RM);
+    return true;
+  }
+
+  case Builtin::BI__builtin_fdim:
+  case Builtin::BI__builtin_fdimf:
+  case Builtin::BI__builtin_fdiml:
+  case Builtin::BI__builtin_fdimf128: {
+    APFloat RHS(0.);
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info))
+      return false;
+    if (Result.compare(RHS) == APFloat::cmpGreaterThan) {
+      Result.subtract(RHS, APFloat::rmNearestTiesToEven);
+    } else if (Result.isNaN() || RHS.isNaN()) {
+      Result.add(RHS, APFloat::rmNearestTiesToEven);
+    } else {
+      Result = APFloat::getZero(Result.getSemantics());
+    }
+    return true;
+  }
+
+  case Builtin::BI__builtin_fma:
+  case Builtin::BI__builtin_fmaf:
+  case Builtin::BI__builtin_fmal:
+  case Builtin::BI__builtin_fmaf16:
+  case Builtin::BI__builtin_fmaf128: {
+    APFloat RHS(0.), Third(0.);
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info) ||
+        !EvaluateFloat(E->getArg(2), Third, Info))
+      return false;
+
+    llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+    Result.fusedMultiplyAdd(RHS, Third, RM);
+    return true;
+  }
+
+  case Builtin::BI__builtin_fmod:
+  case Builtin::BI__builtin_fmodf:
+  case Builtin::BI__builtin_fmodl:
+  case Builtin::BI__builtin_fmodf16:
+  case Builtin::BI__builtin_fmodf128: {
+    APFloat RHS(0.);
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info))
+      return false;
+    Result.mod(RHS);
+    return true;
+  }
+
+  case Builtin::BI__builtin_remainder:
+  case Builtin::BI__builtin_remainderf:
+  case Builtin::BI__builtin_remainderl:
+  case Builtin::BI__builtin_remainderf128: {
+    APFloat RHS(0.);
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info))
+      return false;
+    Result.remainder(RHS);
+    return true;
+  }
+
+  case Builtin::BI__builtin_nextafter:
+  case Builtin::BI__builtin_nextafterf:
+  case Builtin::BI__builtin_nextafterl:
+  case Builtin::BI__builtin_nextafterf128:
+  case Builtin::BI__builtin_nexttoward:
+  case Builtin::BI__builtin_nexttowardf:
+  case Builtin::BI__builtin_nexttowardl:
+  case Builtin::BI__builtin_nexttowardf128: {
+    APFloat RHS(0.);
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info))
+      return false;
+
+    if (Result.isNaN())
+      return true;
+
+    if (RHS.isNaN()) {
+      bool LoseInfo = false;
+      Result = RHS;
+      Result.convert(Info.Ctx.getFloatTypeSemantics(E->getType()),
+                     APFloat::rmNearestTiesToEven, &LoseInfo);
+      return true;
+    }
+
+    APFloat ResultCopy = Result;
+    bool LoseInfo = false;
+    ResultCopy.convert(RHS.getSemantics(), APFloat::rmNearestTiesToEven,
+                       &LoseInfo);
+    APFloat::cmpResult Res = ResultCopy.compare(RHS);
+
+    if (Res == APFloat::cmpEqual)
+      return true;
+
+    Result.next(Res == APFloat::cmpGreaterThan);
+    return true;
+  }
+
+  case Builtin::BI__builtin_scalbn:
+  case Builtin::BI__builtin_scalbnf:
+  case Builtin::BI__builtin_scalbnl:
+  case Builtin::BI__builtin_scalbnf128:
+  case Builtin::BI__builtin_scalbln:
+  case Builtin::BI__builtin_scalblnf:
+  case Builtin::BI__builtin_scalblnl:
+  case Builtin::BI__builtin_scalblnf128:
+  case Builtin::BI__builtin_ldexp:
+  case Builtin::BI__builtin_ldexpf:
+  case Builtin::BI__builtin_ldexpl:
+  case Builtin::BI__builtin_ldexpf16:
+  case Builtin::BI__builtin_ldexpf128: {
+    APSInt Exp;
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateInteger(E->getArg(1), Exp, Info))
+      return false;
+
+    llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+    Result = scalbn(Result, Exp.getExtValue(), RM);
+    return true;
+  }
+
+  case Builtin::BI__builtin_frexp:
+  case Builtin::BI__builtin_frexpf:
+  case Builtin::BI__builtin_frexpl:
+  case Builtin::BI__builtin_frexpf16:
+  case Builtin::BI__builtin_frexpf128: {
+    LValue ExpLVal;
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluatePointer(E->getArg(1), ExpLVal, Info))
+      return false;
+
+    int Exp = 0;
+    llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+    Result = frexp(Result, Exp, RM);
+
+    QualType PointeeType = E->getArg(1)->getType()->getPointeeType();
+    APValue APV{APSInt(Info.Ctx.getTypeSize(PointeeType), false)};
+    APV.getInt() = Exp;
+    if (!handleAssignment(Info, E, ExpLVal, PointeeType, APV))
+      return false;
+    return true;
+  }
+
+  case Builtin::BI__builtin_modf:
+  case Builtin::BI__builtin_modff:
+  case Builtin::BI__builtin_modfl:
+  case Builtin::BI__builtin_modff128: {
+    LValue IptrLVal;
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluatePointer(E->getArg(1), IptrLVal, Info))
+      return false;
+
+    APFloat Integral = Result;
+    Integral.roundToIntegral(APFloat::rmTowardZero);
+
+    QualType PointeeType = E->getArg(1)->getType()->getPointeeType();
+    APValue APV{Integral};
+    if (!handleAssignment(Info, E, IptrLVal, PointeeType, APV))
+      return false;
+
+    if (Result.isInfinity()) {
+      Result = APFloat::getZero(Result.getSemantics(), Result.isNegative());
+    } else {
+      Result.subtract(Integral, APFloat::rmNearestTiesToEven);
+    }
+    return true;
+  }
+
+  case Builtin::BI__builtin_remquo:
+  case Builtin::BI__builtin_remquof:
+  case Builtin::BI__builtin_remquol:
+  case Builtin::BI__builtin_remquof128: {
+    APFloat RHS(0.);
+    LValue QuoLVal;
+    if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+        !EvaluateFloat(E->getArg(1), RHS, Info) ||
+        !EvaluatePointer(E->getArg(2), QuoLVal, Info))
+      return false;
+
+    APFloat Q = Result;
+    if (Q.divide(RHS, APFloat::rmNearestTiesToEven) & APFloat::opInvalidOp)
+      Q = APFloat::getZero(Q.getSemantics());
+    else
+      Q.roundToIntegral(APFloat::rmNearestTiesToEven);
+
+    APSInt QuoInt(
+        Info.Ctx.getTypeSize(E->getArg(2)->getType()->getPointeeType()), false);
+    bool IsExact = false;
+    Q.convertToInteger(QuoInt, APFloat::rmTowardZero, &IsExact);
+
+    APValue APV{QuoInt};
+    if (!handleAssignment(Info, E, QuoLVal,
+                          E->getArg(2)->getType()->getPointeeType(), APV))
+      return false;
+
+    Result.remainder(RHS);
+    return true;
+  }
+
   case Builtin::BI__builtin_elementwise_fma: {
     if (!E->getArg(0)->isPRValue() || !E->getArg(1)->isPRValue() ||
         !E->getArg(2)->isPRValue()) {
diff --git a/clang/test/CodeGen/aix-builtin-mapping.c b/clang/test/CodeGen/aix-builtin-mapping.c
index cc1cc1a44f32c..2c378f426da2c 100644
--- a/clang/test/CodeGen/aix-builtin-mapping.c
+++ b/clang/test/CodeGen/aix-builtin-mapping.c
@@ -6,17 +6,18 @@
 // RUN: %clang_cc1 -triple powerpc-ibm-aix -mlong-double-64 -emit-llvm -o - %s | FileCheck -check-prefix=CHECK %s
 // RUN: %clang_cc1 -triple powerpc64-ibm-aix -mlong-double-64 -emit-llvm -o - %s | FileCheck -check-prefix=CHECK %s
 
+long double arg = 1.0L;
 int main()
 {
   int DummyInt;
   long double DummyLongDouble;
   long double returnValue;
 
-  returnValue = __builtin_modfl(1.0L, &DummyLongDouble);
-  returnValue = __builtin_frexpl(0.0L, &DummyInt);
-  returnValue = __builtin_ldexpl(1.0L, 1);
+  returnValue = __builtin_modfl(arg, &DummyLongDouble);
+  returnValue = __builtin_frexpl(arg, &DummyInt);
+  returnValue = __builtin_ldexpl(arg, 1);
 }
 
-// CHECK: %{{.+}} = call { double, double } @llvm.modf.f64(double 1.000000e+00)
-// CHECK: %{{.+}} = call { double, i32 } @llvm.frexp.f64.i32(double 0.000000e+00)
-// CHECK: %{{.+}} = call double @llvm.ldexp.f64.i32(double 1.000000e+00, i32 1)
+// CHECK: %{{.+}} = call { double, double } @llvm.modf.f64(double %{{.+}})
+// CHECK: %{{.+}} = call { double, i32 } @llvm.frexp.f64.i32(double %{{.+}})
+// CHECK: %{{.+}} = call double @llvm.ldexp.f64.i32(double %{{.+}}, i32 1)
diff --git a/clang/test/CodeGen/logb_scalbn.c b/clang/test/CodeGen/logb_scalbn.c
index 52c52bcb292be..a2832ea068dd5 100644
--- a/clang/test/CodeGen/logb_scalbn.c
+++ b/clang/test/CodeGen/logb_scalbn.c
@@ -760,101 +760,180 @@ void test_logb_var(double a) {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
 // CHECK-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] to ptr
-// CHECK-NEXT:    [[TMP0:%.*]] = call float @llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// CHECK-NEXT:    store float [[TMP0]], ptr [[D1_ASCAST]], align 4
+// CHECK-NEXT:    store float 0x40D0B33340000000, ptr [[D1_ASCAST]], align 4
 // CHECK-NEXT:    ret void
 // DEFAULT-LABEL: define dso_local void @test_scalbnf(
-// DEFAULT-SAME: ) #[[ATTR0]] {
+// DEFAULT-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // DEFAULT-NEXT:  [[ENTRY:.*:]]
+// DEFAULT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// DEFAULT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // DEFAULT-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
+// DEFAULT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// DEFAULT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
 // DEFAULT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] to ptr
-// DEFAULT-NEXT:    [[TMP0:%.*]] = call float @llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// DEFAULT-NEXT:    store float [[TMP0]], ptr [[D1_ASCAST]], align 4
+// DEFAULT-NEXT:    store float [[A]], ptr [[A_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    [[TMP2:%.*]] = call float @llvm.ldexp.f32.i32(float [[TMP0]], i32 [[TMP1]])
+// DEFAULT-NEXT:    store float [[TMP2]], ptr [[D1_ASCAST]], align 4
 // DEFAULT-NEXT:    ret void
 //
 // IGNORE-LABEL: define dso_local void @test_scalbnf(
-// IGNORE-SAME: ) #[[ATTR0]] {
+// IGNORE-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // IGNORE-NEXT:  [[ENTRY:.*:]]
+// IGNORE-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// IGNORE-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // IGNORE-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
+// IGNORE-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// IGNORE-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // IGNORE-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// IGNORE-NEXT:    [[TMP0:%.*]] = call float @llvm.ldexp.f32.i32(float 
0x4030B33340000000, i32 10)
-// IGNORE-NEXT:    store float [[TMP0]], ptr [[D1_ASCAST]], align 4
+// IGNORE-NEXT:    store float [[A]], ptr [[A_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    [[TMP2:%.*]] = call float @llvm.ldexp.f32.i32(float 
[[TMP0]], i32 [[TMP1]])
+// IGNORE-NEXT:    store float [[TMP2]], ptr [[D1_ASCAST]], align 4
 // IGNORE-NEXT:    ret void
 //
 // STRICT-LABEL: define dso_local void @test_scalbnf(
-// STRICT-SAME: ) #[[ATTR0]] {
+// STRICT-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // STRICT-NEXT:  [[ENTRY:.*:]]
+// STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// STRICT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // STRICT-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
+// STRICT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// STRICT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // STRICT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// STRICT-NEXT:    [[TMP0:%.*]] = call float @llvm.ldexp.f32.i32(float 
0x4030B33340000000, i32 10)
-// STRICT-NEXT:    store float [[TMP0]], ptr [[D1_ASCAST]], align 4
+// STRICT-NEXT:    store float [[A]], ptr [[A_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    [[TMP2:%.*]] = call float @llvm.ldexp.f32.i32(float 
[[TMP0]], i32 [[TMP1]])
+// STRICT-NEXT:    store float [[TMP2]], ptr [[D1_ASCAST]], align 4
 // STRICT-NEXT:    ret void
 //
 // MAYTRAP-LABEL: define dso_local void @test_scalbnf(
-// MAYTRAP-SAME: ) #[[ATTR0]] {
+// MAYTRAP-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // MAYTRAP-NEXT:  [[ENTRY:.*:]]
+// MAYTRAP-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// MAYTRAP-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // MAYTRAP-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
+// MAYTRAP-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// MAYTRAP-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // MAYTRAP-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// MAYTRAP-NEXT:    [[TMP0:%.*]] = call float @llvm.ldexp.f32.i32(float 
0x4030B33340000000, i32 10)
-// MAYTRAP-NEXT:    store float [[TMP0]], ptr [[D1_ASCAST]], align 4
+// MAYTRAP-NEXT:    store float [[A]], ptr [[A_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    [[TMP2:%.*]] = call float @llvm.ldexp.f32.i32(float 
[[TMP0]], i32 [[TMP1]])
+// MAYTRAP-NEXT:    store float [[TMP2]], ptr [[D1_ASCAST]], align 4
 // MAYTRAP-NEXT:    ret void
 //
 // ERRNO-LABEL: define dso_local void @test_scalbnf(
-// ERRNO-SAME: ) #[[ATTR0]] {
+// ERRNO-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // ERRNO-NEXT:  [[ENTRY:.*:]]
+// ERRNO-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4, addrspace(5)
+// ERRNO-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // ERRNO-NEXT:    [[D1:%.*]] = alloca float, align 4, addrspace(5)
+// ERRNO-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// ERRNO-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // ERRNO-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] to 
ptr
-// ERRNO-NEXT:    [[CALL:%.*]] = call float @scalbnf(float noundef 
0x4030B33340000000, i32 noundef 10) #[[ATTR2]]
+// ERRNO-NEXT:    store float [[A]], ptr [[A_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    [[TMP0:%.*]] = load float, ptr [[A_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    [[CALL:%.*]] = call float @scalbnf(float noundef [[TMP0]], 
i32 noundef [[TMP1]]) #[[ATTR2]]
 // ERRNO-NEXT:    store float [[CALL]], ptr [[D1_ASCAST]], align 4
 // ERRNO-NEXT:    ret void
 //
 // AMDGCNSPIRV-DEFAULT-LABEL: define spir_func void @test_scalbnf(
-// AMDGCNSPIRV-DEFAULT-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-DEFAULT-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-DEFAULT-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-DEFAULT-NEXT:    [[D1:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-DEFAULT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] 
to ptr addrspace(4)
-// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP0:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// AMDGCNSPIRV-DEFAULT-NEXT:    store float [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    store float [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP2:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-DEFAULT-NEXT:    store float [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
 // AMDGCNSPIRV-DEFAULT-NEXT:    ret void
 //
 // AMDGCNSPIRV-IGNORE-LABEL: define spir_func void @test_scalbnf(
-// AMDGCNSPIRV-IGNORE-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-IGNORE-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-IGNORE-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-IGNORE-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-IGNORE-NEXT:    [[D1:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-IGNORE-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-IGNORE-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP0:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// AMDGCNSPIRV-IGNORE-NEXT:    store float [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    store float [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP2:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-IGNORE-NEXT:    store float [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
 // AMDGCNSPIRV-IGNORE-NEXT:    ret void
 //
 // AMDGCNSPIRV-STRICT-LABEL: define spir_func void @test_scalbnf(
-// AMDGCNSPIRV-STRICT-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-STRICT-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-STRICT-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-STRICT-NEXT:    [[D1:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-STRICT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-STRICT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-STRICT-NEXT:    [[TMP0:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// AMDGCNSPIRV-STRICT-NEXT:    store float [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    store float [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP2:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-STRICT-NEXT:    store float [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
 // AMDGCNSPIRV-STRICT-NEXT:    ret void
 //
 // AMDGCNSPIRV-MAYTRAP-LABEL: define spir_func void @test_scalbnf(
-// AMDGCNSPIRV-MAYTRAP-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-MAYTRAP-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-MAYTRAP-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-MAYTRAP-NEXT:    [[D1:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-MAYTRAP-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] 
to ptr addrspace(4)
-// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP0:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float 0x4030B33340000000, i32 10)
-// AMDGCNSPIRV-MAYTRAP-NEXT:    store float [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store float [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP2:%.*]] = call addrspace(4) float 
@llvm.ldexp.f32.i32(float [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store float [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
 // AMDGCNSPIRV-MAYTRAP-NEXT:    ret void
 //
 // AMDGCNSPIRV-ERRNO-LABEL: define spir_func void @test_scalbnf(
-// AMDGCNSPIRV-ERRNO-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-ERRNO-SAME: float noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-ERRNO-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-ERRNO-NEXT:    [[A_ADDR:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-ERRNO-NEXT:    [[D1:%.*]] = alloca float, align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-ERRNO-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-ERRNO-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-ERRNO-NEXT:    [[CALL:%.*]] = call spir_func addrspace(4) float 
@scalbnf(float noundef 0x4030B33340000000, i32 noundef 10) #[[ATTR2]]
+// AMDGCNSPIRV-ERRNO-NEXT:    store float [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[TMP0:%.*]] = load float, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[CALL:%.*]] = call spir_func addrspace(4) float 
@scalbnf(float noundef [[TMP0]], i32 noundef [[TMP1]]) #[[ATTR2]]
 // AMDGCNSPIRV-ERRNO-NEXT:    store float [[CALL]], ptr addrspace(4) 
[[D1_ASCAST]], align 4
 // AMDGCNSPIRV-ERRNO-NEXT:    ret void
 //
-void test_scalbnf() {
-  float D1 = __builtin_scalbnf(16.7f, 10);
+void test_scalbnf(float a, int b) {
+  float D1 = __builtin_scalbnf(a, b);
 }
 // CHECK-LABEL: define dso_local void @test_scalbnf_var1(
 // CHECK-SAME: float noundef [[A:%.*]]) #[[ATTR0]] {
@@ -1341,101 +1420,180 @@ void test_scalbnf_var3(float a, int b) {
 // CHECK-NEXT:  [[ENTRY:.*:]]
 // CHECK-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
 // CHECK-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] to 
ptr
-// CHECK-NEXT:    [[TMP0:%.*]] = call double @llvm.ldexp.f64.i32(double 
1.720000e+01, i32 10)
-// CHECK-NEXT:    store double [[TMP0]], ptr [[D1_ASCAST]], align 8
+// CHECK-NEXT:    store double 1.761280e+04, ptr [[D1_ASCAST]], align 8
 // CHECK-NEXT:    ret void
 // DEFAULT-LABEL: define dso_local void @test_scalbn(
-// DEFAULT-SAME: ) #[[ATTR0]] {
+// DEFAULT-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // DEFAULT-NEXT:  [[ENTRY:.*:]]
+// DEFAULT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// DEFAULT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // DEFAULT-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
+// DEFAULT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// DEFAULT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // DEFAULT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// DEFAULT-NEXT:    [[TMP0:%.*]] = call double @llvm.ldexp.f64.i32(double 
1.720000e+01, i32 10)
-// DEFAULT-NEXT:    store double [[TMP0]], ptr [[D1_ASCAST]], align 8
+// DEFAULT-NEXT:    store double [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// DEFAULT-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR_ASCAST]], align 8
+// DEFAULT-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// DEFAULT-NEXT:    [[TMP2:%.*]] = call double @llvm.ldexp.f64.i32(double 
[[TMP0]], i32 [[TMP1]])
+// DEFAULT-NEXT:    store double [[TMP2]], ptr [[D1_ASCAST]], align 8
 // DEFAULT-NEXT:    ret void
 //
 // IGNORE-LABEL: define dso_local void @test_scalbn(
-// IGNORE-SAME: ) #[[ATTR0]] {
+// IGNORE-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // IGNORE-NEXT:  [[ENTRY:.*:]]
+// IGNORE-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// IGNORE-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // IGNORE-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
+// IGNORE-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// IGNORE-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // IGNORE-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// IGNORE-NEXT:    [[TMP0:%.*]] = call double @llvm.ldexp.f64.i32(double 
1.720000e+01, i32 10)
-// IGNORE-NEXT:    store double [[TMP0]], ptr [[D1_ASCAST]], align 8
+// IGNORE-NEXT:    store double [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// IGNORE-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR_ASCAST]], align 8
+// IGNORE-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// IGNORE-NEXT:    [[TMP2:%.*]] = call double @llvm.ldexp.f64.i32(double 
[[TMP0]], i32 [[TMP1]])
+// IGNORE-NEXT:    store double [[TMP2]], ptr [[D1_ASCAST]], align 8
 // IGNORE-NEXT:    ret void
 //
 // STRICT-LABEL: define dso_local void @test_scalbn(
-// STRICT-SAME: ) #[[ATTR0]] {
+// STRICT-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // STRICT-NEXT:  [[ENTRY:.*:]]
+// STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// STRICT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // STRICT-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
+// STRICT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// STRICT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // STRICT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.ldexp.f64.i32(double 
1.720000e+01, i32 10)
-// STRICT-NEXT:    store double [[TMP0]], ptr [[D1_ASCAST]], align 8
+// STRICT-NEXT:    store double [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// STRICT-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR_ASCAST]], align 8
+// STRICT-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.ldexp.f64.i32(double 
[[TMP0]], i32 [[TMP1]])
+// STRICT-NEXT:    store double [[TMP2]], ptr [[D1_ASCAST]], align 8
 // STRICT-NEXT:    ret void
 //
 // MAYTRAP-LABEL: define dso_local void @test_scalbn(
-// MAYTRAP-SAME: ) #[[ATTR0]] {
+// MAYTRAP-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // MAYTRAP-NEXT:  [[ENTRY:.*:]]
+// MAYTRAP-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// MAYTRAP-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // MAYTRAP-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
+// MAYTRAP-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// MAYTRAP-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // MAYTRAP-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] 
to ptr
-// MAYTRAP-NEXT:    [[TMP0:%.*]] = call double @llvm.ldexp.f64.i32(double 
1.720000e+01, i32 10)
-// MAYTRAP-NEXT:    store double [[TMP0]], ptr [[D1_ASCAST]], align 8
+// MAYTRAP-NEXT:    store double [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// MAYTRAP-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR_ASCAST]], align 8
+// MAYTRAP-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// MAYTRAP-NEXT:    [[TMP2:%.*]] = call double @llvm.ldexp.f64.i32(double 
[[TMP0]], i32 [[TMP1]])
+// MAYTRAP-NEXT:    store double [[TMP2]], ptr [[D1_ASCAST]], align 8
 // MAYTRAP-NEXT:    ret void
 //
 // ERRNO-LABEL: define dso_local void @test_scalbn(
-// ERRNO-SAME: ) #[[ATTR0]] {
+// ERRNO-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) #[[ATTR0]] {
 // ERRNO-NEXT:  [[ENTRY:.*:]]
+// ERRNO-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8, addrspace(5)
+// ERRNO-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
 // ERRNO-NEXT:    [[D1:%.*]] = alloca double, align 8, addrspace(5)
+// ERRNO-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[A_ADDR]] to ptr
+// ERRNO-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) 
[[B_ADDR]] to ptr
 // ERRNO-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[D1]] to 
ptr
-// ERRNO-NEXT:    [[CALL:%.*]] = call double @scalbn(double noundef 
1.720000e+01, i32 noundef 10) #[[ATTR2]]
+// ERRNO-NEXT:    store double [[A]], ptr [[A_ADDR_ASCAST]], align 8
+// ERRNO-NEXT:    store i32 [[B]], ptr [[B_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    [[TMP0:%.*]] = load double, ptr [[A_ADDR_ASCAST]], align 8
+// ERRNO-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR_ASCAST]], align 4
+// ERRNO-NEXT:    [[CALL:%.*]] = call double @scalbn(double noundef [[TMP0]], 
i32 noundef [[TMP1]]) #[[ATTR2]]
 // ERRNO-NEXT:    store double [[CALL]], ptr [[D1_ASCAST]], align 8
 // ERRNO-NEXT:    ret void
 //
 // AMDGCNSPIRV-DEFAULT-LABEL: define spir_func void @test_scalbn(
-// AMDGCNSPIRV-DEFAULT-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-DEFAULT-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-DEFAULT-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-DEFAULT-NEXT:    [[D1:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-DEFAULT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] 
to ptr addrspace(4)
-// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP0:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double 1.720000e+01, i32 10)
-// AMDGCNSPIRV-DEFAULT-NEXT:    store double [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
+// AMDGCNSPIRV-DEFAULT-NEXT:    store double [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-DEFAULT-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP0:%.*]] = load double, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-DEFAULT-NEXT:    [[TMP2:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-DEFAULT-NEXT:    store double [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
 // AMDGCNSPIRV-DEFAULT-NEXT:    ret void
 //
 // AMDGCNSPIRV-IGNORE-LABEL: define spir_func void @test_scalbn(
-// AMDGCNSPIRV-IGNORE-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-IGNORE-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-IGNORE-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-IGNORE-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-IGNORE-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-IGNORE-NEXT:    [[D1:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-IGNORE-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-IGNORE-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-IGNORE-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP0:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double 1.720000e+01, i32 10)
-// AMDGCNSPIRV-IGNORE-NEXT:    store double [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
+// AMDGCNSPIRV-IGNORE-NEXT:    store double [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-IGNORE-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP0:%.*]] = load double, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-IGNORE-NEXT:    [[TMP2:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-IGNORE-NEXT:    store double [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
 // AMDGCNSPIRV-IGNORE-NEXT:    ret void
 //
 // AMDGCNSPIRV-STRICT-LABEL: define spir_func void @test_scalbn(
-// AMDGCNSPIRV-STRICT-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-STRICT-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-STRICT-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-STRICT-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-STRICT-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-STRICT-NEXT:    [[D1:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-STRICT-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-STRICT-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-STRICT-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-STRICT-NEXT:    [[TMP0:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double 1.720000e+01, i32 10)
-// AMDGCNSPIRV-STRICT-NEXT:    store double [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
+// AMDGCNSPIRV-STRICT-NEXT:    store double [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-STRICT-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP0:%.*]] = load double, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-STRICT-NEXT:    [[TMP2:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-STRICT-NEXT:    store double [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
 // AMDGCNSPIRV-STRICT-NEXT:    ret void
 //
 // AMDGCNSPIRV-MAYTRAP-LABEL: define spir_func void @test_scalbn(
-// AMDGCNSPIRV-MAYTRAP-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-MAYTRAP-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-MAYTRAP-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-MAYTRAP-NEXT:    [[D1:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-MAYTRAP-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] 
to ptr addrspace(4)
-// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP0:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double 1.720000e+01, i32 10)
-// AMDGCNSPIRV-MAYTRAP-NEXT:    store double [[TMP0]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store double [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP0:%.*]] = load double, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-MAYTRAP-NEXT:    [[TMP2:%.*]] = call addrspace(4) double 
@llvm.ldexp.f64.i32(double [[TMP0]], i32 [[TMP1]])
+// AMDGCNSPIRV-MAYTRAP-NEXT:    store double [[TMP2]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
 // AMDGCNSPIRV-MAYTRAP-NEXT:    ret void
 //
 // AMDGCNSPIRV-ERRNO-LABEL: define spir_func void @test_scalbn(
-// AMDGCNSPIRV-ERRNO-SAME: ) addrspace(4) #[[ATTR0]] {
+// AMDGCNSPIRV-ERRNO-SAME: double noundef [[A:%.*]], i32 noundef [[B:%.*]]) 
addrspace(4) #[[ATTR0]] {
 // AMDGCNSPIRV-ERRNO-NEXT:  [[ENTRY:.*:]]
+// AMDGCNSPIRV-ERRNO-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-ERRNO-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
 // AMDGCNSPIRV-ERRNO-NEXT:    [[D1:%.*]] = alloca double, align 8
+// AMDGCNSPIRV-ERRNO-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[A_ADDR]] to ptr addrspace(4)
+// AMDGCNSPIRV-ERRNO-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr 
[[B_ADDR]] to ptr addrspace(4)
 // AMDGCNSPIRV-ERRNO-NEXT:    [[D1_ASCAST:%.*]] = addrspacecast ptr [[D1]] to 
ptr addrspace(4)
-// AMDGCNSPIRV-ERRNO-NEXT:    [[CALL:%.*]] = call spir_func addrspace(4) 
double @scalbn(double noundef 1.720000e+01, i32 noundef 10) #[[ATTR2]]
+// AMDGCNSPIRV-ERRNO-NEXT:    store double [[A]], ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-ERRNO-NEXT:    store i32 [[B]], ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[TMP0:%.*]] = load double, ptr addrspace(4) 
[[A_ADDR_ASCAST]], align 8
+// AMDGCNSPIRV-ERRNO-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) 
[[B_ADDR_ASCAST]], align 4
+// AMDGCNSPIRV-ERRNO-NEXT:    [[CALL:%.*]] = call spir_func addrspace(4) 
double @scalbn(double noundef [[TMP0]], i32 noundef [[TMP1]]) #[[ATTR2]]
 // AMDGCNSPIRV-ERRNO-NEXT:    store double [[CALL]], ptr addrspace(4) 
[[D1_ASCAST]], align 8
 // AMDGCNSPIRV-ERRNO-NEXT:    ret void
 //
-void test_scalbn() {
-  double D1 = __builtin_scalbn(17.2, 10);
+void test_scalbn(double a, int b) {
+  double D1 = __builtin_scalbn(a, b);
 }
 // CHECK-LABEL: define dso_local void @test_scalbn_var1(
 // CHECK-SAME: double noundef [[A:%.*]]) #[[ATTR0]] {
diff --git a/clang/test/SemaCXX/constexpr-cmath-builtins.cpp 
b/clang/test/SemaCXX/constexpr-cmath-builtins.cpp
new file mode 100644
index 0000000000000..a7cc30efbccb8
--- /dev/null
+++ b/clang/test/SemaCXX/constexpr-cmath-builtins.cpp
@@ -0,0 +1,261 @@
+// RUN: %clang_cc1 -fexperimental-new-constant-interpreter -verify -std=c++20 
%s
+// RUN: %clang_cc1 -verify -std=c++20 %s
+
+// expected-no-diagnostics
+
+static_assert(__builtin_nearbyint(1.1) == 1.0);
+static_assert(__builtin_nearbyint(1.9) == 2.0);
+static_assert(__builtin_nearbyint(-1.1) == -1.0);
+static_assert(__builtin_nearbyint(-1.9) == -2.0);
+
+static_assert(__builtin_nearbyintf(1.1f) == 1.0f);
+static_assert(__builtin_nearbyintl(1.1l) == 1.0l);
+static_assert(__builtin_nearbyintf16(1.1f16) == 1.0f16);
+static_assert(__builtin_nearbyintf128(1.1) == 1.0);
+static_assert(__builtin_isnan(__builtin_nearbyint(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_nearbyint(__builtin_inf())));
+
+// Test ties to even (default rounding mode)
+static_assert(__builtin_nearbyint(1.5) == 2.0);
+static_assert(__builtin_nearbyint(2.5) == 2.0);
+
+// rint tests
+static_assert(__builtin_rint(1.1) == 1.0);
+static_assert(__builtin_rint(1.9) == 2.0);
+static_assert(__builtin_rintf(1.1f) == 1.0f);
+static_assert(__builtin_rintl(1.1l) == 1.0l);
+static_assert(__builtin_rintf16(1.1f16) == 1.0f16);
+static_assert(__builtin_rintf128(1.1) == 1.0);
+static_assert(__builtin_isnan(__builtin_rint(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_rint(__builtin_inf())));
+
+// lrint tests
+static_assert(__builtin_lrint(1.1) == 1);
+static_assert(__builtin_lrint(1.9) == 2);
+static_assert(__builtin_lrintf(1.1f) == 1);
+static_assert(__builtin_lrintl(1.1l) == 1);
+static_assert(__builtin_lrintf128(1.1) == 1);
+
+// llrint tests
+static_assert(__builtin_llrint(1.1) == 1LL);
+static_assert(__builtin_llrint(1.9) == 2LL);
+static_assert(__builtin_llrintf(1.1f) == 1LL);
+static_assert(__builtin_llrintl(1.1l) == 1LL);
+static_assert(__builtin_llrintf128(1.1) == 1LL);
+
+// round tests
+static_assert(__builtin_round(1.1) == 1.0);
+static_assert(__builtin_round(1.5) == 2.0);
+static_assert(__builtin_round(1.9) == 2.0);
+static_assert(__builtin_round(-1.5) == -2.0);
+static_assert(__builtin_roundf16(1.5f16) == 2.0f16);
+static_assert(__builtin_roundf128(1.5) == 2.0);
+static_assert(__builtin_isnan(__builtin_round(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_round(__builtin_inf())));
+
+// lround tests
+static_assert(__builtin_lround(1.1) == 1);
+static_assert(__builtin_lround(1.5) == 2);
+static_assert(__builtin_lround(-1.5) == -2);
+static_assert(__builtin_lroundf128(1.5) == 2);
+
+// llround tests
+static_assert(__builtin_llround(1.1) == 1LL);
+static_assert(__builtin_llround(1.5) == 2LL);
+static_assert(__builtin_llround(-1.5) == -2LL);
+static_assert(__builtin_llroundf128(1.5) == 2LL);
+
+// ceil tests
+static_assert(__builtin_ceil(1.1) == 2.0);
+static_assert(__builtin_ceil(-1.1) == -1.0);
+static_assert(__builtin_ceilf(1.1f) == 2.0f);
+static_assert(__builtin_ceilf16(1.1f16) == 2.0f16);
+static_assert(__builtin_ceilf128(1.1) == 2.0);
+static_assert(__builtin_isnan(__builtin_ceil(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_ceil(__builtin_inf())));
+
+// floor tests
+static_assert(__builtin_floor(1.1) == 1.0);
+static_assert(__builtin_floor(-1.1) == -2.0);
+static_assert(__builtin_floorf(1.1f) == 1.0f);
+static_assert(__builtin_floorf16(1.1f16) == 1.0f16);
+static_assert(__builtin_floorf128(1.1) == 1.0);
+static_assert(__builtin_isnan(__builtin_floor(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_floor(__builtin_inf())));
+
+// trunc tests
+static_assert(__builtin_trunc(1.1) == 1.0);
+static_assert(__builtin_trunc(-1.1) == -1.0);
+static_assert(__builtin_truncf(1.1f) == 1.0f);
+static_assert(__builtin_truncf16(1.1f16) == 1.0f16);
+static_assert(__builtin_truncf128(1.1) == 1.0);
+static_assert(__builtin_isnan(__builtin_trunc(__builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_trunc(__builtin_inf())));
+
+// fdim tests
+static_assert(__builtin_fdim(3.0, 1.0) == 2.0);
+static_assert(__builtin_fdim(1.0, 3.0) == 0.0);
+static_assert(__builtin_fdimf(3.0f, 1.0f) == 2.0f);
+static_assert(__builtin_fdimf128(3.0, 1.0) == 2.0);
+static_assert(__builtin_isnan(__builtin_fdim(__builtin_nan(""), 1.0)));
+static_assert(__builtin_isnan(__builtin_fdim(1.0, __builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_fdim(__builtin_inf(), 0.0)));
+static_assert(__builtin_fdim(__builtin_inf(), __builtin_inf()) == 0.0);
+
+// fma tests
+static_assert(__builtin_fma(2.0, 3.0, 4.0) == 10.0);
+static_assert(__builtin_fmaf(2.0f, 3.0f, 4.0f) == 10.0f);
+static_assert(__builtin_fmaf16(2.0f16, 3.0f16, 4.0f16) == 10.0f16);
+static_assert(__builtin_fmaf128(2.0, 3.0, 4.0) == 10.0);
+static_assert(__builtin_isnan(__builtin_fma(__builtin_nan(""), 2.0, 3.0)));
+static_assert(__builtin_isnan(__builtin_fma(1.0, __builtin_nan(""), 3.0)));
+static_assert(__builtin_isnan(__builtin_fma(1.0, 2.0, __builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_fma(__builtin_inf(), 2.0, 3.0)));
+
+// fmod tests
+static_assert(__builtin_fmod(5.5, 3.0) == 2.5);
+static_assert(__builtin_fmodf(5.5f, 3.0f) == 2.5f);
+static_assert(__builtin_fmodf16(5.5f16, 3.0f16) == 2.5f16);
+static_assert(__builtin_fmodf128(5.5, 3.0) == 2.5);
+static_assert(__builtin_isnan(__builtin_fmod(__builtin_nan(""), 2.0)));
+static_assert(__builtin_isnan(__builtin_fmod(2.0, __builtin_nan(""))));
+static_assert(__builtin_isnan(__builtin_fmod(__builtin_inf(), 2.0)));
+
+// remainder tests
+static_assert(__builtin_remainder(5.5, 3.0) == -0.5);
+static_assert(__builtin_remainderf(5.5f, 3.0f) == -0.5f);
+static_assert(__builtin_remainderf128(5.5, 3.0) == -0.5);
+static_assert(__builtin_isnan(__builtin_remainder(__builtin_nan(""), 2.0)));
+static_assert(__builtin_isnan(__builtin_remainder(2.0, __builtin_nan(""))));
+static_assert(__builtin_isnan(__builtin_remainder(__builtin_inf(), 2.0)));
+
+// nextafter tests
+static_assert(__builtin_nextafter(1.0, 2.0) > 1.0);
+static_assert(__builtin_nextafter(1.0, 0.0) < 1.0);
+static_assert(__builtin_nextafter(1.0, 1.0) == 1.0);
+static_assert(__builtin_nextafter(0.0, 1.0) > 0.0);
+static_assert(__builtin_nextafter(0.0, -1.0) < 0.0);
+static_assert(__builtin_nextafterf128(1.0, 2.0) > 1.0);
+static_assert(__builtin_isnan(__builtin_nextafter(__builtin_nan(""), 2.0)));
+static_assert(__builtin_isnan(__builtin_nextafter(2.0, __builtin_nan(""))));
+static_assert(__builtin_isinf(__builtin_nextafter(__builtin_inf(), 
__builtin_inf())));
+
+// nexttoward tests
+static_assert(__builtin_nexttoward(1.0, 2.0L) > 1.0);
+static_assert(__builtin_nexttoward(1.0, 1.0L) == 1.0);
+static_assert(__builtin_nexttowardf128(1.0, 2.0L) > 1.0);
+static_assert(__builtin_isnan(__builtin_nexttoward(__builtin_nan(""), 2.0L)));
+static_assert(__builtin_isnan(__builtin_nexttoward(2.0, __builtin_nan(""))));
+
+// scalbn tests
+static_assert(__builtin_scalbn(1.0, 2) == 4.0);
+static_assert(__builtin_scalbnf(1.0f, -1) == 0.5f);
+static_assert(__builtin_scalbnf128(1.0, 2) == 4.0);
+static_assert(__builtin_scalbn(0.0, 2) == 0.0);
+static_assert(__builtin_scalbn(1.0, 0) == 1.0);
+static_assert(__builtin_isnan(__builtin_scalbn(__builtin_nan(""), 2)));
+static_assert(__builtin_isinf(__builtin_scalbn(__builtin_inf(), 2)));
+
+// scalbln tests
+static_assert(__builtin_scalbln(1.0, 2L) == 4.0);
+static_assert(__builtin_scalblnf128(1.0, 2L) == 4.0);
+static_assert(__builtin_isnan(__builtin_scalbln(__builtin_nan(""), 2L)));
+static_assert(__builtin_isinf(__builtin_scalbln(__builtin_inf(), 2L)));
+
+// ldexp tests
+static_assert(__builtin_ldexp(1.0, 3) == 8.0);
+static_assert(__builtin_ldexpf16(1.0f16, 3) == 8.0f16);
+static_assert(__builtin_ldexpf128(1.0, 3) == 8.0);
+static_assert(__builtin_isnan(__builtin_ldexp(__builtin_nan(""), 2)));
+static_assert(__builtin_isinf(__builtin_ldexp(__builtin_inf(), 2)));
+
+// ilogb tests
+static_assert(__builtin_ilogb(1.0) == 0);
+static_assert(__builtin_ilogb(2.0) == 1);
+static_assert(__builtin_ilogb(0.5) == -1);
+static_assert(__builtin_ilogbf(8.0f) == 3);
+static_assert(__builtin_ilogbf128(8.0) == 3);
+static_assert(__builtin_ilogb(0.) == (-__INT_MAX__));
+static_assert(__builtin_ilogb(__builtin_nan("")) == (-__INT_MAX__ - 1));
+static_assert(__builtin_ilogb(__builtin_inf()) == __INT_MAX__);
+
+// remquo tests
+constexpr double test_remquo(double x, double y) {
+  int quo = 0;
+  double rem = __builtin_remquo(x, y, &quo);
+  return rem;
+}
+static_assert(test_remquo(10.0, 3.0) == 1.0);
+
+constexpr int test_remquo_quo(double x, double y) {
+  int quo = 0;
+  __builtin_remquo(x, y, &quo);
+  return quo;
+}
+static_assert(test_remquo_quo(10.0, 3.0) == 3);
+static_assert(test_remquo_quo(10.0, -3.0) == -3);
+
+// remquo NaN cases (per C standard / cppreference):
+// - x or y is NaN
+// - x is ±inf
+// - y is ±0
+static_assert(__builtin_isnan(test_remquo(__builtin_nan(""), 2.0)));
+static_assert(__builtin_isnan(test_remquo(2.0, __builtin_nan(""))));
+static_assert(__builtin_isnan(test_remquo(__builtin_nan(""), 
__builtin_nan(""))));
+static_assert(__builtin_isnan(test_remquo(__builtin_inf(), 2.0)));
+static_assert(__builtin_isnan(test_remquo(-__builtin_inf(), 2.0)));
+static_assert(__builtin_isnan(test_remquo(1.0, 0.0)));
+static_assert(__builtin_isnan(test_remquo(1.0, -0.0)));
+
+// frexp tests
+constexpr double test_frexp_val(double x) {
+  int exp = 0;
+  return __builtin_frexp(x, &exp);
+}
+static_assert(test_frexp_val(8.0) == 0.5);
+
+constexpr int test_frexp_exp(double x) {
+  int exp = 0;
+  __builtin_frexp(x, &exp);
+  return exp;
+}
+static_assert(test_frexp_exp(8.0) == 4);
+
+// frexp special cases: +/- 0
+static_assert(test_frexp_val(0.0) == 0.0);
+static_assert(test_frexp_val(-0.0) == -0.0);
+static_assert(test_frexp_exp(0.0) == 0);
+static_assert(test_frexp_exp(-0.0) == 0);
+// NaN and Inf: LLVM does not specify the exponent value for these cases.
+static_assert(__builtin_isnan(test_frexp_val(__builtin_nan(""))));
+static_assert(__builtin_isinf(test_frexp_val(__builtin_inf())));
+
+// modf tests
+constexpr double test_modf_val(double x) {
+  double iptr = 0;
+  return __builtin_modf(x, &iptr);
+}
+static_assert(test_modf_val(1.5) == 0.5);
+static_assert(test_modf_val(-1.5) == -0.5);
+
+constexpr double test_modf_iptr(double x) {
+  double iptr = 0;
+  __builtin_modf(x, &iptr);
+  return iptr;
+}
+static_assert(test_modf_iptr(1.5) == 1.0);
+static_assert(test_modf_iptr(-1.5) == -1.0);
+
+// modf special values
+constexpr double test_modf_inf_val() {
+  double iptr = 0;
+  return __builtin_modf(__builtin_inf(), &iptr);
+}
+static_assert(test_modf_inf_val() == 0.0);
+
+constexpr double test_modf_inf_iptr() {
+  double iptr = 0;
+  __builtin_modf(__builtin_inf(), &iptr);
+  return iptr;
+}
+static_assert(__builtin_isinf(test_modf_inf_iptr()));

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to