michele.scandale created this revision.
michele.scandale added reviewers: rjmccall, mibintc, Anastasia.
Herald added subscribers: cfe-commits, jvesely.
Herald added a project: clang.
michele.scandale marked an inline comment as done.
michele.scandale added inline comments.
michele.scandale marked 2 inline comments as done.


================
Comment at: clang/test/CodeGen/libcalls.c:11-21
+  // CHECK-FAST: call reassoc nsz arcp afn float @llvm.sqrt.f32(float
   float l0 = sqrtf(a0);
 
   // CHECK-YES: call double @sqrt
   // CHECK-NO: call double @llvm.sqrt.f64(double
-  // CHECK-FAST: call double @llvm.sqrt.f64(double
+  // CHECK-FAST: call reassoc nsz arcp afn double @llvm.sqrt.f64(double
   double l1 = sqrt(a1);
----------------
For CUDA the default FP contract mode is `fast`, therefore the `contract` FMF 
is emitted.


================
Comment at: clang/test/CodeGenCUDA/builtins-amdgcn.cu:13
 // CHECK-LABEL: @_Z12test_ds_fmaxf(
-// CHECK: call float @llvm.amdgcn.ds.fmax(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+// CHECK: call contract float @llvm.amdgcn.ds.fmax(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
 __global__
----------------
For CUDA the default FP contract mode is `fast`, therefore the `contract` FMF 
is emitted.
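
As a rough illustration (a hypothetical snippet, not part of this patch; the
IR shown is what I would expect rather than an actual test), FP arithmetic
compiled as CUDA device code now carries the `contract` flag:

  // compiled in CUDA mode, where -ffp-contract=fast is the default
  float mul_add(float a, float b, float c) {
    return a * b + c; // expected: fmul contract / fadd contract
                      // (the backend may later fuse these into an FMA)
  }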


================
Comment at: clang/test/CodeGenOpenCL/relaxed-fpmath.cl:14
   // FINITE: fdiv nnan ninf float
-  // UNSAFE: fdiv nnan nsz float
+  // UNSAFE: fdiv reassoc nsz arcp afn float
   // MAD: fdiv float
----------------
This change is based on the following:
* `-cl-fast-relaxed-math` = `-cl-unsafe-math-optimizations` + 
`-cl-finite-math-only`
* the GCC option `-funsafe-math-optimizations` and 
`-cl-unsafe-math-optimizations` are described with very similar wording; the 
GCC description explicitly states that no-signed-zeros, reassociation, and 
reciprocal optimizations are enabled, but there is no mention of assuming 
that NaNs do not exist.

See 
https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clBuildProgram.html
and https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
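
Concretely (the kernel below is a hypothetical sketch; the flag sets match the
relaxed-fpmath.cl update in this patch):

  // -cl-unsafe-math-optimizations: reassociation, no signed zeros, reciprocals
  // and approximate functions, but NaNs and infinities are still honored.
  float f(float a, float b) {
    return a / b; // fdiv reassoc nsz arcp afn float
  }
  // -cl-fast-relaxed-math additionally implies -cl-finite-math-only, so the
  // same division is emitted as: fdiv fast float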


This fixes the mapping from CC1 command-line options to the properties
in `LangOptions` describing the floating-point optimization
configuration.
The default fast-math flags for the IR builder are now derived from
those properties to avoid inconsistencies.
Finally, some of the `CodeGenOptions` floating-point optimization
properties have been removed since they now exist in `LangOptions`.
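
For example, with the new mapping (this mirrors the
fp-options-to-fast-math-flags.c test added by this patch):

  float fn(float);
  float test(float a) {
    return a + fn(a);
  }
  // -menable-unsafe-fp-math: the call and the fadd carry "reassoc nsz arcp afn"
  // -ffast-math:             the call and the fadd carry "reassoc nnan ninf nsz arcp afn"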


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D80315

Files:
  clang/include/clang/Basic/CodeGenOptions.def
  clang/include/clang/Basic/LangOptions.h
  clang/lib/CodeGen/BackendUtil.cpp
  clang/lib/CodeGen/CGCall.cpp
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/lib/CodeGen/CodeGenFunction.cpp
  clang/lib/CodeGen/CodeGenFunction.h
  clang/lib/Frontend/CompilerInvocation.cpp
  clang/test/CodeGen/builtins-nvptx-ptx60.cu
  clang/test/CodeGen/complex-math.c
  clang/test/CodeGen/fp-options-to-fast-math-flags.c
  clang/test/CodeGen/libcalls.c
  clang/test/CodeGenCUDA/builtins-amdgcn.cu
  clang/test/CodeGenOpenCL/relaxed-fpmath.cl

Index: clang/test/CodeGenOpenCL/relaxed-fpmath.cl
===================================================================
--- clang/test/CodeGenOpenCL/relaxed-fpmath.cl
+++ clang/test/CodeGenOpenCL/relaxed-fpmath.cl
@@ -11,7 +11,7 @@
   // NORMAL: fdiv float
   // FAST: fdiv fast float
   // FINITE: fdiv nnan ninf float
-  // UNSAFE: fdiv nnan nsz float
+  // UNSAFE: fdiv reassoc nsz arcp afn float
   // MAD: fdiv float
   // NOSIGNED: fdiv nsz float
   return a / b;
@@ -38,7 +38,7 @@
 
 // UNSAFE: "less-precise-fpmad"="true"
 // UNSAFE: "no-infs-fp-math"="false"
-// UNSAFE: "no-nans-fp-math"="true"
+// UNSAFE: "no-nans-fp-math"="false"
 // UNSAFE: "no-signed-zeros-fp-math"="true"
 // UNSAFE: "unsafe-fp-math"="true"
 
Index: clang/test/CodeGenCUDA/builtins-amdgcn.cu
===================================================================
--- clang/test/CodeGenCUDA/builtins-amdgcn.cu
+++ clang/test/CodeGenCUDA/builtins-amdgcn.cu
@@ -10,7 +10,7 @@
 }
 
 // CHECK-LABEL: @_Z12test_ds_fmaxf(
-// CHECK: call float @llvm.amdgcn.ds.fmax(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
+// CHECK: call contract float @llvm.amdgcn.ds.fmax(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
 __global__
 void test_ds_fmax(float src) {
   __shared__ float shared;
Index: clang/test/CodeGen/libcalls.c
===================================================================
--- clang/test/CodeGen/libcalls.c
+++ clang/test/CodeGen/libcalls.c
@@ -8,17 +8,17 @@
 void test_sqrt(float a0, double a1, long double a2) {
   // CHECK-YES: call float @sqrtf
   // CHECK-NO: call float @llvm.sqrt.f32(float
-  // CHECK-FAST: call float @llvm.sqrt.f32(float
+  // CHECK-FAST: call reassoc nsz arcp afn float @llvm.sqrt.f32(float
   float l0 = sqrtf(a0);
 
   // CHECK-YES: call double @sqrt
   // CHECK-NO: call double @llvm.sqrt.f64(double
-  // CHECK-FAST: call double @llvm.sqrt.f64(double
+  // CHECK-FAST: call reassoc nsz arcp afn double @llvm.sqrt.f64(double
   double l1 = sqrt(a1);
 
   // CHECK-YES: call x86_fp80 @sqrtl
   // CHECK-NO: call x86_fp80 @llvm.sqrt.f80(x86_fp80
-  // CHECK-FAST: call x86_fp80 @llvm.sqrt.f80(x86_fp80
+  // CHECK-FAST: call reassoc nsz arcp afn x86_fp80 @llvm.sqrt.f80(x86_fp80
   long double l2 = sqrtl(a2);
 }
 
Index: clang/test/CodeGen/fp-options-to-fast-math-flags.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/fp-options-to-fast-math-flags.c
@@ -0,0 +1,42 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck -check-prefix CHECK-PRECISE %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -menable-no-nans -emit-llvm -o - %s | FileCheck -check-prefix CHECK-NO-NANS %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -menable-no-infs -emit-llvm -o - %s | FileCheck -check-prefix CHECK-NO-INFS %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -ffinite-math-only -emit-llvm -o - %s | FileCheck -check-prefix CHECK-FINITE %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fno-signed-zeros -emit-llvm -o - %s | FileCheck -check-prefix CHECK-NO-SIGNED-ZEROS %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -mreassociate -emit-llvm -o - %s | FileCheck -check-prefix CHECK-REASSOC %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -freciprocal-math -emit-llvm -o - %s | FileCheck -check-prefix CHECK-RECIP %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -menable-unsafe-fp-math -emit-llvm -o - %s | FileCheck -check-prefix CHECK-UNSAFE %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -ffast-math -emit-llvm -o - %s | FileCheck -check-prefix CHECK-FAST %s
+
+float fn(float);
+
+float test(float a) {
+  return a + fn(a);
+}
+
+// CHECK-PRECISE: [[CALL_RES:%.+]] = call float @fn(float {{%.+}})
+// CHECK-PRECISE: {{%.+}} = fadd float {{%.+}}, [[CALL_RES]]
+
+// CHECK-NO-NANS: [[CALL_RES:%.+]] = call nnan float @fn(float {{%.+}})
+// CHECK-NO-NANS: {{%.+}} = fadd nnan float {{%.+}}, [[CALL_RES]]
+
+// CHECK-NO-INFS: [[CALL_RES:%.+]] = call ninf float @fn(float {{%.+}})
+// CHECK-NO-INFS: {{%.+}} = fadd ninf float {{%.+}}, [[CALL_RES]]
+
+// CHECK-FINITE: [[CALL_RES:%.+]] = call nnan ninf float @fn(float {{%.+}})
+// CHECK-FINITE: {{%.+}} = fadd nnan ninf float {{%.+}}, [[CALL_RES]]
+
+// CHECK-NO-SIGNED-ZEROS: [[CALL_RES:%.+]] = call nsz float @fn(float {{%.+}})
+// CHECK-NO-SIGNED-ZEROS: {{%.+}} = fadd nsz float {{%.+}}, [[CALL_RES]]
+
+// CHECK-REASSOC: [[CALL_RES:%.+]] = call reassoc float @fn(float {{%.+}})
+// CHECK-REASSOC: {{%.+}} = fadd reassoc float {{%.+}}, [[CALL_RES]]
+
+// CHECK-RECIP: [[CALL_RES:%.+]] = call arcp float @fn(float {{%.+}})
+// CHECK-RECIP: {{%.+}} = fadd arcp float {{%.+}}, [[CALL_RES]]
+
+// CHECK-UNSAFE: [[CALL_RES:%.+]] = call reassoc nsz arcp afn float @fn(float {{%.+}})
+// CHECK-UNSAFE: {{%.+}} = fadd reassoc nsz arcp afn float {{%.+}}, [[CALL_RES]]
+
+// CHECK-FAST: [[CALL_RES:%.+]] = call reassoc nnan ninf nsz arcp afn float @fn(float {{%.+}})
+// CHECK-FAST: {{%.+}} = fadd reassoc nnan ninf nsz arcp afn float {{%.+}}, [[CALL_RES]]
Index: clang/test/CodeGen/complex-math.c
===================================================================
--- clang/test/CodeGen/complex-math.c
+++ clang/test/CodeGen/complex-math.c
@@ -5,7 +5,7 @@
 // RUN: %clang_cc1 %s -O0 -fno-experimental-new-pass-manager -emit-llvm -triple armv7-none-linux-gnueabi -o - | FileCheck %s --check-prefix=ARM
 // RUN: %clang_cc1 %s -O0 -fno-experimental-new-pass-manager -emit-llvm -triple armv7-none-linux-gnueabihf -o - | FileCheck %s --check-prefix=ARMHF
 // RUN: %clang_cc1 %s -O0 -fno-experimental-new-pass-manager -emit-llvm -triple thumbv7k-apple-watchos2.0 -o - -target-abi aapcs16 | FileCheck %s --check-prefix=ARM7K
-// RUN: %clang_cc1 %s -O0 -fno-experimental-new-pass-manager -emit-llvm -triple aarch64-unknown-unknown -ffast-math -o - | FileCheck %s --check-prefix=AARCH64-FASTMATH
+// RUN: %clang_cc1 %s -O0 -fno-experimental-new-pass-manager -emit-llvm -triple aarch64-unknown-unknown -ffast-math -ffp-contract=fast -o - | FileCheck %s --check-prefix=AARCH64-FASTMATH
 
 float _Complex add_float_rr(float a, float b) {
   // X86-LABEL: @add_float_rr(
Index: clang/test/CodeGen/builtins-nvptx-ptx60.cu
===================================================================
--- clang/test/CodeGen/builtins-nvptx-ptx60.cu
+++ clang/test/CodeGen/builtins-nvptx-ptx60.cu
@@ -45,25 +45,25 @@
   // CHECK: call i32 @llvm.nvvm.shfl.sync.down.i32(i32 {{%[0-9]+}}, i32
   // expected-error@+1 {{'__nvvm_shfl_sync_down_i32' needs target feature ptx60}}
   __nvvm_shfl_sync_down_i32(mask, i, a, b);
-  // CHECK: call float @llvm.nvvm.shfl.sync.down.f32(i32 {{%[0-9]+}}, float
+  // CHECK: call contract float @llvm.nvvm.shfl.sync.down.f32(i32 {{%[0-9]+}}, float
   // expected-error@+1 {{'__nvvm_shfl_sync_down_f32' needs target feature ptx60}}
   __nvvm_shfl_sync_down_f32(mask, f, a, b);
   // CHECK: call i32 @llvm.nvvm.shfl.sync.up.i32(i32 {{%[0-9]+}}, i32
   // expected-error@+1 {{'__nvvm_shfl_sync_up_i32' needs target feature ptx60}}
   __nvvm_shfl_sync_up_i32(mask, i, a, b);
-  // CHECK: call float @llvm.nvvm.shfl.sync.up.f32(i32 {{%[0-9]+}}, float
+  // CHECK: call contract float @llvm.nvvm.shfl.sync.up.f32(i32 {{%[0-9]+}}, float
   // expected-error@+1 {{'__nvvm_shfl_sync_up_f32' needs target feature ptx60}}
   __nvvm_shfl_sync_up_f32(mask, f, a, b);
   // CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 {{%[0-9]+}}, i32
   // expected-error@+1 {{'__nvvm_shfl_sync_bfly_i32' needs target feature ptx60}}
   __nvvm_shfl_sync_bfly_i32(mask, i, a, b);
-  // CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 {{%[0-9]+}}, float
+  // CHECK: call contract float @llvm.nvvm.shfl.sync.bfly.f32(i32 {{%[0-9]+}}, float
   // expected-error@+1 {{'__nvvm_shfl_sync_bfly_f32' needs target feature ptx60}}
   __nvvm_shfl_sync_bfly_f32(mask, f, a, b);
   // CHECK: call i32 @llvm.nvvm.shfl.sync.idx.i32(i32 {{%[0-9]+}}, i32
   // expected-error@+1 {{'__nvvm_shfl_sync_idx_i32' needs target feature ptx60}}
   __nvvm_shfl_sync_idx_i32(mask, i, a, b);
-  // CHECK: call float @llvm.nvvm.shfl.sync.idx.f32(i32 {{%[0-9]+}}, float
+  // CHECK: call contract float @llvm.nvvm.shfl.sync.idx.f32(i32 {{%[0-9]+}}, float
   // expected-error@+1 {{'__nvvm_shfl_sync_idx_f32' needs target feature ptx60}}
   __nvvm_shfl_sync_idx_f32(mask, f, a, b);
 
Index: clang/lib/Frontend/CompilerInvocation.cpp
===================================================================
--- clang/lib/Frontend/CompilerInvocation.cpp
+++ clang/lib/Frontend/CompilerInvocation.cpp
@@ -897,25 +897,11 @@
                           Args.hasArg(OPT_cl_fast_relaxed_math);
   Opts.LimitFloatPrecision =
       std::string(Args.getLastArgValue(OPT_mlimit_float_precision));
-  Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
-                       Args.hasArg(OPT_cl_finite_math_only) ||
-                       Args.hasArg(OPT_cl_fast_relaxed_math));
-  Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
-                       Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
-                       Args.hasArg(OPT_cl_finite_math_only) ||
-                       Args.hasArg(OPT_cl_fast_relaxed_math));
-  Opts.NoSignedZeros = (Args.hasArg(OPT_fno_signed_zeros) ||
-                        Args.hasArg(OPT_cl_no_signed_zeros) ||
-                        Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
-                        Args.hasArg(OPT_cl_fast_relaxed_math));
-  Opts.Reassociate = Args.hasArg(OPT_mreassociate);
   Opts.CorrectlyRoundedDivSqrt =
       Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
   Opts.UniformWGSize =
       Args.hasArg(OPT_cl_uniform_work_group_size);
   Opts.Reciprocals = Args.getAllArgValues(OPT_mrecip_EQ);
-  Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
-  Opts.NoTrappingMath = Args.hasArg(OPT_fno_trapping_math);
   Opts.StrictFloatCastOverflow =
       !Args.hasArg(OPT_fno_strict_float_cast_overflow);
 
@@ -940,9 +926,6 @@
   Opts.StrictReturn = !Args.hasArg(OPT_fno_strict_return);
   Opts.StrictVTablePointers = Args.hasArg(OPT_fstrict_vtable_pointers);
   Opts.ForceEmitVTables = Args.hasArg(OPT_fforce_emit_vtables);
-  Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
-                      Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
-                      Args.hasArg(OPT_cl_fast_relaxed_math);
   Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
   Opts.RelocationModel = getRelocModel(Args, Diags);
   Opts.ThreadModel =
@@ -3188,30 +3171,46 @@
       Opts.NoInlineDefine = true;
 
   Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
-      Args.hasArg(OPT_cl_fast_relaxed_math);
+                  Args.hasArg(OPT_cl_fast_relaxed_math);
   Opts.FiniteMathOnly = Args.hasArg(OPT_ffinite_math_only) ||
-      Args.hasArg(OPT_cl_finite_math_only) ||
-      Args.hasArg(OPT_cl_fast_relaxed_math);
+                        Args.hasArg(OPT_ffast_math) ||
+                        Args.hasArg(OPT_cl_finite_math_only) ||
+                        Args.hasArg(OPT_cl_fast_relaxed_math);
   Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+                      Args.hasArg(OPT_ffast_math) ||
                       Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
                       Args.hasArg(OPT_cl_fast_relaxed_math);
-  Opts.AllowFPReassoc = Opts.FastMath || Args.hasArg(OPT_mreassociate);
-  Opts.NoHonorNaNs = Opts.FastMath || Opts.FiniteMathOnly ||
-                     Args.hasArg(OPT_menable_no_nans) ||
-                     Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+  Opts.AllowFPReassoc = Args.hasArg(OPT_mreassociate) ||
+                        Args.hasArg(OPT_menable_unsafe_fp_math) ||
+                        Args.hasArg(OPT_ffast_math) ||
+                        Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+                        Args.hasArg(OPT_cl_fast_relaxed_math);
+  Opts.NoHonorNaNs = Args.hasArg(OPT_menable_no_nans) ||
+                     Args.hasArg(OPT_ffinite_math_only) ||
+                     Args.hasArg(OPT_ffast_math) ||
                      Args.hasArg(OPT_cl_finite_math_only) ||
                      Args.hasArg(OPT_cl_fast_relaxed_math);
-  Opts.NoHonorInfs = Opts.FastMath || Opts.FiniteMathOnly ||
-                     Args.hasArg(OPT_menable_no_infinities) ||
+  Opts.NoHonorInfs = Args.hasArg(OPT_menable_no_infinities) ||
+                     Args.hasArg(OPT_ffinite_math_only) ||
+                     Args.hasArg(OPT_ffast_math) ||
                      Args.hasArg(OPT_cl_finite_math_only) ||
                      Args.hasArg(OPT_cl_fast_relaxed_math);
-  Opts.NoSignedZero = Opts.FastMath || (Args.hasArg(OPT_fno_signed_zeros) ||
+  Opts.NoSignedZero = Args.hasArg(OPT_fno_signed_zeros) ||
+                      Args.hasArg(OPT_menable_unsafe_fp_math) ||
+                      Args.hasArg(OPT_ffast_math) ||
                       Args.hasArg(OPT_cl_no_signed_zeros) ||
                       Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
-                      Args.hasArg(OPT_cl_fast_relaxed_math));
-  Opts.AllowRecip = Opts.FastMath || Args.hasArg(OPT_freciprocal_math);
+                      Args.hasArg(OPT_cl_fast_relaxed_math);
+  Opts.AllowRecip = Args.hasArg(OPT_freciprocal_math) ||
+                    Args.hasArg(OPT_menable_unsafe_fp_math) ||
+                    Args.hasArg(OPT_ffast_math) ||
+                    Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+                    Args.hasArg(OPT_cl_fast_relaxed_math);
   // Currently there's no clang option to enable this individually
-  Opts.ApproxFunc = Opts.FastMath;
+  Opts.ApproxFunc = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+                    Args.hasArg(OPT_ffast_math) ||
+                    Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+                    Args.hasArg(OPT_cl_fast_relaxed_math);
 
   if (Arg *A = Args.getLastArg(OPT_ffp_contract)) {
     StringRef Val = A->getValue();
Index: clang/lib/CodeGen/CodeGenFunction.h
===================================================================
--- clang/lib/CodeGen/CodeGenFunction.h
+++ clang/lib/CodeGen/CodeGenFunction.h
@@ -4337,6 +4337,9 @@
   /// SetFPModel - Control floating point behavior via fp-model settings.
   void SetFPModel();
 
+  /// Set the codegen fast-math flags.
+  void SetFastMathFlags(FPOptions FPFeatures);
+
 private:
   llvm::MDNode *getRangeForLoadFromType(QualType Ty);
   void EmitReturnOfRValue(RValue RV, QualType Ty);
Index: clang/lib/CodeGen/CodeGenFunction.cpp
===================================================================
--- clang/lib/CodeGen/CodeGenFunction.cpp
+++ clang/lib/CodeGen/CodeGenFunction.cpp
@@ -71,26 +71,7 @@
   if (!suppressNewContext)
     CGM.getCXXABI().getMangleContext().startNewFunction();
 
-  llvm::FastMathFlags FMF;
-  if (CGM.getLangOpts().FastMath)
-    FMF.setFast();
-  if (CGM.getLangOpts().FiniteMathOnly) {
-    FMF.setNoNaNs();
-    FMF.setNoInfs();
-  }
-  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
-    FMF.setNoNaNs();
-  }
-  if (CGM.getCodeGenOpts().NoSignedZeros) {
-    FMF.setNoSignedZeros();
-  }
-  if (CGM.getCodeGenOpts().ReciprocalMath) {
-    FMF.setAllowReciprocal();
-  }
-  if (CGM.getCodeGenOpts().Reassociate) {
-    FMF.setAllowReassoc();
-  }
-  Builder.setFastMathFlags(FMF);
+  SetFastMathFlags(FPOptions(CGM.getLangOpts()));
   SetFPModel();
 }
 
@@ -139,6 +120,18 @@
                              RM != llvm::RoundingMode::NearestTiesToEven);
 }
 
+void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
+  llvm::FastMathFlags FMF;
+  FMF.setAllowReassoc(FPFeatures.allowAssociativeMath());
+  FMF.setNoNaNs(FPFeatures.noHonorNaNs());
+  FMF.setNoInfs(FPFeatures.noHonorInfs());
+  FMF.setNoSignedZeros(FPFeatures.noSignedZeros());
+  FMF.setAllowReciprocal(FPFeatures.allowReciprocalMath());
+  FMF.setApproxFunc(FPFeatures.allowApproximateFunctions());
+  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
+  Builder.setFastMathFlags(FMF);
+}
+
 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
   LValueBaseInfo BaseInfo;
   TBAAAccessInfo TBAAInfo;
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -214,28 +214,6 @@
          (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
 }
 
-/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
-static void updateFastMathFlags(llvm::FastMathFlags &FMF,
-                                FPOptions FPFeatures) {
-  FMF.setAllowReassoc(FPFeatures.allowAssociativeMath());
-  FMF.setNoNaNs(FPFeatures.noHonorNaNs());
-  FMF.setNoInfs(FPFeatures.noHonorInfs());
-  FMF.setNoSignedZeros(FPFeatures.noSignedZeros());
-  FMF.setAllowReciprocal(FPFeatures.allowReciprocalMath());
-  FMF.setApproxFunc(FPFeatures.allowApproximateFunctions());
-  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
-}
-
-/// Propagate fast-math flags from \p Op to the instruction in \p V.
-static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
-  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
-    llvm::FastMathFlags FMF = I->getFastMathFlags();
-    updateFastMathFlags(FMF, Op.FPFeatures);
-    I->setFastMathFlags(FMF);
-  }
-  return V;
-}
-
 static void setBuilderFlagsFromFPFeatures(CGBuilderTy &Builder,
                                           CodeGenFunction &CGF,
                                           FPOptions FPFeatures) {
@@ -244,9 +222,7 @@
   auto NewExceptionBehavior =
       ToConstrainedExceptMD(FPFeatures.getExceptionMode());
   Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
-  auto FMF = Builder.getFastMathFlags();
-  updateFastMathFlags(FMF, FPFeatures);
-  Builder.setFastMathFlags(FMF);
+  CGF.SetFastMathFlags(FPFeatures);
   assert((CGF.CurFuncDecl == nullptr || Builder.getIsFPConstrained() ||
           isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
           isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
@@ -772,8 +748,7 @@
       //  Preserve the old values
       llvm::IRBuilder<>::FastMathFlagGuard FMFG(Builder);
       setBuilderFlagsFromFPFeatures(Builder, CGF, Ops.FPFeatures);
-      Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
-      return propagateFMFlags(V, Ops);
+      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
     }
     if (Ops.isFixedPointOp())
       return EmitFixedPointBinOp(Ops);
@@ -3548,8 +3523,7 @@
     if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
       return FMulAdd;
 
-    Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
-    return propagateFMFlags(V, op);
+    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
   }
 
   if (op.isFixedPointOp())
@@ -3731,8 +3705,7 @@
       // Try to form an fmuladd.
       if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
         return FMulAdd;
-      Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
-      return propagateFMFlags(V, op);
+      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
     }
 
     if (op.isFixedPointOp())
Index: clang/lib/CodeGen/CGCall.cpp
===================================================================
--- clang/lib/CodeGen/CGCall.cpp
+++ clang/lib/CodeGen/CGCall.cpp
@@ -1757,7 +1757,8 @@
     }
 
     FuncAttrs.addAttribute("no-trapping-math",
-                           llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+                           llvm::toStringRef(LangOpts.getFPExceptionMode() ==
+                                             LangOptions::FPE_Ignore));
 
     // Strict (compliant) code is the default, so only add this attribute to
     // indicate that we are trying to workaround a problem case.
@@ -1767,17 +1768,17 @@
     // TODO: Are these all needed?
     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
     FuncAttrs.addAttribute("no-infs-fp-math",
-                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
+                           llvm::toStringRef(LangOpts.NoHonorInfs));
     FuncAttrs.addAttribute("no-nans-fp-math",
-                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
+                           llvm::toStringRef(LangOpts.NoHonorNaNs));
     FuncAttrs.addAttribute("unsafe-fp-math",
-                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
+                           llvm::toStringRef(LangOpts.UnsafeFPMath));
     FuncAttrs.addAttribute("use-soft-float",
                            llvm::toStringRef(CodeGenOpts.SoftFloat));
     FuncAttrs.addAttribute("stack-protector-buffer-size",
                            llvm::utostr(CodeGenOpts.SSPBufferSize));
     FuncAttrs.addAttribute("no-signed-zeros-fp-math",
-                           llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+                           llvm::toStringRef(LangOpts.NoSignedZero));
     FuncAttrs.addAttribute(
         "correctly-rounded-divide-sqrt-fp-math",
         llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
Index: clang/lib/CodeGen/BackendUtil.cpp
===================================================================
--- clang/lib/CodeGen/BackendUtil.cpp
+++ clang/lib/CodeGen/BackendUtil.cpp
@@ -482,10 +482,10 @@
   if (LangOpts.WasmExceptions)
     Options.ExceptionModel = llvm::ExceptionHandling::Wasm;
 
-  Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
-  Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+  Options.NoInfsFPMath = LangOpts.NoHonorInfs;
+  Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
   Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
-  Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+  Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
   Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
   Options.FunctionSections = CodeGenOpts.FunctionSections;
   Options.DataSections = CodeGenOpts.DataSections;
Index: clang/include/clang/Basic/LangOptions.h
===================================================================
--- clang/include/clang/Basic/LangOptions.h
+++ clang/include/clang/Basic/LangOptions.h
@@ -384,12 +384,10 @@
         fenv_access(LangOptions::FPM_Off),
         rounding(static_cast<unsigned>(LangOpts.getFPRoundingMode())),
         exceptions(LangOpts.getFPExceptionMode()),
-        allow_reassoc(LangOpts.FastMath || LangOpts.AllowFPReassoc),
-        no_nans(LangOpts.FastMath || LangOpts.NoHonorNaNs),
-        no_infs(LangOpts.FastMath || LangOpts.NoHonorInfs),
-        no_signed_zeros(LangOpts.FastMath || LangOpts.NoSignedZero),
-        allow_reciprocal(LangOpts.FastMath || LangOpts.AllowRecip),
-        approx_func(LangOpts.FastMath || LangOpts.ApproxFunc) {}
+        allow_reassoc(LangOpts.AllowFPReassoc), no_nans(LangOpts.NoHonorNaNs),
+        no_infs(LangOpts.NoHonorInfs), no_signed_zeros(LangOpts.NoSignedZero),
+        allow_reciprocal(LangOpts.AllowRecip),
+        approx_func(LangOpts.ApproxFunc) {}
   // FIXME: Use getDefaultFEnvAccessMode() when available.
 
   void setFastMath(bool B = true) {
Index: clang/include/clang/Basic/CodeGenOptions.def
===================================================================
--- clang/include/clang/Basic/CodeGenOptions.def
+++ clang/include/clang/Basic/CodeGenOptions.def
@@ -150,13 +150,7 @@
                                      ///< inline line tables.
 CODEGENOPT(StackClashProtector, 1, 0) ///< Set when -fstack-clash-protection is enabled.
 CODEGENOPT(NoImplicitFloat   , 1, 0) ///< Set when -mno-implicit-float is enabled.
-CODEGENOPT(NoInfsFPMath      , 1, 0) ///< Assume FP arguments, results not +-Inf.
-CODEGENOPT(NoSignedZeros     , 1, 0) ///< Allow ignoring the signedness of FP zero
 CODEGENOPT(NullPointerIsValid , 1, 0) ///< Assume Null pointer deference is defined.
-CODEGENOPT(Reassociate       , 1, 0) ///< Allow reassociation of FP math ops
-CODEGENOPT(ReciprocalMath    , 1, 0) ///< Allow FP divisions to be reassociated.
-CODEGENOPT(NoTrappingMath    , 1, 0) ///< Set when -fno-trapping-math is enabled.
-CODEGENOPT(NoNaNsFPMath      , 1, 0) ///< Assume FP arguments, results not NaN.
 CODEGENOPT(CorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt
 CODEGENOPT(UniqueInternalLinkageNames, 1, 0) ///< Internal Linkage symbols get unique names.
 
@@ -248,7 +242,6 @@
 CODEGENOPT(UnrollLoops       , 1, 0) ///< Control whether loops are unrolled.
 CODEGENOPT(RerollLoops       , 1, 0) ///< Control whether loops are rerolled.
 CODEGENOPT(NoUseJumpTables   , 1, 0) ///< Set when -fno-jump-tables is enabled.
-CODEGENOPT(UnsafeFPMath      , 1, 0) ///< Allow unsafe floating point optzns.
 CODEGENOPT(UnwindTables      , 1, 0) ///< Emit unwind tables.
 CODEGENOPT(VectorizeLoop     , 1, 0) ///< Run loop vectorizer.
 CODEGENOPT(VectorizeSLP      , 1, 0) ///< Run SLP vectorizer.