Author: moorabbit
Date: 2025-08-08T20:51:15+01:00
New Revision: 989c0d252650476baea1345c8f446a3dd4f0d3fe

URL: 
https://github.com/llvm/llvm-project/commit/989c0d252650476baea1345c8f446a3dd4f0d3fe
DIFF: 
https://github.com/llvm/llvm-project/commit/989c0d252650476baea1345c8f446a3dd4f0d3fe.diff

LOG: [Clang][X86] Replace unnecessary `vfmadd*` builtins with `__builtin_elementwise_fma` (#152545)

The following builtins were replaced by `__builtin_elementwise_fma`:
- `__builtin_ia32_vfmaddps(256)`
- `__builtin_ia32_vfmaddpd(256)`
- `__builtin_ia32_vfmaddph(256)`
- `__builtin_ia32_vfmaddbf16(128|256|512)`

Each of the aforementioned `__builtin_ia32_vfmadd*` builtins is
equivalent to a `__builtin_elementwise_fma` call, so keeping them is an
unnecessary indirection.

Fixes [#152461](https://github.com/llvm/llvm-project/issues/152461)
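
For illustration, a minimal standalone sketch (not part of the patch;
`fmadd_ps` and `fmsub_ps` are hypothetical helper names, not the real
intrinsic wrappers) of the equivalence the updated headers rely on:

```c
// Clang's vector extension plus __builtin_elementwise_fma is all that is
// needed; no target-specific builtin is involved.
typedef float __v4sf __attribute__((__vector_size__(16)));

static inline __v4sf fmadd_ps(__v4sf __A, __v4sf __B, __v4sf __C) {
  // Before this patch the headers called __builtin_ia32_vfmaddps here.
  return __builtin_elementwise_fma(__A, __B, __C);
}

static inline __v4sf fmsub_ps(__v4sf __A, __v4sf __B, __v4sf __C) {
  // The fmsub/fnmadd/fnmsub variants reuse the same builtin by negating
  // operands, exactly as the header changes below do.
  return __builtin_elementwise_fma(__A, __B, -__C);
}
```

Both forms emit the same per-element `llvm.fma` call, which is why the
dedicated `vfmadd*` builtins could be dropped.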

---------

Co-authored-by: Simon Pilgrim <llvm-...@redking.me.uk>

Added: 
    

Modified: 
    clang/include/clang/Basic/BuiltinsX86.td
    clang/lib/CodeGen/TargetBuiltins/X86.cpp
    clang/lib/Headers/avx10_2_512bf16intrin.h
    clang/lib/Headers/avx10_2bf16intrin.h
    clang/lib/Headers/avx512vlfp16intrin.h
    clang/lib/Headers/avx512vlintrin.h
    clang/lib/Headers/fma4intrin.h
    clang/lib/Headers/fmaintrin.h
    clang/test/CodeGen/target-builtin-noerror.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index a5aa578940483..3efc0be8fa698 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -884,11 +884,6 @@ let Features = "sha", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in
   def sha256msg2 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, 
int>)">;
 }
 
-let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, 
float>, _Vector<4, float>)">;
-  def vfmaddpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, 
double>, _Vector<2, double>)">;
-}
-
 let Features = "fma", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] 
in {
   def vfmaddss3 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, 
float>, _Vector<4, float>)">;
   def vfmaddsd3 : X86Builtin<"_Vector<2, double>(_Vector<2, double>, 
_Vector<2, double>, _Vector<2, double>)">;
@@ -904,9 +899,8 @@ let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128
   def vfmaddsubpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, 
_Vector<2, double>, _Vector<2, double>)">;
 }
 
-let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, 
_Vector<8, float>, _Vector<8, float>)">;
-  def vfmaddpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, 
_Vector<4, double>, _Vector<4, double>)">;
+let Features = "fma|fma4",
+    Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
   def vfmaddsubps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, 
_Vector<8, float>, _Vector<8, float>)">;
   def vfmaddsubpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, 
_Vector<4, double>, _Vector<4, double>)">;
 }
@@ -4149,14 +4143,6 @@ let Features = "avx512fp16,evex512", Attributes = 
[NoThrow, Const, RequiredVecto
   def vcvtps2phx512_mask : X86Builtin<"_Vector<16, _Float16>(_Vector<16, 
float>, _Vector<16, _Float16>, unsigned short, _Constant int)">;
 }
 
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddph : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>, 
_Vector<8, _Float16>, _Vector<8, _Float16>)">;
-}
-
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddph256 : X86Builtin<"_Vector<16, _Float16>(_Vector<16, _Float16>, 
_Vector<16, _Float16>, _Vector<16, _Float16>)">;
-}
-
 let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in {
   def vfmaddph512_mask : X86Builtin<"_Vector<32, _Float16>(_Vector<32, 
_Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, 
_Constant int)">;
   def vfmaddph512_mask3 : X86Builtin<"_Vector<32, _Float16>(_Vector<32, 
_Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, 
_Constant int)">;
@@ -5382,13 +5368,4 @@ let Features = "avx10.2-256", Attributes = [NoThrow, 
Const, RequiredVectorWidth<
 
 let Features = "avx10.2-512", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in {
   def vsqrtbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>)">;
-  def vfmaddbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>, 
_Vector<32, __bf16>, _Vector<32, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddbf16256 : X86Builtin<"_Vector<16, __bf16>(_Vector<16, __bf16>, 
_Vector<16, __bf16>, _Vector<16, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddbf16128 : X86Builtin<"_Vector<8, __bf16>(_Vector<8, __bf16>, 
_Vector<8, __bf16>, _Vector<8, __bf16>)">;
 }

diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
index e23d19d2f6b6b..b508709e4bbae 100644
--- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
@@ -1051,18 +1051,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned 
BuiltinID,
   case X86::BI__builtin_ia32_vfmsubsd3_mask3:
     return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
                              /*NegAcc*/ true);
-  case X86::BI__builtin_ia32_vfmaddph:
-  case X86::BI__builtin_ia32_vfmaddps:
-  case X86::BI__builtin_ia32_vfmaddpd:
-  case X86::BI__builtin_ia32_vfmaddph256:
-  case X86::BI__builtin_ia32_vfmaddps256:
-  case X86::BI__builtin_ia32_vfmaddpd256:
   case X86::BI__builtin_ia32_vfmaddph512_mask:
   case X86::BI__builtin_ia32_vfmaddph512_maskz:
   case X86::BI__builtin_ia32_vfmaddph512_mask3:
-  case X86::BI__builtin_ia32_vfmaddbf16128:
-  case X86::BI__builtin_ia32_vfmaddbf16256:
-  case X86::BI__builtin_ia32_vfmaddbf16512:
   case X86::BI__builtin_ia32_vfmaddps512_mask:
   case X86::BI__builtin_ia32_vfmaddps512_maskz:
   case X86::BI__builtin_ia32_vfmaddps512_mask3:

diff --git a/clang/lib/Headers/avx10_2_512bf16intrin.h b/clang/lib/Headers/avx10_2_512bf16intrin.h
index 75290d22ef259..95e9bd7a36f9b 100644
--- a/clang/lib/Headers/avx10_2_512bf16intrin.h
+++ b/clang/lib/Headers/avx10_2_512bf16intrin.h
@@ -441,8 +441,8 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
-                                                (__v32bf)__C);
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+                                             (__v32bf)__C);
 }
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -469,8 +469,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fmadd_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
-                                                -(__v32bf)__C);
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
+                                             -(__v32bf)__C);
 }
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
@@ -497,8 +497,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fmsub_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
-                                                (__v32bf)__C);
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+                                             (__v32bf)__C);
 }
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmadd_pbh(
@@ -527,8 +527,8 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fnmadd_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
-                                                -(__v32bf)__C);
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
+                                             -(__v32bf)__C);
 }
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512 _mm512_mask_fnmsub_pbh(

diff --git a/clang/lib/Headers/avx10_2bf16intrin.h b/clang/lib/Headers/avx10_2bf16intrin.h
index 66797ae00fe4f..0c7f381f04fa5 100644
--- a/clang/lib/Headers/avx10_2bf16intrin.h
+++ b/clang/lib/Headers/avx10_2bf16intrin.h
@@ -852,8 +852,8 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
-                                                (__v16bf)__C);
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+                                             (__v16bf)__C);
 }
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -880,8 +880,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fmadd_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
-                                                -(__v16bf)__C);
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
+                                             -(__v16bf)__C);
 }
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
@@ -908,8 +908,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fmsub_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
-                                                (__v16bf)__C);
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+                                             (__v16bf)__C);
 }
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmadd_pbh(
@@ -938,8 +938,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fnmadd_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
-                                                -(__v16bf)__C);
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
+                                             -(__v16bf)__C);
 }
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256 _mm256_mask_fnmsub_pbh(
@@ -969,8 +969,8 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fnmsub_pbh(
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
                                                                __m128bh __B,
                                                                __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
-                                                (__v8bf)__C);
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+                                             (__v8bf)__C);
 }
 
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -997,8 +997,8 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
                                                                __m128bh __B,
                                                                __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
-                                                -(__v8bf)__C);
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
+                                             -(__v8bf)__C);
 }
 
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1025,8 +1025,8 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
                                                                 __m128bh __B,
                                                                 __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
-                                                (__v8bf)__C);
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+                                             (__v8bf)__C);
 }
 
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128
@@ -1053,8 +1053,8 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
                                                                 __m128bh __B,
                                                                 __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
-                                                -(__v8bf)__C);
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
+                                             -(__v8bf)__C);
 }
 
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128

diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h
index a12acb7d9a24a..1f8cca711f9f3 100644
--- a/clang/lib/Headers/avx512vlfp16intrin.h
+++ b/clang/lib/Headers/avx512vlfp16intrin.h
@@ -1419,8 +1419,8 @@ _mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
                                                              __m128h __B,
                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
-                                          (__v8hf)__C);
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+                                            (__v8hf)__C);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
@@ -1429,7 +1429,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 
_mm_mask_fmadd_ph(__m128h __A,
                                                                   __m128h __C) 
{
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__A);
 }
 
@@ -1437,7 +1437,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1445,15 +1445,15 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
                                                              __m128h __B,
                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
-                                          -(__v8hf)__C);
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
+                                            -(__v8hf)__C);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A,
@@ -1476,7 +1476,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1484,7 +1484,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
@@ -1492,22 +1492,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
                                                                 __m256h __B,
                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
-                                             (__v16hf)__C);
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+                                            (__v16hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1515,7 +1515,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1523,22 +1523,22 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
                                                                 __m256h __B,
                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
-                                             -(__v16hf)__C);
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
+                                            -(__v16hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1546,7 +1546,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1554,7 +1554,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1562,7 +1562,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1570,7 +1570,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1684,7 +1684,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1692,7 +1692,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1715,45 +1715,45 @@ _mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, 
__m256h __C, __mmask16 __U) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
-                                          (__v8hf)__C);
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+                                            (__v8hf)__C);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
       (__v8hf)__A);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
                                                                  __m256h __B,
                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
-                                             (__v16hf)__C);
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+                                            (__v16hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
       (__v16hf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
-                                          -(__v8hf)__C);
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
+                                            -(__v8hf)__C);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__A);
 }
 
@@ -1761,22 +1761,22 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
                                                                  __m256h __B,
                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
-                                             -(__v16hf)__C);
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
+                                            -(__v16hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1784,7 +1784,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__C);
 }
 

diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 2a5f7b43f63fc..cbad39acad84f 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -899,321 +899,289 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i 
__B)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+      (__v2df)__A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+      (__v2df)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, (__v2df)__C),
+      (__v2df)_mm_setzero_pd());
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+      (__v2df)__A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+      (__v2df)_mm_setzero_pd());
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __C);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+      (__v2df)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, (__v2df)__C),
+      (__v2df)_mm_setzero_pd());
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df)_mm_setzero_pd());
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, -(__v2df)__C),
+      (__v2df)_mm_setzero_pd());
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+      (__v4df)__A);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+      (__v4df)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, (__v4df)__C),
+      (__v4df)_mm256_setzero_pd());
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+      (__v4df)__A);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+      (__v4df)_mm256_setzero_pd());
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __C);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+      (__v4df)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, (__v4df)__C),
+      (__v4df)_mm256_setzero_pd());
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df)_mm256_setzero_pd());
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, -(__v4df)__C),
+      (__v4df)_mm256_setzero_pd());
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+      (__v4sf)__A);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+      (__v4sf)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+      (__v4sf)_mm_setzero_ps());
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)__A);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)_mm_setzero_ps());
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __C);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+      (__v4sf)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C),
+      (__v4sf)_mm_setzero_ps());
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf)_mm_setzero_ps());
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)_mm_setzero_ps());
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+      (__v8sf)__A);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+      (__v8sf)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+      (__v8sf)_mm256_setzero_ps());
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)__A);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)_mm256_setzero_ps());
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __C);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+      (__v8sf)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C),
+      (__v8sf)_mm256_setzero_ps());
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf)_mm256_setzero_ps());
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)_mm256_setzero_ps());
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1420,41 +1388,37 @@ _mm256_maskz_fmsubadd_ps(__mmask8 __U, __m256 __A, 
__m256 __B, __m256 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             (__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, (__v2df)__B, -(__v2df)__C),
+      (__v2df)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                (__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, (__v4df)__B, -(__v4df)__C),
+      (__v4df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             (__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                (__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -1500,121 +1464,109 @@ _mm256_mask3_fmsubadd_ps(__m256 __A, __m256 __B, 
__m256 __C, __mmask8 __U)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             (__v2df) __C),
-                    (__v2df) __A);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, (__v2df)__C),
+      (__v2df)__A);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                (__v4df) __C),
-                    (__v4df) __A);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, (__v4df)__C),
+      (__v4df)__A);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             (__v4sf) __C),
-                    (__v4sf) __A);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C),
+      (__v4sf)__A);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                (__v8sf) __C),
-                    (__v8sf) __A);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, (__v8sf)__C),
+      (__v8sf)__A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __A);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+      (__v2df)__A);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
-  return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
-                                             -(__v2df) __B,
-                                             -(__v2df) __C),
-                    (__v2df) __C);
+  return (__m128d)__builtin_ia32_selectpd_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v2df)__A, -(__v2df)__B, -(__v2df)__C),
+      (__v2df)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __A);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+      (__v4df)__A);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
-  return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
-                                                -(__v4df) __B,
-                                                -(__v4df) __C),
-                    (__v4df) __C);
+  return (__m256d)__builtin_ia32_selectpd_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4df)__A, -(__v4df)__B, -(__v4df)__C),
+      (__v4df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __A);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)__A);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
-  return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
-                                             -(__v4sf) __B,
-                                             -(__v4sf) __C),
-                    (__v4sf) __C);
+  return (__m128)__builtin_ia32_selectps_128(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C),
+      (__v4sf)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __A);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)__A);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
-  return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
-                                                -(__v8sf) __B,
-                                                -(__v8sf) __C),
-                    (__v8sf) __C);
+  return (__m256)__builtin_ia32_selectps_256(
+      (__mmask8)__U,
+      __builtin_elementwise_fma((__v8sf)__A, -(__v8sf)__B, -(__v8sf)__C),
+      (__v8sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128

diff --git a/clang/lib/Headers/fma4intrin.h b/clang/lib/Headers/fma4intrin.h
index 694801b3e8dd4..69977fb2ccee3 100644
--- a/clang/lib/Headers/fma4intrin.h
+++ b/clang/lib/Headers/fma4intrin.h
@@ -23,13 +23,15 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+                                           (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+                                            (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -47,13 +49,15 @@ _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+                                           -(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+                                            -(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -71,13 +75,15 @@ _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+                                           (__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+                                            (__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -95,13 +101,15 @@ _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+                                           -(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+                                            -(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -143,49 +151,57 @@ _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+                                           (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+                                            (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+                                           -(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+                                            -(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+                                           (__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+                                            (__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+                                           -(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+                                            -(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
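
Each of the macc/msub/nmacc/nmsub entry points in this header collapses onto the single generic builtin by negating operands; per element, the four sign variants are (assumed semantics, shown for the ps forms):

  //   macc : fma( a, b,  c) =  a*b + c
  //   msub : fma( a, b, -c) =  a*b - c
  //   nmacc: fma(-a, b,  c) = -a*b + c
  //   nmsub: fma(-a, b, -c) = -a*b - c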

diff  --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index 22d1a780bbfd4..24584a928da4d 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -35,7 +35,8 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+                                           (__v4sf)__C);
 }
 
 /// Computes a multiply-add of 128-bit vectors of [2 x double].
@@ -55,7 +56,8 @@ _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+                                            (__v2df)__C);
 }
 
 /// Computes a scalar multiply-add of the single-precision values in the
@@ -133,7 +135,8 @@ _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B,
+                                           -(__v4sf)__C);
 }
 
 /// Computes a multiply-subtract of 128-bit vectors of [2 x double].
@@ -153,7 +156,8 @@ _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B,
+                                            -(__v2df)__C);
 }
 
 /// Computes a scalar multiply-subtract of the single-precision values in
@@ -231,7 +235,8 @@ _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+                                           (__v4sf)__C);
 }
 
 /// Computes a negated multiply-add of 128-bit vectors of [2 x double].
@@ -251,7 +256,8 @@ _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+                                            (__v2df)__C);
 }
 
 /// Computes a scalar negated multiply-add of the single-precision values in
@@ -329,7 +335,8 @@ _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B,
+                                           -(__v4sf)__C);
 }
 
 /// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
@@ -349,7 +356,8 @@ _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B,
+                                            -(__v2df)__C);
 }
 
 /// Computes a scalar negated multiply-subtract of the single-precision
@@ -531,7 +539,8 @@ _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+                                           (__v8sf)__C);
 }
 
 /// Computes a multiply-add of 256-bit vectors of [4 x double].
@@ -551,7 +560,8 @@ _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+                                            (__v4df)__C);
 }
 
 /// Computes a multiply-subtract of 256-bit vectors of [8 x float].
@@ -571,7 +581,8 @@ _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B,
+                                           -(__v8sf)__C);
 }
 
 /// Computes a multiply-subtract of 256-bit vectors of [4 x double].
@@ -591,7 +602,8 @@ _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B,
+                                            -(__v4df)__C);
 }
 
 /// Computes a negated multiply-add of 256-bit vectors of [8 x float].
@@ -611,7 +623,8 @@ _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+                                           (__v8sf)__C);
 }
 
 /// Computes a negated multiply-add of 256-bit vectors of [4 x double].
@@ -631,7 +644,8 @@ _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+                                            (__v4df)__C);
 }
 
 /// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
@@ -651,7 +665,8 @@ _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B,
+                                           -(__v8sf)__C);
 }
 
 /// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
@@ -671,7 +686,8 @@ _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B,
+                                            -(__v4df)__C);
 }
 
 /// Computes a multiply with alternating add/subtract of 256-bit vectors of
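
The public intrinsic surface is unchanged, so existing user code is unaffected. A minimal usage sketch (axpy is a hypothetical example function; compile with -mfma):

  #include <immintrin.h>

  // With FMA available this should still fold to a single fused
  // multiply-add instruction; only the header-internal expansion changed.
  __m256 axpy(__m256 a, __m256 x, __m256 y) {
    return _mm256_fmadd_ps(a, x, y);
  }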

diff  --git a/clang/test/CodeGen/target-builtin-noerror.c b/clang/test/CodeGen/target-builtin-noerror.c
index 0bbd8c3e5ddd8..5cf53b2d2fc8f 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -32,15 +32,15 @@ int qq(void) {
 
// Test that fma and fma4 are both separately and combined valid for an fma intrinsic.
 __m128 __attribute__((target("fma"))) fma_1(__m128 a, __m128 b, __m128 c) {
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_ia32_vfmaddsubps(a, b, c);
 }
 
 __m128 __attribute__((target("fma4"))) fma_2(__m128 a, __m128 b, __m128 c) {
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_ia32_vfmaddsubps(a, b, c);
 }
 
 __m128 __attribute__((target("fma,fma4"))) fma_3(__m128 a, __m128 b, __m128 c) 
{
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_ia32_vfmaddsubps(a, b, c);
 }
 
 void verifyfeaturestrings(void) {
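
The test now probes __builtin_ia32_vfmaddsubps because __builtin_ia32_vfmaddps no longer exists; vfmaddsubps is still gated on fma|fma4, so the same feature-checking path is exercised. Code that only needs a plain fused multiply-add can call the generic builtin with no target attribute at all, for example (generic_fma is a hypothetical sketch):

  __m128 generic_fma(__m128 a, __m128 b, __m128 c) {
    // __builtin_elementwise_fma is target-independent and accepts
    // floating-point vector types such as __m128 directly.
    return __builtin_elementwise_fma(a, b, c);
  }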


        