https://github.com/moorabbit created 
https://github.com/llvm/llvm-project/pull/152545

The following intrinsics were replaced by `__builtin_elementwise_fma`:
- `__builtin_ia32_vfmaddps` and `__builtin_ia32_vfmaddps256`
- `__builtin_ia32_vfmaddpd` and `__builtin_ia32_vfmaddpd256`
- `__builtin_ia32_vfmaddph` and `__builtin_ia32_vfmaddph256`
- `__builtin_ia32_vfmaddbf16128`, `__builtin_ia32_vfmaddbf16256`, and `__builtin_ia32_vfmaddbf16512`

These unmasked `__builtin_ia32_vfmadd*` intrinsics were already lowered to the
same IR as `__builtin_elementwise_fma`, so keeping them is an unnecessary
indirection. (The masked variants, e.g. `__builtin_ia32_vfmaddph512_mask*`, are
unaffected.)

Fixes [#152461](https://github.com/llvm/llvm-project/issues/152461)

>From f6ac261dd7bccbe1085b453fec2fd73f310ad09e Mon Sep 17 00:00:00 2001
From: moorabbit <moorab...@proton.me>
Date: Thu, 7 Aug 2025 11:59:19 -0400
Subject: [PATCH] [Clang][X86] Replace some `__builtin_ia32_vfmadd*` with
 `__builtin_elementwise_fma`

The following intrinsics were replaced by `__builtin_elementwise_fma`:
- `__builtin_ia32_vfmaddps` and `__builtin_ia32_vfmaddps256`
- `__builtin_ia32_vfmaddpd` and `__builtin_ia32_vfmaddpd256`
- `__builtin_ia32_vfmaddph` and `__builtin_ia32_vfmaddph256`
- `__builtin_ia32_vfmaddbf16128`, `__builtin_ia32_vfmaddbf16256`, and `__builtin_ia32_vfmaddbf16512`

These unmasked `__builtin_ia32_vfmadd*` intrinsics were already lowered to the
same IR as `__builtin_elementwise_fma`, so keeping them is an unnecessary
indirection. (The masked variants, e.g. `__builtin_ia32_vfmaddph512_mask*`, are
unaffected.)
---
 clang/include/clang/Basic/BuiltinsX86.td    | 24 ------
 clang/lib/CodeGen/TargetBuiltins/X86.cpp    |  9 --
 clang/lib/Headers/avx10_2_512bf16intrin.h   |  8 +-
 clang/lib/Headers/avx10_2bf16intrin.h       | 16 ++--
 clang/lib/Headers/avx512vlfp16intrin.h      | 60 ++++++-------
 clang/lib/Headers/avx512vlintrin.h          | 96 ++++++++++-----------
 clang/lib/Headers/fma4intrin.h              | 32 +++----
 clang/lib/Headers/fmaintrin.h               | 32 +++----
 clang/test/CodeGen/target-builtin-noerror.c |  6 +-
 9 files changed, 125 insertions(+), 158 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsX86.td 
b/clang/include/clang/Basic/BuiltinsX86.td
index a4acc72fdc37d..609b720357e38 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -878,11 +878,6 @@ let Features = "sha", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in
   def sha256msg2 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, 
int>)">;
 }
 
-let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddps : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, 
float>, _Vector<4, float>)">;
-  def vfmaddpd : X86Builtin<"_Vector<2, double>(_Vector<2, double>, _Vector<2, 
double>, _Vector<2, double>)">;
-}
-
 let Features = "fma", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] 
in {
   def vfmaddss3 : X86Builtin<"_Vector<4, float>(_Vector<4, float>, _Vector<4, 
float>, _Vector<4, float>)">;
   def vfmaddsd3 : X86Builtin<"_Vector<2, double>(_Vector<2, double>, 
_Vector<2, double>, _Vector<2, double>)">;
@@ -899,8 +894,6 @@ let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128
 }
 
 let Features = "fma|fma4", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, 
_Vector<8, float>, _Vector<8, float>)">;
-  def vfmaddpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, 
_Vector<4, double>, _Vector<4, double>)">;
   def vfmaddsubps256 : X86Builtin<"_Vector<8, float>(_Vector<8, float>, 
_Vector<8, float>, _Vector<8, float>)">;
   def vfmaddsubpd256 : X86Builtin<"_Vector<4, double>(_Vector<4, double>, 
_Vector<4, double>, _Vector<4, double>)">;
 }
@@ -4140,14 +4133,6 @@ let Features = "avx512fp16,evex512", Attributes = 
[NoThrow, Const, RequiredVecto
   def vcvtps2phx512_mask : X86Builtin<"_Vector<16, _Float16>(_Vector<16, 
float>, _Vector<16, _Float16>, unsigned short, _Constant int)">;
 }
 
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddph : X86Builtin<"_Vector<8, _Float16>(_Vector<8, _Float16>, 
_Vector<8, _Float16>, _Vector<8, _Float16>)">;
-}
-
-let Features = "avx512fp16,avx512vl", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddph256 : X86Builtin<"_Vector<16, _Float16>(_Vector<16, _Float16>, 
_Vector<16, _Float16>, _Vector<16, _Float16>)">;
-}
-
 let Features = "avx512fp16,evex512", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in {
   def vfmaddph512_mask : X86Builtin<"_Vector<32, _Float16>(_Vector<32, 
_Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, 
_Constant int)">;
   def vfmaddph512_mask3 : X86Builtin<"_Vector<32, _Float16>(_Vector<32, 
_Float16>, _Vector<32, _Float16>, _Vector<32, _Float16>, unsigned int, 
_Constant int)">;
@@ -5373,13 +5358,4 @@ let Features = "avx10.2-256", Attributes = [NoThrow, 
Const, RequiredVectorWidth<
 
 let Features = "avx10.2-512", Attributes = [NoThrow, Const, 
RequiredVectorWidth<512>] in {
   def vsqrtbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>)">;
-  def vfmaddbf16512 : X86Builtin<"_Vector<32, __bf16>(_Vector<32, __bf16>, 
_Vector<32, __bf16>, _Vector<32, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, 
RequiredVectorWidth<256>] in {
-  def vfmaddbf16256 : X86Builtin<"_Vector<16, __bf16>(_Vector<16, __bf16>, 
_Vector<16, __bf16>, _Vector<16, __bf16>)">;
-}
-
-let Features = "avx10.2-256", Attributes = [NoThrow, Const, 
RequiredVectorWidth<128>] in {
-  def vfmaddbf16128 : X86Builtin<"_Vector<8, __bf16>(_Vector<8, __bf16>, 
_Vector<8, __bf16>, _Vector<8, __bf16>)">;
 }
diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp 
b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
index e23d19d2f6b6b..b508709e4bbae 100644
--- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
@@ -1051,18 +1051,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned 
BuiltinID,
   case X86::BI__builtin_ia32_vfmsubsd3_mask3:
     return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
                              /*NegAcc*/ true);
-  case X86::BI__builtin_ia32_vfmaddph:
-  case X86::BI__builtin_ia32_vfmaddps:
-  case X86::BI__builtin_ia32_vfmaddpd:
-  case X86::BI__builtin_ia32_vfmaddph256:
-  case X86::BI__builtin_ia32_vfmaddps256:
-  case X86::BI__builtin_ia32_vfmaddpd256:
   case X86::BI__builtin_ia32_vfmaddph512_mask:
   case X86::BI__builtin_ia32_vfmaddph512_maskz:
   case X86::BI__builtin_ia32_vfmaddph512_mask3:
-  case X86::BI__builtin_ia32_vfmaddbf16128:
-  case X86::BI__builtin_ia32_vfmaddbf16256:
-  case X86::BI__builtin_ia32_vfmaddbf16512:
   case X86::BI__builtin_ia32_vfmaddps512_mask:
   case X86::BI__builtin_ia32_vfmaddps512_maskz:
   case X86::BI__builtin_ia32_vfmaddps512_mask3:
diff --git a/clang/lib/Headers/avx10_2_512bf16intrin.h 
b/clang/lib/Headers/avx10_2_512bf16intrin.h
index 75290d22ef259..a1613420eee2b 100644
--- a/clang/lib/Headers/avx10_2_512bf16intrin.h
+++ b/clang/lib/Headers/avx10_2_512bf16intrin.h
@@ -441,7 +441,7 @@ _mm512_maskz_sqrt_pbh(__mmask32 __U, __m512bh __A) {
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
                                                 (__v32bf)__C);
 }
 
@@ -469,7 +469,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fmadd_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, (__v32bf)__B,
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, (__v32bf)__B,
                                                 -(__v32bf)__C);
 }
 
@@ -497,7 +497,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fmsub_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fnmadd_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
                                                 (__v32bf)__C);
 }
 
@@ -527,7 +527,7 @@ static __inline__ __m512bh __DEFAULT_FN_ATTRS512 
_mm512_maskz_fnmadd_pbh(
 
 static __inline__ __m512bh __DEFAULT_FN_ATTRS512
 _mm512_fnmsub_pbh(__m512bh __A, __m512bh __B, __m512bh __C) {
-  return (__m512bh)__builtin_ia32_vfmaddbf16512((__v32bf)__A, -(__v32bf)__B,
+  return (__m512bh)__builtin_elementwise_fma((__v32bf)__A, -(__v32bf)__B,
                                                 -(__v32bf)__C);
 }
 
diff --git a/clang/lib/Headers/avx10_2bf16intrin.h 
b/clang/lib/Headers/avx10_2bf16intrin.h
index 66797ae00fe4f..8cf1f78f189aa 100644
--- a/clang/lib/Headers/avx10_2bf16intrin.h
+++ b/clang/lib/Headers/avx10_2bf16intrin.h
@@ -852,7 +852,7 @@ _mm_maskz_sqrt_pbh(__mmask8 __U, __m128bh __A) {
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
                                                 (__v16bf)__C);
 }
 
@@ -880,7 +880,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fmadd_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, (__v16bf)__B,
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, (__v16bf)__B,
                                                 -(__v16bf)__C);
 }
 
@@ -908,7 +908,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fmsub_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
                                                 (__v16bf)__C);
 }
 
@@ -938,7 +938,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fnmadd_pbh(
 
 static __inline__ __m256bh __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_pbh(__m256bh __A, __m256bh __B, __m256bh __C) {
-  return (__m256bh)__builtin_ia32_vfmaddbf16256((__v16bf)__A, -(__v16bf)__B,
+  return (__m256bh)__builtin_elementwise_fma((__v16bf)__A, -(__v16bf)__B,
                                                 -(__v16bf)__C);
 }
 
@@ -969,7 +969,7 @@ static __inline__ __m256bh __DEFAULT_FN_ATTRS256 
_mm256_maskz_fnmsub_pbh(
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmadd_pbh(__m128bh __A,
                                                                __m128bh __B,
                                                                __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
                                                 (__v8bf)__C);
 }
 
@@ -997,7 +997,7 @@ _mm_maskz_fmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fmsub_pbh(__m128bh __A,
                                                                __m128bh __B,
                                                                __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, (__v8bf)__B,
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, (__v8bf)__B,
                                                 -(__v8bf)__C);
 }
 
@@ -1025,7 +1025,7 @@ _mm_maskz_fmsub_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmadd_pbh(__m128bh __A,
                                                                 __m128bh __B,
                                                                 __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
                                                 (__v8bf)__C);
 }
 
@@ -1053,7 +1053,7 @@ _mm_maskz_fnmadd_pbh(__mmask8 __U, __m128bh __A, __m128bh 
__B, __m128bh __C) {
 static __inline__ __m128bh __DEFAULT_FN_ATTRS128 _mm_fnmsub_pbh(__m128bh __A,
                                                                 __m128bh __B,
                                                                 __m128bh __C) {
-  return (__m128bh)__builtin_ia32_vfmaddbf16128((__v8bf)__A, -(__v8bf)__B,
+  return (__m128bh)__builtin_elementwise_fma((__v8bf)__A, -(__v8bf)__B,
                                                 -(__v8bf)__C);
 }
 
diff --git a/clang/lib/Headers/avx512vlfp16intrin.h 
b/clang/lib/Headers/avx512vlfp16intrin.h
index a12acb7d9a24a..ba27f0459ff25 100644
--- a/clang/lib/Headers/avx512vlfp16intrin.h
+++ b/clang/lib/Headers/avx512vlfp16intrin.h
@@ -1419,7 +1419,7 @@ _mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
                                                              __m128h __B,
                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
                                           (__v8hf)__C);
 }
 
@@ -1429,7 +1429,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 
_mm_mask_fmadd_ph(__m128h __A,
                                                                   __m128h __C) 
{
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__A);
 }
 
@@ -1437,7 +1437,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1445,14 +1445,14 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
                                                              __m128h __B,
                                                              __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B,
                                           -(__v8hf)__C);
 }
 
@@ -1476,7 +1476,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1484,7 +1484,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
@@ -1492,14 +1492,14 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
       (__v8hf)_mm_setzero_ph());
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
                                                                 __m256h __B,
                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
                                              (__v16hf)__C);
 }
 
@@ -1507,7 +1507,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1515,7 +1515,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1523,14 +1523,14 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
                                                                 __m256h __B,
                                                                 __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B,
                                              -(__v16hf)__C);
 }
 
@@ -1538,7 +1538,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1546,7 +1546,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1554,7 +1554,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1562,7 +1562,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1570,7 +1570,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)_mm256_setzero_ph());
 }
 
@@ -1684,7 +1684,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__C);
 }
 
@@ -1692,7 +1692,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__C);
 }
 
@@ -1715,7 +1715,7 @@ _mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, 
__m256h __C, __mmask16 __U) {
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
                                           (__v8hf)__C);
 }
 
@@ -1723,14 +1723,14 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
       (__v8hf)__A);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
                                                                  __m256h __B,
                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
                                              (__v16hf)__C);
 }
 
@@ -1738,14 +1738,14 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
       (__v16hf)__A);
 }
 
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
                                                               __m128h __B,
                                                               __m128h __C) {
-  return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
+  return (__m128h)__builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B,
                                           -(__v8hf)__C);
 }
 
@@ -1753,7 +1753,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__A);
 }
 
@@ -1761,14 +1761,14 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
   return (__m128h)__builtin_ia32_selectph_128(
       (__mmask8)__U,
-      __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+      __builtin_elementwise_fma((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
       (__v8hf)__C);
 }
 
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
                                                                  __m256h __B,
                                                                  __m256h __C) {
-  return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
+  return (__m256h)__builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B,
                                              -(__v16hf)__C);
 }
 
@@ -1776,7 +1776,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__A);
 }
 
@@ -1784,7 +1784,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
   return (__m256h)__builtin_ia32_selectph_256(
       (__mmask16)__U,
-      __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+      __builtin_elementwise_fma((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
       (__v16hf)__C);
 }
 
diff --git a/clang/lib/Headers/avx512vlintrin.h 
b/clang/lib/Headers/avx512vlintrin.h
index 2a5f7b43f63fc..a60d984beafee 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -900,7 +900,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              (__v2df) __C),
                     (__v2df) __A);
@@ -910,7 +910,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              (__v2df) __C),
                     (__v2df) __C);
@@ -920,7 +920,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              (__v2df) __C),
                     (__v2df)_mm_setzero_pd());
@@ -930,7 +930,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df) __A);
@@ -940,7 +940,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df)_mm_setzero_pd());
@@ -950,7 +950,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
+                    __builtin_elementwise_fma (-(__v2df) __A,
                                              (__v2df) __B,
                                              (__v2df) __C),
                     (__v2df) __C);
@@ -960,7 +960,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
+                    __builtin_elementwise_fma (-(__v2df) __A,
                                              (__v2df) __B,
                                              (__v2df) __C),
                     (__v2df)_mm_setzero_pd());
@@ -970,7 +970,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_pd(__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd (-(__v2df) __A,
+                    __builtin_elementwise_fma (-(__v2df) __A,
                                              (__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df)_mm_setzero_pd());
@@ -980,7 +980,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df) __A);
@@ -990,7 +990,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df) __C);
@@ -1000,7 +1000,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df)_mm256_setzero_pd());
@@ -1010,7 +1010,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df) __A);
@@ -1020,7 +1020,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df)_mm256_setzero_pd());
@@ -1030,7 +1030,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+                    __builtin_elementwise_fma (-(__v4df) __A,
                                                 (__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df) __C);
@@ -1040,7 +1040,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+                    __builtin_elementwise_fma (-(__v4df) __A,
                                                 (__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df)_mm256_setzero_pd());
@@ -1050,7 +1050,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_pd(__mmask8 __U, __m256d __A, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 (-(__v4df) __A,
+                    __builtin_elementwise_fma (-(__v4df) __A,
                                                 (__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df)_mm256_setzero_pd());
@@ -1060,7 +1060,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf) __A);
@@ -1070,7 +1070,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf) __C);
@@ -1080,7 +1080,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf)_mm_setzero_ps());
@@ -1090,7 +1090,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf) __A);
@@ -1100,7 +1100,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf)_mm_setzero_ps());
@@ -1110,7 +1110,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
+                    __builtin_elementwise_fma (-(__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf) __C);
@@ -1120,7 +1120,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmadd_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
+                    __builtin_elementwise_fma (-(__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf)_mm_setzero_ps());
@@ -1130,7 +1130,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_maskz_fnmsub_ps(__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps (-(__v4sf) __A,
+                    __builtin_elementwise_fma (-(__v4sf) __A,
                                              (__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf)_mm_setzero_ps());
@@ -1140,7 +1140,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf) __A);
@@ -1150,7 +1150,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf) __C);
@@ -1160,7 +1160,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf)_mm256_setzero_ps());
@@ -1170,7 +1170,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf) __A);
@@ -1180,7 +1180,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf)_mm256_setzero_ps());
@@ -1190,7 +1190,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+                    __builtin_elementwise_fma (-(__v8sf) __A,
                                                 (__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf) __C);
@@ -1200,7 +1200,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmadd_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+                    __builtin_elementwise_fma (-(__v8sf) __A,
                                                 (__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf)_mm256_setzero_ps());
@@ -1210,7 +1210,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_maskz_fnmsub_ps(__mmask8 __U, __m256 __A, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 (-(__v8sf) __A,
+                    __builtin_elementwise_fma (-(__v8sf) __A,
                                                 (__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf)_mm256_setzero_ps());
@@ -1421,7 +1421,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              (__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df) __C);
@@ -1431,7 +1431,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 (__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df) __C);
@@ -1441,7 +1441,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              (__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf) __C);
@@ -1451,7 +1451,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 (__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf) __C);
@@ -1501,7 +1501,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              -(__v2df) __B,
                                              (__v2df) __C),
                     (__v2df) __A);
@@ -1511,7 +1511,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 -(__v4df) __B,
                                                 (__v4df) __C),
                     (__v4df) __A);
@@ -1521,7 +1521,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmadd_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              -(__v4sf) __B,
                                              (__v4sf) __C),
                     (__v4sf) __A);
@@ -1531,7 +1531,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmadd_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 -(__v8sf) __B,
                                                 (__v8sf) __C),
                     (__v8sf) __A);
@@ -1541,7 +1541,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              -(__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df) __A);
@@ -1551,7 +1551,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C, __mmask8 __U)
 {
   return (__m128d) __builtin_ia32_selectpd_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd ((__v2df) __A,
+                    __builtin_elementwise_fma ((__v2df) __A,
                                              -(__v2df) __B,
                                              -(__v2df) __C),
                     (__v2df) __C);
@@ -1561,7 +1561,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_pd(__m256d __A, __mmask8 __U, __m256d __B, __m256d __C)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 -(__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df) __A);
@@ -1571,7 +1571,7 @@ static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C, __mmask8 __U)
 {
   return (__m256d) __builtin_ia32_selectpd_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddpd256 ((__v4df) __A,
+                    __builtin_elementwise_fma ((__v4df) __A,
                                                 -(__v4df) __B,
                                                 -(__v4df) __C),
                     (__v4df) __C);
@@ -1581,7 +1581,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask_fnmsub_ps(__m128 __A, __mmask8 __U, __m128 __B, __m128 __C)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              -(__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf) __A);
@@ -1591,7 +1591,7 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_mask3_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C, __mmask8 __U)
 {
   return (__m128) __builtin_ia32_selectps_128((__mmask8) __U,
-                    __builtin_ia32_vfmaddps ((__v4sf) __A,
+                    __builtin_elementwise_fma ((__v4sf) __A,
                                              -(__v4sf) __B,
                                              -(__v4sf) __C),
                     (__v4sf) __C);
@@ -1601,7 +1601,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask_fnmsub_ps(__m256 __A, __mmask8 __U, __m256 __B, __m256 __C)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 -(__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf) __A);
@@ -1611,7 +1611,7 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
 {
   return (__m256) __builtin_ia32_selectps_256((__mmask8) __U,
-                    __builtin_ia32_vfmaddps256 ((__v8sf) __A,
+                    __builtin_elementwise_fma ((__v8sf) __A,
                                                 -(__v8sf) __B,
                                                 -(__v8sf) __C),
                     (__v8sf) __C);
diff --git a/clang/lib/Headers/fma4intrin.h b/clang/lib/Headers/fma4intrin.h
index 694801b3e8dd4..cc175635336eb 100644
--- a/clang/lib/Headers/fma4intrin.h
+++ b/clang/lib/Headers/fma4intrin.h
@@ -23,13 +23,13 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, 
(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -47,13 +47,13 @@ _mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -71,13 +71,13 @@ _mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, 
(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -95,13 +95,13 @@ _mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
 }
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
@@ -143,49 +143,49 @@ _mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, 
(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, 
(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, 
(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, 
(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
 }
 
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
 }
 
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
diff --git a/clang/lib/Headers/fmaintrin.h b/clang/lib/Headers/fmaintrin.h
index 22d1a780bbfd4..0d55a453df66a 100644
--- a/clang/lib/Headers/fmaintrin.h
+++ b/clang/lib/Headers/fmaintrin.h
@@ -35,7 +35,7 @@
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
 }
 
 /// Computes a multiply-add of 128-bit vectors of [2 x double].
@@ -55,7 +55,7 @@ _mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, 
(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
 /// Computes a scalar multiply-add of the single-precision values in the
@@ -133,7 +133,7 @@ _mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma((__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
 }
 
 /// Computes a multiply-subtract of 128-bit vectors of [2 x double].
@@ -153,7 +153,7 @@ _mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma((__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
 /// Computes a scalar multiply-subtract of the single-precision values in
@@ -231,7 +231,7 @@ _mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, 
(__v4sf)__C);
 }
 
 /// Computes a negated multiply-add of 128-bit vectors of [2 x double].
@@ -251,7 +251,7 @@ _mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, 
(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, 
(__v2df)__C);
 }
 
 /// Computes a scalar negated multiply-add of the single-precision values in
@@ -329,7 +329,7 @@ _mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m128 __DEFAULT_FN_ATTRS128
 _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 {
-  return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
+  return (__m128)__builtin_elementwise_fma(-(__v4sf)__A, (__v4sf)__B, 
-(__v4sf)__C);
 }
 
 /// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
@@ -349,7 +349,7 @@ _mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
 static __inline__ __m128d __DEFAULT_FN_ATTRS128
 _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
 {
-  return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
+  return (__m128d)__builtin_elementwise_fma(-(__v2df)__A, (__v2df)__B, 
-(__v2df)__C);
 }
 
 /// Computes a scalar negated multiply-subtract of the single-precision
@@ -531,7 +531,7 @@ _mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
 }
 
 /// Computes a multiply-add of 256-bit vectors of [4 x double].
@@ -551,7 +551,7 @@ _mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, 
(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, 
(__v4df)__C);
 }
 
 /// Computes a multiply-subtract of 256-bit vectors of [8 x float].
@@ -571,7 +571,7 @@ _mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma((__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
 }
 
 /// Computes a multiply-subtract of 256-bit vectors of [4 x double].
@@ -591,7 +591,7 @@ _mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma((__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
 }
 
 /// Computes a negated multiply-add of 256-bit vectors of [8 x float].
@@ -611,7 +611,7 @@ _mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, 
(__v8sf)__C);
 }
 
 /// Computes a negated multiply-add of 256-bit vectors of [4 x double].
@@ -631,7 +631,7 @@ _mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, 
(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, 
(__v4df)__C);
 }
 
 /// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
@@ -651,7 +651,7 @@ _mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
 static __inline__ __m256 __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 {
-  return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
+  return (__m256)__builtin_elementwise_fma(-(__v8sf)__A, (__v8sf)__B, 
-(__v8sf)__C);
 }
 
 /// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
@@ -671,7 +671,7 @@ _mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
 static __inline__ __m256d __DEFAULT_FN_ATTRS256
 _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
 {
-  return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
+  return (__m256d)__builtin_elementwise_fma(-(__v4df)__A, (__v4df)__B, 
-(__v4df)__C);
 }
 
 /// Computes a multiply with alternating add/subtract of 256-bit vectors of
diff --git a/clang/test/CodeGen/target-builtin-noerror.c 
b/clang/test/CodeGen/target-builtin-noerror.c
index 0bbd8c3e5ddd8..04beff90e2eb8 100644
--- a/clang/test/CodeGen/target-builtin-noerror.c
+++ b/clang/test/CodeGen/target-builtin-noerror.c
@@ -32,15 +32,15 @@ int qq(void) {
 
 // Test that fma and fma4 are both separately and combined valid for an fma 
intrinsic.
 __m128 __attribute__((target("fma"))) fma_1(__m128 a, __m128 b, __m128 c) {
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_elementwise_fma(a, b, c);
 }
 
 __m128 __attribute__((target("fma4"))) fma_2(__m128 a, __m128 b, __m128 c) {
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_elementwise_fma(a, b, c);
 }
 
 __m128 __attribute__((target("fma,fma4"))) fma_3(__m128 a, __m128 b, __m128 c) 
{
-  return __builtin_ia32_vfmaddps(a, b, c);
+  return __builtin_elementwise_fma(a, b, c);
 }
 
 void verifyfeaturestrings(void) {

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to