https://github.com/rampitec created https://github.com/llvm/llvm-project/pull/152578

None
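
With bf16 added to isFMAFasterThanFMulAndFAdd, a contract-flagged fmul+fadd on bfloat operands is now selected as a fused multiply-add: v_pk_fma_bf16 for <2 x bfloat> on targets with packed bf16 VALU instructions, while the llvm.fmuladd.bf16 lowering exercised in bf16.ll switches from a separate multiply and add to v_fma_f32/v_fmac_f32 on targets that promote bf16 to f32. A minimal IR sketch of the fused pattern, adapted from the updated bf16-math.ll test (the ret void terminator is assumed; the hunk below elides it):

define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
  ; contract on both operations permits fusing the multiply and the add
  %mul = fmul contract <2 x bfloat> %a, %b
  %add = fadd contract <2 x bfloat> %mul, %c
  store <2 x bfloat> %add, ptr addrspace(1) %out
  ret void
}

As the test update shows, this now compiles to a single "v_pk_fma_bf16 v2, v2, v3, v4" plus the store, instead of v_pk_mul_bf16 followed by v_pk_add_bf16.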

From 6a9971d7cadb2dcc0169f02f92bd3f1eafb65635 Mon Sep 17 00:00:00 2001
From: Stanislav Mekhanoshin <stanislav.mekhanos...@amd.com>
Date: Thu, 7 Aug 2025 12:11:17 -0700
Subject: [PATCH] [AMDGPU] Enable CodeGen for v_pk_fma_bf16

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp |    1 +
 llvm/test/CodeGen/AMDGPU/bf16-math.ll     |   43 +-
 llvm/test/CodeGen/AMDGPU/bf16.ll          | 1139 +++++++--------------
 3 files changed, 392 insertions(+), 791 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8f44c03d95b43..fd1be72ce6d82 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6106,6 +6106,7 @@ bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
   case MVT::f64:
     return true;
   case MVT::f16:
+  case MVT::bf16:
     return Subtarget->has16BitInsts() && !denormalModeIsFlushAllF64F16(MF);
   default:
     break;
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 682b3b4d57209..3a82f848f06a5 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -370,6 +370,9 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_exp_bf16_e64 v0, v0 clamp
 ; GCN-NEXT:    ; return to shader part epilog
+
+
+
   %exp = call bfloat @llvm.exp2.bf16(bfloat %src)
   %max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
   %clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0)
@@ -381,6 +384,9 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    v_pk_mul_bf16 v0, v0, v1 clamp
 ; GCN-NEXT:    ; return to shader part epilog
+
+
+
   %mul = fmul <2 x bfloat> %src0, %src1
  %max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>)
  %clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>)
@@ -391,11 +397,12 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa
 define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
 ; GCN-LABEL: v_test_mul_add_v2bf16_vvv:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_pk_mul_bf16 v2, v2, v3
-; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT:    v_pk_add_bf16 v2, v2, v4
+; GCN-NEXT:    v_pk_fma_bf16 v2, v2, v3, v4
 ; GCN-NEXT:    global_store_b32 v[0:1], v2, off
 ; GCN-NEXT:    s_endpgm
+
+
+
   %mul = fmul contract <2 x bfloat> %a, %b
   %add = fadd contract <2 x bfloat> %mul, %c
   store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -405,11 +412,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl
 define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
 ; GCN-LABEL: v_test_mul_add_v2bf16_vss:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT:    v_pk_add_bf16 v2, v2, s1
+; GCN-NEXT:    v_pk_fma_bf16 v2, v2, s0, s1
 ; GCN-NEXT:    global_store_b32 v[0:1], v2, off
 ; GCN-NEXT:    s_endpgm
+
+
+
   %mul = fmul contract <2 x bfloat> %a, %b
   %add = fadd contract <2 x bfloat> %mul, %c
   store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -419,11 +427,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl
 define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
 ; GCN-LABEL: v_test_mul_add_v2bf16_sss:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_pk_mul_bf16 v2, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
 ; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT:    v_pk_add_bf16 v2, v2, s2
+; GCN-NEXT:    v_pk_fma_bf16 v2, s0, s1, v2
 ; GCN-NEXT:    global_store_b32 v[0:1], v2, off
 ; GCN-NEXT:    s_endpgm
+
+
+
   %mul = fmul contract <2 x bfloat> %a, %b
   %add = fadd contract <2 x bfloat> %mul, %c
   store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -433,11 +444,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl
 define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
 ; GCN-LABEL: v_test_mul_add_v2bf16_vsc:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT:    v_pk_add_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT:    v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0]
 ; GCN-NEXT:    global_store_b32 v[0:1], v2, off
 ; GCN-NEXT:    s_endpgm
+
+
+
   %mul = fmul contract <2 x bfloat> %a, %b
   %add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5>
   store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -447,11 +459,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl
 define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) {
 ; GCN-LABEL: v_test_mul_add_v2bf16_vll:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_pk_mul_bf16 v2, 0x42c83f80, v2
-; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT:    v_pk_add_bf16 v2, 0x43484000, v2
+; GCN-NEXT:    s_mov_b32 s0, 0x43484000
+; GCN-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT:    v_pk_fma_bf16 v2, 0x42c83f80, v2, s0
 ; GCN-NEXT:    global_store_b32 v[0:1], v2, off
 ; GCN-NEXT:    s_endpgm
+
+
+
   %mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
   %add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0>
   store <2 x bfloat> %add, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 52e697cae9fe5..8f8ea13487810 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -47043,18 +47043,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX8-LABEL: v_fmuladd_bf16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT:    v_or_b32_e32 v3, 0x400000, v0
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX8-NEXT:    v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
@@ -47067,20 +47059,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX900-LABEL: v_fmuladd_bf16:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX900-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX900-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX900-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX900-NEXT:    v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT:    v_or_b32_e32 v3, 0x400000, v0
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX900-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX900-NEXT:    v_or_b32_e32 v2, 0x400000, v0
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX900-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47090,35 +47075,25 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX950-LABEL: v_fmuladd_bf16:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX950-NEXT:    v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v2, s0
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fmuladd_bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v3, 0x400000, v0
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc_lo
-; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX10-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v2, 0x400000, v0
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT:    v_bfe_u32 v0, v2, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v1, 0x400000, v2
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT:    v_add3_u32 v0, v0, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc_lo
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -47126,55 +47101,38 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
 ; GFX11TRUE16:       ; %bb.0:
 ; GFX11TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, 0
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.h, v1.l
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.h, v2.l
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.h, v1.l
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.h, v0.l
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v3.l
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v3.l
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v0, v1, v3
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.h, v2.l
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v4, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc_lo
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT:    v_fmac_f32_e32 v3, v1, v2
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v1, 0x400000, v3
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT:    v_add3_u32 v0, v0, v3, 0x7fff
 ; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v0, v0, v3
-; GFX11TRUE16-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v2, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc_lo
 ; GFX11TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
 ; GFX11TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11FAKE16-LABEL: v_fmuladd_bf16:
 ; GFX11FAKE16:       ; %bb.0:
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v3, 0x400000, v0
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v2, v0, v1
+; GFX11FAKE16-NEXT:    v_bfe_u32 v0, v2, 16, 1
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v1, 0x400000, v2
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v0, v1, v3 :: v_dual_lshlrev_b32 v1, 16, v2
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v2, 0x400000, v0
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11FAKE16-NEXT:    v_add3_u32 v0, v0, v2, 0x7fff
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c)
@@ -47235,39 +47193,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX8-LABEL: v_fmuladd_v2bf16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
-; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
-; GFX8-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v3
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v2
-; GFX8-NEXT:    v_add_f32_e32 v3, v3, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
+; GFX8-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
+; GFX8-NEXT:    v_fma_f32 v3, v5, v4, v3
 ; GFX8-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v4, v3
+; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x7fff, v4
+; GFX8-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, s4, v1
-; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v0
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
-; GFX8-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX8-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v1, vcc, v1, v0
 ; GFX8-NEXT:    v_add_u32_e32 v1, vcc, 0x7fff, v1
 ; GFX8-NEXT:    v_or_b32_e32 v2, 0x400000, v0
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -47279,36 +47220,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX900-LABEL: v_fmuladd_v2bf16:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
-; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
-; GFX900-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX900-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX900-NEXT:    v_add3_u32 v4, v4, v3, s4
-; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX900-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 16, v2
-; GFX900-NEXT:    v_add_f32_e32 v3, v3, v4
+; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
+; GFX900-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
+; GFX900-NEXT:    v_fma_f32 v3, v5, v4, v3
+; GFX900-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX900-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX900-NEXT:    v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX900-NEXT:    v_fma_f32 v0, v0, v1, v2
 ; GFX900-NEXT:    v_add3_u32 v4, v4, v3, s4
 ; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v3
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX900-NEXT:    v_bfe_u32 v1, v0, 16, 1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX900-NEXT:    v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT:    v_or_b32_e32 v4, 0x400000, v0
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT:    v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT:    v_and_b32_e32 v1, 0xffff0000, v2
-; GFX900-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT:    v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT:    v_add3_u32 v1, v1, v0, s4
 ; GFX900-NEXT:    v_or_b32_e32 v2, 0x400000, v0
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX900-NEXT:    v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47319,150 +47246,94 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
 ; GFX950-LABEL: v_fmuladd_v2bf16:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_and_b32_e32 v3, 0xffff0000, v1
-; GFX950-NEXT:    v_and_b32_e32 v4, 0xffff0000, v0
+; GFX950-NEXT:    v_and_b32_e32 v3, 0xffff0000, v2
+; GFX950-NEXT:    v_and_b32_e32 v4, 0xffff0000, v1
+; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX950-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT:    v_and_b32_e32 v4, 0xffff0000, v2
-; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT:    v_add_f32_e32 v3, v3, v4
-; GFX950-NEXT:    v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT:    v_fmac_f32_e32 v3, v5, v4
+; GFX950-NEXT:    v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v2, v3
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fmuladd_v2bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
-; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_mul_f32_e32 v3, v4, v3
-; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT:    v_bfe_u32 v1, v3, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v0
-; GFX10-NEXT:    v_add3_u32 v1, v1, v3, 0x7fff
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX10-NEXT:    v_add3_u32 v4, v4, v0, 0x7fff
+; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v1
-; GFX10-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX10-NEXT:    v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
+; GFX10-NEXT:    v_fmac_f32_e32 v3, v5, v4
+; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT:    v_bfe_u32 v0, v3, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v4, 0x400000, v3
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT:    v_bfe_u32 v1, v2, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT:    v_add3_u32 v0, v0, v3, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v1, v1, v2, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_fmuladd_v2bf16:
 ; GFX11TRUE16:       ; %bb.0:
 ; GFX11TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v1, v3, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT:    v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX11TRUE16-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v6, 0x400000, v0
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v4, 0x400000, v1
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v1, v0
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT:    v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11TRUE16-NEXT:    v_bfe_u32 v1, v2, 16, 1
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11TRUE16-NEXT:    v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v4, 0x400000, v3
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v2
+; GFX11TRUE16-NEXT:    v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11TRUE16-NEXT:    v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v0, v1
 ; GFX11TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11FAKE16-LABEL: v_fmuladd_v2bf16:
 ; GFX11FAKE16:       ; %bb.0:
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v1
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v4, 16, v0
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v4, 16, v1
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v1, v3, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT:    v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX11FAKE16-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v4, 0x400000, v1
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT:    v_bfe_u32 v3, v0, 16, 1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX11FAKE16-NEXT:    v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v3, v5, vcc_lo
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT:    v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11FAKE16-NEXT:    v_bfe_u32 v1, v2, 16, 1
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT:    v_bfe_u32 v0, v3, 16, 1
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v4, 0x400000, v3
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v2
+; GFX11FAKE16-NEXT:    v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11FAKE16-NEXT:    v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc_lo
 ; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v1, 0x7060302
+; GFX11FAKE16-NEXT:    v_perm_b32 v0, v1, v0, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
  %op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
   ret <2 x bfloat> %op
@@ -47542,57 +47413,33 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX8-LABEL: v_fmuladd_v3bf16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX8-NEXT:    v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v3
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT:    v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v0
+; GFX8-NEXT:    v_fma_f32 v3, v6, v5, v3
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
+; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX8-NEXT:    v_or_b32_e32 v6, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v0
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -47605,52 +47452,31 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX900-LABEL: v_fmuladd_v3bf16:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX900-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX900-NEXT:    v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT:    v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX900-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX900-NEXT:    v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT:    v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT:    v_or_b32_e32 v6, 0x400000, v3
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX900-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT:    v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT:    v_lshlrev_b32_e32 v6, 16, v0
+; GFX900-NEXT:    v_fma_f32 v3, v6, v5, v3
+; GFX900-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX900-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX900-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT:    v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX900-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX900-NEXT:    v_or_b32_e32 v6, 0x400000, v3
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX900-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v3, v5, v6, vcc
 ; GFX900-NEXT:    v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX900-NEXT:    v_or_b32_e32 v4, 0x400000, v0
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX900-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -47662,211 +47488,132 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
 ; GFX950-LABEL: v_fmuladd_v3bf16:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT:    v_add_f32_e32 v1, v1, v3
-; GFX950-NEXT:    v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v5, s0
+; GFX950-NEXT:    v_and_b32_e32 v3, 0xffff0000, v4
+; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v2
+; GFX950-NEXT:    v_and_b32_e32 v6, 0xffff0000, v0
+; GFX950-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT:    v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT:    v_fmac_f32_e32 v3, v6, v5
+; GFX950-NEXT:    v_fmac_f32_e32 v4, v0, v2
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v4, v3
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fmuladd_v3bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v4
+; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v2
-; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT:    v_mul_f32_e32 v3, v7, v6
-; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v3
-; GFX10-NEXT:    v_add3_u32 v7, v7, v3, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v0
-; GFX10-NEXT:    v_add3_u32 v8, v8, v0, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
 ; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_add_f32_e32 v1, v1, v3
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT:    v_add_f32_e32 v2, v2, v5
 ; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT:    v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v2
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_bfe_u32 v4, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT:    v_add3_u32 v4, v4, v1, 0x7fff
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX10-NEXT:    v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX10-NEXT:    v_alignbit_b32 v1, s4, v1, 16
+; GFX10-NEXT:    v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT:    v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT:    v_bfe_u32 v1, v6, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v3, 0x400000, v6
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT:    v_bfe_u32 v0, v5, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v2, v4, 16, 1
+; GFX10-NEXT:    v_add3_u32 v1, v1, v6, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v7, 0x400000, v4
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v5
+; GFX10-NEXT:    v_add3_u32 v0, v0, v5, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v2, v2, v4, 0x7fff
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v0, v2, v1, 0x7060302
+; GFX10-NEXT:    v_alignbit_b32 v1, s4, v3, 16
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_fmuladd_v3bf16:
 ; GFX11TRUE16:       ; %bb.0:
 ; GFX11TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v0
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v2
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v0
-; GFX11TRUE16-NEXT:    v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v4
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v6, v7, v6
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v9, v1, 16, 1
-; GFX11TRUE16-NEXT:    v_bfe_u32 v7, v0, 16, 1
+; GFX11TRUE16-NEXT:    v_dual_fmac_f32 v4, v0, v2 :: v_dual_fmac_f32 v5, v1, v3
+; GFX11TRUE16-NEXT:    v_fmac_f32_e32 v6, v8, v7
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    v_bfe_u32 v1, v4, 16, 1
+; GFX11TRUE16-NEXT:    v_bfe_u32 v3, v5, 16, 1
 ; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v2, v6, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v8, 0x400000, v6
+; GFX11TRUE16-NEXT:    v_bfe_u32 v0, v6, 16, 1
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v2, 0x400000, v6
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v10, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11TRUE16-NEXT:    v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
-; GFX11TRUE16-NEXT:    v_add3_u32 v8, v9, v1, 0x7fff
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v7, v10, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v2, v2, v3
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v1, v8, v6 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX11TRUE16-NEXT:    v_dual_add_f32 v0, v0, v4 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11TRUE16-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v1, v1, v5
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v5, 0x400000, v2
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v7, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v6, v1, 16, 1
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v3, 0x400000, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_add3_u32 v5, v6, v1, 0x7fff
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v4, v7, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11TRUE16-NEXT:    v_add3_u32 v1, v1, v4, 0x7fff
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v7, 0x400000, v4
+; GFX11TRUE16-NEXT:    v_add3_u32 v0, v0, v6, 0x7fff
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11TRUE16-NEXT:    v_add3_u32 v2, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v3, 0x400000, v5
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
 ; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v2, v0
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v5, v3, vcc_lo
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
+; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v2.h
 ; GFX11TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11FAKE16-LABEL: v_fmuladd_v3bf16:
 ; GFX11FAKE16:       ; %bb.0:
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v2
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v0
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_dual_mul_f32 v0, v0, v2 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11FAKE16-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v10, 0x400000, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v3, v7, v6
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v9, 0x400000, v3
-; GFX11FAKE16-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v4
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v0
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v2, v2, v5
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v7, 0x400000, v2
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT:    v_bfe_u32 v3, v2, 16, 1
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT:    v_bfe_u32 v5, v0, 16, 1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v4, v1, 16, 1
-; GFX11FAKE16-NEXT:    v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT:    v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11FAKE16-NEXT:    v_add3_u32 v4, v4, v1, 0x7fff
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v1, 16
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX11FAKE16-NEXT:    v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v5, 16, v5
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v4, 16, 1
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v7, 0x400000, v4
+; GFX11FAKE16-NEXT:    v_add3_u32 v2, v2, v4, 0x7fff
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT:    v_bfe_u32 v1, v6, 16, 1
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT:    v_bfe_u32 v0, v5, 16, 1
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_add3_u32 v1, v1, v6, 0x7fff
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v5
+; GFX11FAKE16-NEXT:    v_add3_u32 v0, v0, v5, 0x7fff
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX11FAKE16-NEXT:    v_perm_b32 v0, v2, v1, 0x7060302
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT:    v_alignbit_b32 v1, s0, v3, 16
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
  %op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c)
   ret <3 x bfloat> %op
@@ -47966,75 +47713,43 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX8-LABEL: v_fmuladd_v4bf16:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
-; GFX8-NEXT:    v_mul_f32_e32 v6, v7, v6
-; GFX8-NEXT:    v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v6
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT:    v_or_b32_e32 v8, 0x400000, v6
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
-; GFX8-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX8-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v5
-; GFX8-NEXT:    v_add_f32_e32 v6, v6, v7
+; GFX8-NEXT:    v_lshlrev_b32_e32 v6, 16, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v3
+; GFX8-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
+; GFX8-NEXT:    v_fma_f32 v6, v8, v7, v6
 ; GFX8-NEXT:    v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_add_u32_e32 v7, vcc, v7, v6
+; GFX8-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT:    v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0x7fff, v7
+; GFX8-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX8-NEXT:    v_or_b32_e32 v8, 0x400000, v6
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
+; GFX8-NEXT:    s_movk_i32 s4, 0x7fff
 ; GFX8-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
 ; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v1
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
-; GFX8-NEXT:    v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT:    v_add_u32_e32 v3, vcc, s4, v3
 ; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v3
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX8-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT:    v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT:    v_lshlrev_b32_e32 v7, 16, v0
+; GFX8-NEXT:    v_fma_f32 v3, v7, v5, v3
 ; GFX8-NEXT:    v_bfe_u32 v5, v3, 16, 1
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT:    v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX8-NEXT:    v_or_b32_e32 v7, 0x400000, v3
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v2, v0
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, 0x7fff, v2
 ; GFX8-NEXT:    v_or_b32_e32 v4, 0x400000, v0
 ; GFX8-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
@@ -48048,68 +47763,40 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX900-LABEL: v_fmuladd_v4bf16:
 ; GFX900:       ; %bb.0:
 ; GFX900-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT:    v_lshlrev_b32_e32 v6, 16, v3
-; GFX900-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
-; GFX900-NEXT:    v_mul_f32_e32 v6, v7, v6
-; GFX900-NEXT:    v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
-; GFX900-NEXT:    v_add3_u32 v7, v7, v6, s4
-; GFX900-NEXT:    v_or_b32_e32 v8, 0x400000, v6
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
-; GFX900-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX900-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX900-NEXT:    v_lshlrev_b32_e32 v7, 16, v5
-; GFX900-NEXT:    v_add_f32_e32 v6, v6, v7
+; GFX900-NEXT:    v_lshlrev_b32_e32 v6, 16, v5
+; GFX900-NEXT:    v_lshlrev_b32_e32 v7, 16, v3
+; GFX900-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
+; GFX900-NEXT:    v_fma_f32 v6, v8, v7, v6
+; GFX900-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX900-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX900-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX900-NEXT:    v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT:    v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX900-NEXT:    v_fma_f32 v1, v1, v3, v5
 ; GFX900-NEXT:    v_add3_u32 v7, v7, v6, s4
 ; GFX900-NEXT:    v_or_b32_e32 v8, 0x400000, v6
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v6, v6
 ; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v6, v7, v8, vcc
 ; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT:    v_or_b32_e32 v7, 0x400000, v1
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT:    v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX900-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT:    v_and_b32_e32 v3, 0xffff0000, v5
-; GFX900-NEXT:    v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT:    v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT:    v_add3_u32 v3, v3, v1, s4
 ; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v1
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v1, v1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT:    v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT:    v_or_b32_e32 v7, 0x400000, v3
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX900-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT:    v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT:    v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT:    v_lshlrev_b32_e32 v7, 16, v0
+; GFX900-NEXT:    v_fma_f32 v3, v7, v5, v3
+; GFX900-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX900-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
 ; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX900-NEXT:    v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT:    v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT:    v_fma_f32 v0, v0, v2, v4
 ; GFX900-NEXT:    v_add3_u32 v5, v5, v3, s4
 ; GFX900-NEXT:    v_or_b32_e32 v7, 0x400000, v3
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v3, v3
 ; GFX900-NEXT:    v_bfe_u32 v2, v0, 16, 1
 ; GFX900-NEXT:    v_cndmask_b32_e32 v3, v5, v7, vcc
 ; GFX900-NEXT:    v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT:    v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT:    v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT:    v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT:    v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT:    v_add3_u32 v2, v2, v0, s4
 ; GFX900-NEXT:    v_or_b32_e32 v4, 0x400000, v0
 ; GFX900-NEXT:    v_cmp_u_f32_e32 vcc, v0, v0
 ; GFX900-NEXT:    v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -48121,264 +47808,162 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
 ; GFX950-LABEL: v_fmuladd_v4bf16:
 ; GFX950:       ; %bb.0:
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT:    v_and_b32_e32 v6, 0xffff0000, v3
-; GFX950-NEXT:    v_and_b32_e32 v7, 0xffff0000, v1
+; GFX950-NEXT:    v_and_b32_e32 v6, 0xffff0000, v5
+; GFX950-NEXT:    v_and_b32_e32 v7, 0xffff0000, v3
+; GFX950-NEXT:    v_and_b32_e32 v8, 0xffff0000, v1
+; GFX950-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT:    v_mul_f32_e32 v6, v7, v6
-; GFX950-NEXT:    v_and_b32_e32 v7, 0xffff0000, v5
-; GFX950-NEXT:    v_add_f32_e32 v1, v1, v3
+; GFX950-NEXT:    v_fmac_f32_e32 v6, v8, v7
+; GFX950-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
 ; GFX950-NEXT:    v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT:    v_and_b32_e32 v7, 0xffff0000, v0
+; GFX950-NEXT:    v_fmac_f32_e32 v1, v7, v3
+; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v4
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v6, v6, s0
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
-; GFX950-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT:    v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT:    v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT:    v_add_f32_e32 v6, v6, v7
-; GFX950-NEXT:    v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT:    v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v3
-; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v1, v6
+; GFX950-NEXT:    v_fmac_f32_e32 v3, v0, v2
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v0, v3, v1
+; GFX950-NEXT:    v_cvt_pk_bf16_f32 v1, v5, v6
 ; GFX950-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_fmuladd_v4bf16:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v3
-; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
+; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v5
+; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v3
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
 ; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
 ; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_mul_f32_e32 v6, v7, v6
-; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT:    v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT:    v_lshlrev_b32_e32 v7, 16, v4
+; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v2
+; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
 ; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT:    v_lshlrev_b32_e32 v8, 16, v5
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
 ; GFX10-NEXT:    v_bfe_u32 v10, v6, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v3, 0x400000, v6
-; GFX10-NEXT:    v_mul_f32_e32 v7, v9, v7
-; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT:    v_add3_u32 v10, v10, v6, 0x7fff
+; GFX10-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT:    v_fmac_f32_e32 v7, v9, v8
+; GFX10-NEXT:    v_or_b32_e32 v1, 0x400000, v6
+; GFX10-NEXT:    v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT:    v_add3_u32 v0, v10, v6, 0x7fff
+; GFX10-NEXT:    v_bfe_u32 v2, v5, 16, 1
+; GFX10-NEXT:    v_bfe_u32 v3, v7, 16, 1
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX10-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v11, v0, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v10, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_or_b32_e32 v10, 0x400000, v7
-; GFX10-NEXT:    v_add3_u32 v9, v9, v7, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v12, 0x400000, v0
-; GFX10-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v2, v6, vcc_lo
+; GFX10-NEXT:    v_bfe_u32 v8, v4, 16, 1
+; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX10-NEXT:    v_add3_u32 v0, v2, v5, 0x7fff
+; GFX10-NEXT:    v_add3_u32 v2, v3, v7, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v3, 0x400000, v7
 ; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT:    v_add3_u32 v11, v11, v0, 0x7fff
-; GFX10-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT:    v_add_f32_e32 v3, v3, v8
-; GFX10-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v9, v10, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 16, v4
-; GFX10-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX10-NEXT:    v_add_f32_e32 v1, v1, v5
-; GFX10-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT:    v_add_f32_e32 v2, v2, v6
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT:    v_bfe_u32 v6, v1, 16, 1
-; GFX10-NEXT:    v_or_b32_e32 v9, 0x400000, v1
-; GFX10-NEXT:    v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT:    v_add3_u32 v4, v7, v3, 0x7fff
-; GFX10-NEXT:    v_bfe_u32 v7, v2, 16, 1
-; GFX10-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX10-NEXT:    v_add3_u32 v4, v6, v1, 0x7fff
-; GFX10-NEXT:    v_add3_u32 v5, v7, v2, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v6, 0x400000, v2
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT:    v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT:    v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX10-NEXT:    v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT:    v_add3_u32 v6, v8, v4, 0x7fff
+; GFX10-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX10-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX10-NEXT:    v_perm_b32 v0, v3, v2, 0x7060302
+; GFX10-NEXT:    v_perm_b32 v1, v4, v1, 0x7060302
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11TRUE16-LABEL: v_fmuladd_v4bf16:
 ; GFX11TRUE16:       ; %bb.0:
 ; GFX11TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v9, 0xffff0000, v0
-; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v7, 0xffff0000, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_dual_mul_f32 v6, v7, v6 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v7, 0xffff0000, v3
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v10, 16, v0
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v1
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v8, 0xffff0000, v5
-; GFX11TRUE16-NEXT:    v_bfe_u32 v10, v6, 16, 1
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v5
 ; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v1, v1, v3
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v7, 0xffff0000, v2
-; GFX11TRUE16-NEXT:    v_dual_mul_f32 v3, v9, v7 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v7, 0x400000, v6
-; GFX11TRUE16-NEXT:    v_add3_u32 v9, v10, v6, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v10, 0x400000, v1
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v6, v9, v7, vcc_lo
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v9, v0, 16, 1
-; GFX11TRUE16-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v11, 0x400000, v0
-; GFX11TRUE16-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX11TRUE16-NEXT:    v_add3_u32 v9, v9, v0, 0x7fff
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v2, v10, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v0, v9, v11 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v2, v6, v8
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v6, 0x400000, v3
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT:    v_bfe_u32 v5, v1, 16, 1
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX11TRUE16-NEXT:    v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11TRUE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v2
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v4
+; GFX11TRUE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT:    v_bfe_u32 v3, v5, 16, 1
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11TRUE16-NEXT:    v_fmac_f32_e32 v7, v10, v8
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v8, 0x400000, v5
+; GFX11TRUE16-NEXT:    v_fmac_f32_e32 v1, v0, v2
+; GFX11TRUE16-NEXT:    v_add3_u32 v3, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT:    v_bfe_u32 v9, v6, 16, 1
+; GFX11TRUE16-NEXT:    v_bfe_u32 v0, v7, 16, 1
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v10, 0x400000, v7
+; GFX11TRUE16-NEXT:    v_or_b32_e32 v2, 0x400000, v6
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11TRUE16-NEXT:    v_add3_u32 v0, v0, v7, 0x7fff
+; GFX11TRUE16-NEXT:    v_add3_u32 v4, v9, v6, 0x7fff
+; GFX11TRUE16-NEXT:    v_bfe_u32 v9, v1, 16, 1
 ; GFX11TRUE16-NEXT:    v_or_b32_e32 v8, 0x400000, v1
-; GFX11TRUE16-NEXT:    v_dual_cndmask_b32 v3, v7, v6 :: v_dual_lshlrev_b32 v6, 16, v4
-; GFX11TRUE16-NEXT:    v_bfe_u32 v7, v2, 16, 1
-; GFX11TRUE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT:    v_add3_u32 v5, v5, v1, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_dual_add_f32 v0, v0, v6 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11TRUE16-NEXT:    v_add3_u32 v6, v7, v2, 0x7fff
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v7, 0x400000, v2
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v3.l, v3.h
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
+; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT:    v_add3_u32 v5, v9, v1, 0x7fff
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc_lo
 ; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_add_f32_e32 v3, v3, v4
-; GFX11TRUE16-NEXT:    v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v10, 0x400000, v0
 ; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT:    v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v0, v4, v10, vcc_lo
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT:    v_mov_b16_e32 v0.l, v0.h
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v2, v6, v7, vcc_lo
-; GFX11TRUE16-NEXT:    v_bfe_u32 v9, v3, 16, 1
-; GFX11TRUE16-NEXT:    v_or_b32_e32 v8, 0x400000, v3
-; GFX11TRUE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT:    v_bfi_b32 v1, 0xffff, v1, v2
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_add3_u32 v5, v9, v3, 0x7fff
-; GFX11TRUE16-NEXT:    v_cndmask_b32_e32 v3, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v0, v3
+; GFX11TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT:    v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT:    v_bfi_b32 v1, 0xffff, v3, v2
 ; GFX11TRUE16-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11FAKE16-LABEL: v_fmuladd_v4bf16:
 ; GFX11FAKE16:       ; %bb.0:
 ; GFX11FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v1
+; GFX11FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
 ; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v9, 16, v0
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v1
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v5
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v3
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v3
 ; GFX11FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v7, 16, v2
-; GFX11FAKE16-NEXT:    v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v6, 16, v5
+; GFX11FAKE16-NEXT:    v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT:    v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
 ; GFX11FAKE16-NEXT:    v_bfe_u32 v10, v6, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v1, 0x400000, v6
 ; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11FAKE16-NEXT:    v_mul_f32_e32 v7, v9, v7
-; GFX11FAKE16-NEXT:    v_add3_u32 v10, v10, v6, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v9, v7, 16, 1
-; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2
-; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v10, 0x400000, v7
-; GFX11FAKE16-NEXT:    v_add3_u32 v9, v9, v7, 0x7fff
-; GFX11FAKE16-NEXT:    v_bfe_u32 v11, v0, 16, 1
-; GFX11FAKE16-NEXT:    v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v12, 0x400000, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_add3_u32 v11, v11, v0, 0x7fff
-; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4
+; GFX11FAKE16-NEXT:    v_lshlrev_b32_e32 v8, 16, v2
+; GFX11FAKE16-NEXT:    v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT:    v_add3_u32 v0, v10, v6, 0x7fff
+; GFX11FAKE16-NEXT:    v_bfe_u32 v2, v5, 16, 1
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT:    v_fmac_f32_e32 v7, v9, v8
+; GFX11FAKE16-NEXT:    v_bfe_u32 v8, v4, 16, 1
+; GFX11FAKE16-NEXT:    v_add3_u32 v0, v2, v5, 0x7fff
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v9, 0x400000, v5
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT:    v_bfe_u32 v3, v7, 16, 1
 ; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_dual_cndmask_b32 v2, v9, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v9, 0x400000, v1
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v2, v2, v6
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT:    v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT:    v_bfe_u32 v6, v1, 16, 1
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v0, v0, v4
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT:    v_add_f32_e32 v3, v3, v8
-; GFX11FAKE16-NEXT:    v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT:    v_add3_u32 v4, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT:    v_bfe_u32 v7, v2, 16, 1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX11FAKE16-NEXT:    v_add3_u32 v4, v6, v1, 0x7fff
-; GFX11FAKE16-NEXT:    v_add3_u32 v5, v7, v2, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v6, 0x400000, v2
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT:    v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT:    v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11FAKE16-NEXT:    v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11FAKE16-NEXT:    v_add3_u32 v6, v8, v4, 0x7fff
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v8, 0x400000, v4
+; GFX11FAKE16-NEXT:    v_add3_u32 v2, v3, v7, 0x7fff
+; GFX11FAKE16-NEXT:    v_or_b32_e32 v3, 0x400000, v7
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX11FAKE16-NEXT:    v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT:    v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX11FAKE16-NEXT:    v_perm_b32 v0, v3, v2, 0x7060302
+; GFX11FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT:    v_perm_b32 v1, v4, v1, 0x7060302
 ; GFX11FAKE16-NEXT:    s_setpc_b64 s[30:31]
   %op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
   ret <4 x bfloat> %op
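
A minimal standalone sketch (not part of the patch) of the pattern the v_fmuladd_v4bf16 checks above exercise, for anyone who wants to look at the new lowering locally. The llc invocation, the gfx1100 -mcpu value, and the file/function names are assumptions for illustration only; the RUN lines of the test itself remain authoritative.

  ; Hypothetical reproducer, e.g.:  llc -mtriple=amdgcn -mcpu=gfx1100 repro.ll -o -
  declare <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat>, <4 x bfloat>, <4 x bfloat>)

  define <4 x bfloat> @repro_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c) {
    ; Same intrinsic call as in the updated checks above; with this change it is
    ; expected to select fused bf16 FMA sequences rather than separate mul/add chains.
    %op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
    ret <4 x bfloat> %op
  }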
