https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/154500
>From f8e02ce702188b6999cab8107edebad9f75b1ca5 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <matthew.arsena...@amd.com>
Date: Wed, 20 Aug 2025 18:21:20 +0900
Subject: [PATCH] AMDGPU: Start using AV_MOV_B64_IMM_PSEUDO

---
 .../Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp  |  22 +-
 .../AMDGPU/amdgpu-prepare-agpr-alloc.mir      |  53 +-
 .../AMDGPU/av-split-dead-valno-crash.ll       |  52 +-
 .../AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll    |  16 +-
 .../CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll | 578 +++++++++---------
 ...m.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll |   8 +-
 6 files changed, 388 insertions(+), 341 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
index 3b06e9b00ac69..0137b3f5943d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
@@ -34,6 +34,8 @@ class AMDGPUPrepareAGPRAllocImpl {
   const SIInstrInfo &TII;
   MachineRegisterInfo &MRI;
 
+  bool isAV64Imm(const MachineOperand &MO) const;
+
 public:
   AMDGPUPrepareAGPRAllocImpl(const GCNSubtarget &ST, MachineRegisterInfo &MRI)
       : TII(*ST.getInstrInfo()), MRI(MRI) {}
@@ -85,11 +87,16 @@ AMDGPUPrepareAGPRAllocPass::run(MachineFunction &MF,
   return PreservedAnalyses::all();
 }
 
+bool AMDGPUPrepareAGPRAllocImpl::isAV64Imm(const MachineOperand &MO) const {
+  return MO.isImm() && TII.isLegalAV64PseudoImm(MO.getImm());
+}
+
 bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
   if (MRI.isReserved(AMDGPU::AGPR0))
     return false;
 
-  const MCInstrDesc &AVImmPseudo = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+  const MCInstrDesc &AVImmPseudo32 = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+  const MCInstrDesc &AVImmPseudo64 = TII.get(AMDGPU::AV_MOV_B64_IMM_PSEUDO);
 
   bool Changed = false;
   for (MachineBasicBlock &MBB : MF) {
@@ -98,8 +105,19 @@ bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
           TII.isInlineConstant(MI, 1)) ||
          (MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
           MI.getOperand(1).isImm())) {
-        MI.setDesc(AVImmPseudo);
+        MI.setDesc(AVImmPseudo32);
+        Changed = true;
+        continue;
+      }
+
+      // TODO: If only half of the value is rewritable, is it worth splitting it
+      // up?
+      if ((MI.getOpcode() == AMDGPU::V_MOV_B64_e64 ||
+           MI.getOpcode() == AMDGPU::V_MOV_B64_PSEUDO) &&
+          isAV64Imm(MI.getOperand(1))) {
+        MI.setDesc(AVImmPseudo64);
         Changed = true;
+        continue;
       }
     }
   }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
index d277c8104fe44..aaacf1d6f793b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
@@ -114,22 +114,22 @@ body: |
     ; HAS-AGPR-NEXT: liveins: $vgpr0_vgpr1
     ; HAS-AGPR-NEXT: {{ $}}
     ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 54, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_3:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_4:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
     ; HAS-AGPR-NEXT: {{ $}}
     ; HAS-AGPR-NEXT: bb.1:
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_5:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 3,
implicit $exec ; ; NO-AGPR-LABEL: name: func64 ; NO-AGPR: bb.0: @@ -181,14 +181,23 @@ tracksRegLiveness: true body: | bb.0: liveins: $vgpr0 - ; HAS-AGPR-LABEL: name: func64_no_agprs - ; HAS-AGPR: liveins: $vgpr0 - ; HAS-AGPR-NEXT: {{ $}} - ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec - ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec - ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec - ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec - ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec + ; GFX90A-LABEL: name: func64_no_agprs + ; GFX90A: liveins: $vgpr0 + ; GFX90A-NEXT: {{ $}} + ; GFX90A-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec + ; GFX90A-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec + ; + ; GFX908-LABEL: name: func64_no_agprs + ; GFX908: liveins: $vgpr0 + ; GFX908-NEXT: {{ $}} + ; GFX908-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec + ; GFX908-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec + ; GFX908-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec + ; GFX908-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec + ; GFX908-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec ; ; NO-AGPR-LABEL: name: func64_no_agprs ; NO-AGPR: liveins: $vgpr0 diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll index 89fe0ab526a8a..614b1e38a530f 100644 --- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll +++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll @@ -16,10 +16,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: s_bitcmp1_b32 s0, 8 ; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0 ; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3] -; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1 ; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0 -; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3] ; CHECK-NEXT: v_mov_b32_e32 v0, 0x9037ab78 +; CHECK-NEXT: v_accvgpr_write_b32 a3, v1 +; CHECK-NEXT: s_xor_b64 s[20:21], s[2:3], -1 +; CHECK-NEXT: s_and_b64 s[2:3], exec, s[2:3] +; CHECK-NEXT: v_accvgpr_write_b32 a2, v0 ; CHECK-NEXT: v_mov_b32_e32 v3, 0xbe927e4f ; CHECK-NEXT: v_mov_b32_e32 v4, 0x19f4ec90 ; CHECK-NEXT: v_mov_b32_e32 v5, 0x3efa01a0 @@ -34,14 +36,14 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: v_mov_b32_e32 v14, 0x8427b883 ; CHECK-NEXT: v_mov_b32_e32 v15, 0x3fae1bb4 ; CHECK-NEXT: s_mov_b64 s[22:23], 0 -; CHECK-NEXT: v_mov_b32_e32 v16, 0x57b87036 -; CHECK-NEXT: v_mov_b32_e32 v17, 0x3fb3b136 +; CHECK-NEXT: v_mov_b32_e32 v0, 0x57b87036 +; CHECK-NEXT: v_mov_b32_e32 v1, 0x3fb3b136 ; CHECK-NEXT: s_and_b64 s[4:5], exec, s[16:17] ; CHECK-NEXT: v_mov_b32_e32 v18, 0x55555523 ; CHECK-NEXT: 
v_mov_b32_e32 v19, 0xbfd55555 ; CHECK-NEXT: s_and_b64 s[6:7], exec, s[18:19] ; CHECK-NEXT: v_mov_b32_e32 v20, 0 -; CHECK-NEXT: ; implicit-def: $agpr0_agpr1 +; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31 ; CHECK-NEXT: ; implicit-def: $vgpr22_vgpr23 ; CHECK-NEXT: s_branch .LBB0_2 ; CHECK-NEXT: .LBB0_1: ; %Flow9 @@ -61,9 +63,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[14:15] ; CHECK-NEXT: flat_load_dwordx2 v[24:25], v[24:25] -; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[0:1] +; CHECK-NEXT: v_accvgpr_read_b32 v27, a3 +; CHECK-NEXT: v_accvgpr_read_b32 v26, a2 ; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[2:3] -; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[16:17] +; CHECK-NEXT: v_mov_b64_e32 v[16:17], v[0:1] +; CHECK-NEXT: v_accvgpr_write_b32 a0, 0 +; CHECK-NEXT: v_accvgpr_write_b32 a1, 0 ; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[24:25] ; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27] @@ -79,10 +84,9 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[28:29] ; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[14:15] ; CHECK-NEXT: v_fmac_f64_e32 v[28:29], 0, v[26:27] -; CHECK-NEXT: v_fmac_f64_e32 v[30:31], 0, v[28:29] +; CHECK-NEXT: v_fmac_f64_e32 v[16:17], 0, v[28:29] ; CHECK-NEXT: v_mov_b64_e32 v[26:27], v[18:19] -; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[30:31] -; CHECK-NEXT: v_mov_b64_e32 v[30:31], 0 +; CHECK-NEXT: v_fmac_f64_e32 v[26:27], 0, v[16:17] ; CHECK-NEXT: s_branch .LBB0_6 ; CHECK-NEXT: .LBB0_5: ; %Flow ; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2 @@ -91,30 +95,30 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: .LBB0_6: ; %.preheader1855.i.i.i3329 ; CHECK-NEXT: ; Parent Loop BB0_2 Depth=1 ; CHECK-NEXT: ; => This Inner Loop Header: Depth=2 -; CHECK-NEXT: v_mov_b64_e32 v[28:29], v[30:31] +; CHECK-NEXT: v_accvgpr_read_b32 v29, a1 +; CHECK-NEXT: v_accvgpr_read_b32 v28, a0 ; CHECK-NEXT: s_mov_b64 s[24:25], -1 ; CHECK-NEXT: s_mov_b64 s[8:9], -1 ; CHECK-NEXT: s_mov_b64 vcc, s[2:3] -; CHECK-NEXT: ; implicit-def: $vgpr30_vgpr31 +; CHECK-NEXT: ; implicit-def: $agpr0_agpr1 ; CHECK-NEXT: s_cbranch_vccz .LBB0_5 ; CHECK-NEXT: ; %bb.7: ; %.lr.ph2070.i.i.i3291 ; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2 -; CHECK-NEXT: v_accvgpr_read_b32 v31, a1 -; CHECK-NEXT: v_accvgpr_read_b32 v30, a0 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v30 +; CHECK-NEXT: v_accvgpr_write_b32 a1, v31 ; CHECK-NEXT: s_mov_b64 s[8:9], s[18:19] ; CHECK-NEXT: s_mov_b64 vcc, s[6:7] ; CHECK-NEXT: s_cbranch_vccz .LBB0_5 ; CHECK-NEXT: ; %bb.8: ; %.preheader1856.preheader.i.i.i3325 ; CHECK-NEXT: ; in Loop: Header=BB0_6 Depth=2 +; CHECK-NEXT: v_accvgpr_write_b32 a0, v26 ; CHECK-NEXT: s_mov_b64 s[24:25], 0 -; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[26:27] +; CHECK-NEXT: v_accvgpr_write_b32 a1, v27 ; CHECK-NEXT: s_mov_b64 s[8:9], 0 ; CHECK-NEXT: s_branch .LBB0_5 ; CHECK-NEXT: .LBB0_9: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_mov_b64_e32 v[24:25], s[10:11] -; CHECK-NEXT: v_accvgpr_write_b32 a0, v24 ; CHECK-NEXT: s_mov_b64 s[22:23], 0 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v25 +; CHECK-NEXT: v_mov_b64_e32 v[30:31], s[10:11] ; CHECK-NEXT: s_mov_b64 s[8:9], s[20:21] ; CHECK-NEXT: s_branch .LBB0_15 ; CHECK-NEXT: .LBB0_10: ; in Loop: Header=BB0_2 Depth=1 @@ -128,24 +132,22 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2, ; CHECK-NEXT: 
; %bb.12: ; %._crit_edge2105.i.i.i2330.loopexit ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: v_cmp_nlg_f64_e64 s[8:9], 0, v[28:29] -; CHECK-NEXT: v_accvgpr_write_b32 a0, v24 ; CHECK-NEXT: v_cndmask_b32_e64 v23, v23, 0, s[16:17] -; CHECK-NEXT: v_cndmask_b32_e64 v26, 0, 1, s[8:9] -; CHECK-NEXT: v_mov_b32_e32 v27, v26 -; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17] ; CHECK-NEXT: v_cndmask_b32_e64 v22, v22, 0, s[16:17] -; CHECK-NEXT: global_store_dwordx2 v20, v[26:27], s[12:13] +; CHECK-NEXT: v_cndmask_b32_e64 v16, 0, 1, s[8:9] +; CHECK-NEXT: v_mov_b32_e32 v17, v16 +; CHECK-NEXT: s_and_b64 s[8:9], exec, s[16:17] +; CHECK-NEXT: global_store_dwordx2 v20, v[16:17], s[12:13] ; CHECK-NEXT: s_cselect_b32 s23, s23, 0 ; CHECK-NEXT: s_cselect_b32 s22, s22, 0 ; CHECK-NEXT: s_mov_b64 s[8:9], -1 ; CHECK-NEXT: s_branch .LBB0_14 ; CHECK-NEXT: .LBB0_13: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_accvgpr_write_b32 a0, v24 ; CHECK-NEXT: s_mov_b64 s[8:9], 0 ; CHECK-NEXT: v_mov_b64_e32 v[22:23], 0 ; CHECK-NEXT: .LBB0_14: ; %Flow6 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: v_accvgpr_write_b32 a1, v25 +; CHECK-NEXT: v_mov_b64_e32 v[30:31], v[24:25] ; CHECK-NEXT: .LBB0_15: ; %Flow6 ; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1 ; CHECK-NEXT: s_mov_b64 s[24:25], -1 diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll index 8081a15b53bb7..284ced1727b7e 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll @@ -39,16 +39,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x ; GCN-NEXT: v_accvgpr_write_b32 a13, s21 ; GCN-NEXT: v_accvgpr_write_b32 a14, s22 ; GCN-NEXT: v_accvgpr_write_b32 a15, s23 -; GCN-NEXT: v_mov_b64_e32 v[14:15], 0 ; GCN-NEXT: v_mov_b32_e32 v16, s16 +; GCN-NEXT: v_mov_b32_e32 v17, s17 ; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] +; GCN-NEXT: v_mov_b32_e32 v18, s18 +; GCN-NEXT: v_mov_b32_e32 v19, s19 ; GCN-NEXT: v_mov_b32_e32 v0, s20 ; GCN-NEXT: v_mov_b32_e32 v1, s21 ; GCN-NEXT: v_mov_b32_e32 v2, s22 ; GCN-NEXT: v_mov_b32_e32 v3, s23 -; GCN-NEXT: v_mov_b32_e32 v17, s17 -; GCN-NEXT: v_mov_b32_e32 v18, s18 -; GCN-NEXT: v_mov_b32_e32 v19, s19 +; GCN-NEXT: v_mov_b64_e32 v[14:15], 0 ; GCN-NEXT: s_nop 4 ; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) @@ -112,16 +112,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0 ; GCN-NEXT: v_accvgpr_write_b32 a13, s21 ; GCN-NEXT: v_accvgpr_write_b32 a14, s22 ; GCN-NEXT: v_accvgpr_write_b32 a15, s23 -; GCN-NEXT: v_mov_b64_e32 v[14:15], 0 ; GCN-NEXT: v_mov_b32_e32 v16, s16 +; GCN-NEXT: v_mov_b32_e32 v17, s17 ; GCN-NEXT: v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1 +; GCN-NEXT: v_mov_b32_e32 v18, s18 +; GCN-NEXT: v_mov_b32_e32 v19, s19 ; GCN-NEXT: v_mov_b32_e32 v0, s20 ; GCN-NEXT: v_mov_b32_e32 v1, s21 ; GCN-NEXT: v_mov_b32_e32 v2, s22 ; GCN-NEXT: v_mov_b32_e32 v3, s23 -; GCN-NEXT: v_mov_b32_e32 v17, s17 -; GCN-NEXT: v_mov_b32_e32 v18, s18 -; GCN-NEXT: v_mov_b32_e32 v19, s19 +; GCN-NEXT: v_mov_b64_e32 v[14:15], 0 ; GCN-NEXT: s_nop 4 ; GCN-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; GCN-NEXT: s_waitcnt vmcnt(0) diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll index d81ec1c61634b..078a043b94604 100644 --- 
a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.ll @@ -406,16 +406,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; SDAG-NEXT: v_accvgpr_write_b32 a13, s21 ; SDAG-NEXT: v_accvgpr_write_b32 a14, s22 ; SDAG-NEXT: v_accvgpr_write_b32 a15, s23 -; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0 ; SDAG-NEXT: v_mov_b32_e32 v16, s16 +; SDAG-NEXT: v_mov_b32_e32 v17, s17 ; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] +; SDAG-NEXT: v_mov_b32_e32 v18, s18 +; SDAG-NEXT: v_mov_b32_e32 v19, s19 ; SDAG-NEXT: v_mov_b32_e32 v0, s20 ; SDAG-NEXT: v_mov_b32_e32 v1, s21 ; SDAG-NEXT: v_mov_b32_e32 v2, s22 ; SDAG-NEXT: v_mov_b32_e32 v3, s23 -; SDAG-NEXT: v_mov_b32_e32 v17, s17 -; SDAG-NEXT: v_mov_b32_e32 v18, s18 -; SDAG-NEXT: v_mov_b32_e32 v19, s19 +; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0 ; SDAG-NEXT: s_nop 4 ; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) @@ -449,9 +449,9 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; GISEL: ; %bb.0: ; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24 ; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 -; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0 -; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48 -; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16 +; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0 +; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16 +; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27] @@ -474,31 +474,34 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; GISEL-NEXT: v_accvgpr_write_b32 a14, s22 ; GISEL-NEXT: v_accvgpr_write_b32 a15, s23 ; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48 ; GISEL-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] -; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] -; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17] -; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] -; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] -; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15] -; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19] -; GISEL-NEXT: s_nop 4 -; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 7 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off 
sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19] +; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] +; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_endpgm ; @@ -530,16 +533,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21 ; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22 ; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23 -; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0 ; HEURRC-NEXT: v_mov_b32_e32 v16, s16 +; HEURRC-NEXT: v_mov_b32_e32 v17, s17 ; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] +; HEURRC-NEXT: v_mov_b32_e32 v18, s18 +; HEURRC-NEXT: v_mov_b32_e32 v19, s19 ; HEURRC-NEXT: v_mov_b32_e32 v0, s20 ; HEURRC-NEXT: v_mov_b32_e32 v1, s21 ; HEURRC-NEXT: v_mov_b32_e32 v2, s22 ; HEURRC-NEXT: v_mov_b32_e32 v3, s23 -; HEURRC-NEXT: v_mov_b32_e32 v17, s17 -; HEURRC-NEXT: v_mov_b32_e32 v18, s18 -; HEURRC-NEXT: v_mov_b32_e32 v19, s19 +; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0 ; HEURRC-NEXT: s_nop 4 ; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) @@ -589,12 +592,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19] ; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21] ; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23] -; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0 ; VGPRRC-NEXT: v_mov_b32_e32 v48, s16 -; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] ; VGPRRC-NEXT: v_mov_b32_e32 v49, s17 +; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] ; VGPRRC-NEXT: v_mov_b32_e32 v50, s18 ; VGPRRC-NEXT: v_mov_b32_e32 v51, s19 +; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0 ; VGPRRC-NEXT: s_nop 7 ; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1 @@ -605,12 +608,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x hal ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1 -; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: v_mov_b32_e32 v0, s20 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s21 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s22 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s23 +; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1 +; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_nop 0 @@ -788,16 +791,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; SDAG-NEXT: v_accvgpr_write_b32 a13, s21 ; SDAG-NEXT: v_accvgpr_write_b32 a14, s22 ; SDAG-NEXT: v_accvgpr_write_b32 a15, s23 -; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0 ; SDAG-NEXT: v_mov_b32_e32 v16, s16 +; SDAG-NEXT: v_mov_b32_e32 v17, s17 ; SDAG-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1 +; SDAG-NEXT: v_mov_b32_e32 v18, s18 +; SDAG-NEXT: v_mov_b32_e32 v19, s19 ; SDAG-NEXT: v_mov_b32_e32 v0, s20 ; SDAG-NEXT: v_mov_b32_e32 v1, s21 ; SDAG-NEXT: v_mov_b32_e32 v2, s22 ; 
SDAG-NEXT: v_mov_b32_e32 v3, s23 -; SDAG-NEXT: v_mov_b32_e32 v17, s17 -; SDAG-NEXT: v_mov_b32_e32 v18, s18 -; SDAG-NEXT: v_mov_b32_e32 v19, s19 +; SDAG-NEXT: v_mov_b64_e32 v[14:15], 0 ; SDAG-NEXT: s_nop 4 ; SDAG-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) @@ -831,9 +834,9 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; GISEL: ; %bb.0: ; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24 ; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 -; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0 -; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48 -; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16 +; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0 +; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16 +; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27] @@ -856,31 +859,34 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; GISEL-NEXT: v_accvgpr_write_b32 a14, s22 ; GISEL-NEXT: v_accvgpr_write_b32 a15, s23 ; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48 ; GISEL-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1 -; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] -; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17] -; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] -; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] -; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15] -; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19] -; GISEL-NEXT: s_nop 4 -; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 7 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19] +; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] +; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_endpgm ; @@ -912,16 +918,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21 ; 
HEURRC-NEXT: v_accvgpr_write_b32 a14, s22 ; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23 -; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0 ; HEURRC-NEXT: v_mov_b32_e32 v16, s16 +; HEURRC-NEXT: v_mov_b32_e32 v17, s17 ; HEURRC-NEXT: v_mfma_f32_32x32x16_f16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1 +; HEURRC-NEXT: v_mov_b32_e32 v18, s18 +; HEURRC-NEXT: v_mov_b32_e32 v19, s19 ; HEURRC-NEXT: v_mov_b32_e32 v0, s20 ; HEURRC-NEXT: v_mov_b32_e32 v1, s21 ; HEURRC-NEXT: v_mov_b32_e32 v2, s22 ; HEURRC-NEXT: v_mov_b32_e32 v3, s23 -; HEURRC-NEXT: v_mov_b32_e32 v17, s17 -; HEURRC-NEXT: v_mov_b32_e32 v18, s18 -; HEURRC-NEXT: v_mov_b32_e32 v19, s19 +; HEURRC-NEXT: v_mov_b64_e32 v[14:15], 0 ; HEURRC-NEXT: s_nop 4 ; HEURRC-NEXT: global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) @@ -971,12 +977,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19] ; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21] ; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23] -; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0 ; VGPRRC-NEXT: v_mov_b32_e32 v48, s16 -; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] cbsz:2 abid:3 blgp:1 ; VGPRRC-NEXT: v_mov_b32_e32 v49, s17 +; VGPRRC-NEXT: v_mfma_f32_32x32x16_f16 v[16:31], v[32:35], v[36:39], v[0:15] cbsz:2 abid:3 blgp:1 ; VGPRRC-NEXT: v_mov_b32_e32 v50, s18 ; VGPRRC-NEXT: v_mov_b32_e32 v51, s19 +; VGPRRC-NEXT: v_mov_b64_e32 v[46:47], 0 ; VGPRRC-NEXT: s_nop 7 ; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[28:31], off sc0 sc1 @@ -987,12 +993,12 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_f16__flags(<8 x half> %arg0, < ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: global_store_dwordx4 v[46:47], v[16:19], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1 -; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: v_mov_b32_e32 v0, s20 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s21 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s22 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s23 +; VGPRRC-NEXT: global_store_dwordx4 v[42:43], v[48:51], off sc0 sc1 +; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: global_store_dwordx4 v[40:41], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_nop 0 @@ -2978,47 +2984,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48 ; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32 -; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16 ; SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-NEXT: v_mov_b32_e32 v8, s24 -; SDAG-NEXT: v_mov_b32_e32 v9, s25 -; SDAG-NEXT: v_mov_b32_e32 v10, s26 -; SDAG-NEXT: v_mov_b32_e32 v11, s27 -; SDAG-NEXT: v_accvgpr_write_b32 a0, s8 -; SDAG-NEXT: v_mov_b32_e32 v12, s28 -; SDAG-NEXT: v_mov_b32_e32 v13, s29 -; SDAG-NEXT: v_mov_b32_e32 v14, s30 -; SDAG-NEXT: v_mov_b32_e32 v15, s31 -; SDAG-NEXT: v_accvgpr_write_b32 a1, s9 -; SDAG-NEXT: v_accvgpr_write_b32 a2, s10 -; SDAG-NEXT: v_accvgpr_write_b32 a3, s11 -; SDAG-NEXT: v_accvgpr_write_b32 a4, s12 -; SDAG-NEXT: v_accvgpr_write_b32 a5, s13 -; SDAG-NEXT: v_accvgpr_write_b32 a6, s14 -; SDAG-NEXT: v_accvgpr_write_b32 a7, s15 -; SDAG-NEXT: v_accvgpr_write_b32 a8, s16 -; SDAG-NEXT: v_accvgpr_write_b32 a9, s17 -; SDAG-NEXT: v_accvgpr_write_b32 a10, s18 -; SDAG-NEXT: v_accvgpr_write_b32 a11, s19 -; SDAG-NEXT: v_accvgpr_write_b32 a12, s20 -; SDAG-NEXT: v_accvgpr_write_b32 a13, s21 -; SDAG-NEXT: v_accvgpr_write_b32 a14, s22 -; 
SDAG-NEXT: v_accvgpr_write_b32 a15, s23 +; SDAG-NEXT: v_mov_b32_e32 v4, s24 +; SDAG-NEXT: v_mov_b32_e32 v5, s25 +; SDAG-NEXT: v_mov_b32_e32 v6, s26 +; SDAG-NEXT: v_mov_b32_e32 v7, s27 +; SDAG-NEXT: v_accvgpr_write_b32 a31, s23 +; SDAG-NEXT: v_mov_b32_e32 v8, s28 +; SDAG-NEXT: v_mov_b32_e32 v9, s29 +; SDAG-NEXT: v_mov_b32_e32 v10, s30 +; SDAG-NEXT: v_mov_b32_e32 v11, s31 +; SDAG-NEXT: v_accvgpr_write_b32 a30, s22 +; SDAG-NEXT: v_accvgpr_write_b32 a29, s21 +; SDAG-NEXT: v_accvgpr_write_b32 a28, s20 +; SDAG-NEXT: v_accvgpr_write_b32 a27, s19 +; SDAG-NEXT: v_accvgpr_write_b32 a26, s18 +; SDAG-NEXT: v_accvgpr_write_b32 a25, s17 +; SDAG-NEXT: v_accvgpr_write_b32 a24, s16 +; SDAG-NEXT: v_accvgpr_write_b32 a23, s15 +; SDAG-NEXT: v_accvgpr_write_b32 a22, s14 +; SDAG-NEXT: v_accvgpr_write_b32 a21, s13 +; SDAG-NEXT: v_accvgpr_write_b32 a20, s12 +; SDAG-NEXT: v_accvgpr_write_b32 a19, s11 +; SDAG-NEXT: v_accvgpr_write_b32 a18, s10 +; SDAG-NEXT: v_accvgpr_write_b32 a17, s9 +; SDAG-NEXT: v_accvgpr_write_b32 a16, s8 +; SDAG-NEXT: s_nop 1 +; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] +; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16 ; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0 -; SDAG-NEXT: s_nop 0 -; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] ; SDAG-NEXT: v_mov_b32_e32 v8, s16 ; SDAG-NEXT: v_mov_b32_e32 v9, s17 ; SDAG-NEXT: v_mov_b32_e32 v10, s18 ; SDAG-NEXT: v_mov_b32_e32 v11, s19 -; SDAG-NEXT: s_nop 7 -; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1 +; SDAG-NEXT: s_nop 5 +; SDAG-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) ; SDAG-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) @@ -3047,9 +3053,9 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; GISEL: ; %bb.0: ; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24 ; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 -; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0 -; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48 -; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16 +; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0 +; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16 +; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27] @@ -3072,31 +3078,34 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; GISEL-NEXT: v_accvgpr_write_b32 a14, s22 ; GISEL-NEXT: v_accvgpr_write_b32 a15, s23 ; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48 ; GISEL-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[0:3], v[4:7], a[0:15] -; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] -; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17] -; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] -; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] -; GISEL-NEXT: v_mov_b64_e32 
v[14:15], s[14:15] -; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19] -; GISEL-NEXT: s_nop 4 -; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 7 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19] +; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] +; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_endpgm ; @@ -3106,47 +3115,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48 ; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32 -; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16 ; HEURRC-NEXT: s_waitcnt lgkmcnt(0) -; HEURRC-NEXT: v_mov_b32_e32 v8, s24 -; HEURRC-NEXT: v_mov_b32_e32 v9, s25 -; HEURRC-NEXT: v_mov_b32_e32 v10, s26 -; HEURRC-NEXT: v_mov_b32_e32 v11, s27 -; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8 -; HEURRC-NEXT: v_mov_b32_e32 v12, s28 -; HEURRC-NEXT: v_mov_b32_e32 v13, s29 -; HEURRC-NEXT: v_mov_b32_e32 v14, s30 -; HEURRC-NEXT: v_mov_b32_e32 v15, s31 -; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9 -; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10 -; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11 -; HEURRC-NEXT: v_accvgpr_write_b32 a4, s12 -; HEURRC-NEXT: v_accvgpr_write_b32 a5, s13 -; HEURRC-NEXT: v_accvgpr_write_b32 a6, s14 -; HEURRC-NEXT: v_accvgpr_write_b32 a7, s15 -; HEURRC-NEXT: v_accvgpr_write_b32 a8, s16 -; HEURRC-NEXT: v_accvgpr_write_b32 a9, s17 -; HEURRC-NEXT: v_accvgpr_write_b32 a10, s18 -; HEURRC-NEXT: v_accvgpr_write_b32 a11, s19 -; HEURRC-NEXT: v_accvgpr_write_b32 a12, s20 -; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21 -; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22 -; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23 +; HEURRC-NEXT: v_mov_b32_e32 v4, s24 +; HEURRC-NEXT: v_mov_b32_e32 v5, s25 +; HEURRC-NEXT: v_mov_b32_e32 v6, s26 +; HEURRC-NEXT: v_mov_b32_e32 v7, s27 +; HEURRC-NEXT: v_accvgpr_write_b32 a31, s23 +; HEURRC-NEXT: v_mov_b32_e32 v8, s28 +; HEURRC-NEXT: v_mov_b32_e32 v9, s29 +; HEURRC-NEXT: v_mov_b32_e32 v10, s30 +; HEURRC-NEXT: v_mov_b32_e32 v11, s31 +; HEURRC-NEXT: v_accvgpr_write_b32 a30, s22 +; HEURRC-NEXT: v_accvgpr_write_b32 a29, s21 +; HEURRC-NEXT: v_accvgpr_write_b32 a28, s20 +; 
HEURRC-NEXT: v_accvgpr_write_b32 a27, s19 +; HEURRC-NEXT: v_accvgpr_write_b32 a26, s18 +; HEURRC-NEXT: v_accvgpr_write_b32 a25, s17 +; HEURRC-NEXT: v_accvgpr_write_b32 a24, s16 +; HEURRC-NEXT: v_accvgpr_write_b32 a23, s15 +; HEURRC-NEXT: v_accvgpr_write_b32 a22, s14 +; HEURRC-NEXT: v_accvgpr_write_b32 a21, s13 +; HEURRC-NEXT: v_accvgpr_write_b32 a20, s12 +; HEURRC-NEXT: v_accvgpr_write_b32 a19, s11 +; HEURRC-NEXT: v_accvgpr_write_b32 a18, s10 +; HEURRC-NEXT: v_accvgpr_write_b32 a17, s9 +; HEURRC-NEXT: v_accvgpr_write_b32 a16, s8 +; HEURRC-NEXT: s_nop 1 +; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] +; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16 ; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0 -; HEURRC-NEXT: s_nop 0 -; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] ; HEURRC-NEXT: v_mov_b32_e32 v8, s16 ; HEURRC-NEXT: v_mov_b32_e32 v9, s17 ; HEURRC-NEXT: v_mov_b32_e32 v10, s18 ; HEURRC-NEXT: v_mov_b32_e32 v11, s19 -; HEURRC-NEXT: s_nop 7 -; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1 +; HEURRC-NEXT: s_nop 5 +; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) ; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) @@ -3177,37 +3186,40 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48 ; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32 -; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16 ; VGPRRC-NEXT: s_waitcnt lgkmcnt(0) -; VGPRRC-NEXT: v_mov_b32_e32 v40, s24 -; VGPRRC-NEXT: v_mov_b32_e32 v41, s25 -; VGPRRC-NEXT: v_mov_b32_e32 v42, s26 -; VGPRRC-NEXT: v_mov_b32_e32 v43, s27 -; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9] -; VGPRRC-NEXT: v_mov_b32_e32 v44, s28 -; VGPRRC-NEXT: v_mov_b32_e32 v45, s29 -; VGPRRC-NEXT: v_mov_b32_e32 v46, s30 -; VGPRRC-NEXT: v_mov_b32_e32 v47, s31 -; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11] -; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13] -; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15] -; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[16:17] -; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19] -; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21] -; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23] -; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0 -; VGPRRC-NEXT: s_nop 0 -; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[16:31], v[40:43], v[44:47], v[0:15] +; VGPRRC-NEXT: v_mov_b32_e32 v36, s24 +; VGPRRC-NEXT: v_mov_b32_e32 v37, s25 +; VGPRRC-NEXT: v_mov_b32_e32 v38, s26 +; VGPRRC-NEXT: v_mov_b32_e32 v39, s27 +; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23] +; VGPRRC-NEXT: v_mov_b32_e32 v40, s28 +; VGPRRC-NEXT: v_mov_b32_e32 v41, s29 +; VGPRRC-NEXT: v_mov_b32_e32 v42, s30 +; VGPRRC-NEXT: v_mov_b32_e32 v43, s31 +; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21] +; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19] +; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17] +; VGPRRC-NEXT: v_mov_b64_e32 v[22:23], s[14:15] +; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13] +; VGPRRC-NEXT: 
v_mov_b64_e32 v[18:19], s[10:11] +; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9] +; VGPRRC-NEXT: s_nop 1 +; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[0:15], v[36:39], v[40:43], v[16:31] ; VGPRRC-NEXT: s_nop 7 ; VGPRRC-NEXT: s_nop 3 -; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[12:15], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[8:11], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1 +; VGPRRC-NEXT: s_nop 0 +; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], 16 +; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[4:7], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1 +; VGPRRC-NEXT: s_nop 0 +; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], 0 +; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) +; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: v_mov_b32_e32 v0, s16 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s17 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s18 @@ -3226,14 +3238,14 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> ; VGPRRC-NEXT: v_mov_b32_e32 v1, s9 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s10 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s11 -; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: v_mov_b32_e32 v0, s12 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s13 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s14 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s15 -; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_endpgm ; AGPR-LABEL: test_mfma_i32_32x32x32_i8: @@ -3386,47 +3398,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; SDAG-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; SDAG-NEXT: v_mov_b64_e32 v[0:1], 48 ; SDAG-NEXT: v_mov_b64_e32 v[2:3], 32 -; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16 ; SDAG-NEXT: s_waitcnt lgkmcnt(0) -; SDAG-NEXT: v_mov_b32_e32 v8, s24 -; SDAG-NEXT: v_mov_b32_e32 v9, s25 -; SDAG-NEXT: v_mov_b32_e32 v10, s26 -; SDAG-NEXT: v_mov_b32_e32 v11, s27 -; SDAG-NEXT: v_accvgpr_write_b32 a0, s8 -; SDAG-NEXT: v_mov_b32_e32 v12, s28 -; SDAG-NEXT: v_mov_b32_e32 v13, s29 -; SDAG-NEXT: v_mov_b32_e32 v14, s30 -; SDAG-NEXT: v_mov_b32_e32 v15, s31 -; SDAG-NEXT: v_accvgpr_write_b32 a1, s9 -; SDAG-NEXT: v_accvgpr_write_b32 a2, s10 -; SDAG-NEXT: v_accvgpr_write_b32 a3, s11 -; SDAG-NEXT: v_accvgpr_write_b32 a4, s12 -; SDAG-NEXT: v_accvgpr_write_b32 a5, s13 -; SDAG-NEXT: v_accvgpr_write_b32 a6, s14 -; SDAG-NEXT: v_accvgpr_write_b32 a7, s15 -; SDAG-NEXT: v_accvgpr_write_b32 a8, s16 -; SDAG-NEXT: v_accvgpr_write_b32 a9, s17 -; SDAG-NEXT: v_accvgpr_write_b32 a10, s18 -; SDAG-NEXT: v_accvgpr_write_b32 a11, s19 -; SDAG-NEXT: v_accvgpr_write_b32 a12, s20 -; SDAG-NEXT: v_accvgpr_write_b32 a13, s21 -; SDAG-NEXT: v_accvgpr_write_b32 a14, s22 -; SDAG-NEXT: v_accvgpr_write_b32 a15, s23 +; SDAG-NEXT: v_mov_b32_e32 v4, s24 +; SDAG-NEXT: v_mov_b32_e32 v5, s25 +; SDAG-NEXT: v_mov_b32_e32 v6, s26 +; SDAG-NEXT: v_mov_b32_e32 v7, s27 +; SDAG-NEXT: v_accvgpr_write_b32 a31, s23 +; SDAG-NEXT: v_mov_b32_e32 v8, s28 +; SDAG-NEXT: v_mov_b32_e32 v9, s29 +; SDAG-NEXT: v_mov_b32_e32 v10, s30 +; SDAG-NEXT: v_mov_b32_e32 v11, 
s31 +; SDAG-NEXT: v_accvgpr_write_b32 a30, s22 +; SDAG-NEXT: v_accvgpr_write_b32 a29, s21 +; SDAG-NEXT: v_accvgpr_write_b32 a28, s20 +; SDAG-NEXT: v_accvgpr_write_b32 a27, s19 +; SDAG-NEXT: v_accvgpr_write_b32 a26, s18 +; SDAG-NEXT: v_accvgpr_write_b32 a25, s17 +; SDAG-NEXT: v_accvgpr_write_b32 a24, s16 +; SDAG-NEXT: v_accvgpr_write_b32 a23, s15 +; SDAG-NEXT: v_accvgpr_write_b32 a22, s14 +; SDAG-NEXT: v_accvgpr_write_b32 a21, s13 +; SDAG-NEXT: v_accvgpr_write_b32 a20, s12 +; SDAG-NEXT: v_accvgpr_write_b32 a19, s11 +; SDAG-NEXT: v_accvgpr_write_b32 a18, s10 +; SDAG-NEXT: v_accvgpr_write_b32 a17, s9 +; SDAG-NEXT: v_accvgpr_write_b32 a16, s8 +; SDAG-NEXT: s_nop 1 +; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] cbsz:2 abid:3 blgp:1 +; SDAG-NEXT: v_mov_b64_e32 v[4:5], 16 ; SDAG-NEXT: v_mov_b64_e32 v[6:7], 0 -; SDAG-NEXT: s_nop 0 -; SDAG-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1 ; SDAG-NEXT: v_mov_b32_e32 v8, s16 ; SDAG-NEXT: v_mov_b32_e32 v9, s17 ; SDAG-NEXT: v_mov_b32_e32 v10, s18 ; SDAG-NEXT: v_mov_b32_e32 v11, s19 -; SDAG-NEXT: s_nop 7 -; SDAG-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1 +; SDAG-NEXT: s_nop 5 +; SDAG-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) -; SDAG-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1 +; SDAG-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) ; SDAG-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1 ; SDAG-NEXT: s_waitcnt vmcnt(0) @@ -3455,9 +3467,9 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; GISEL: ; %bb.0: ; GISEL-NEXT: s_load_dwordx8 s[24:31], s[4:5], 0x24 ; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 -; GISEL-NEXT: v_mov_b64_e32 v[20:21], 0 -; GISEL-NEXT: v_mov_b64_e32 v[26:27], 48 -; GISEL-NEXT: v_mov_b64_e32 v[22:23], 16 +; GISEL-NEXT: v_mov_b64_e32 v[12:13], 0 +; GISEL-NEXT: v_mov_b64_e32 v[14:15], 16 +; GISEL-NEXT: v_mov_b64_e32 v[16:17], 32 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[24:25] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[26:27] @@ -3480,31 +3492,34 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; GISEL-NEXT: v_accvgpr_write_b32 a14, s22 ; GISEL-NEXT: v_accvgpr_write_b32 a15, s23 ; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[18:19], 48 ; GISEL-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1 -; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] -; GISEL-NEXT: v_mov_b64_e32 v[16:17], s[16:17] -; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] -; GISEL-NEXT: v_mov_b64_e32 v[24:25], 32 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] -; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[14:15] -; GISEL-NEXT: v_mov_b64_e32 v[18:19], s[18:19] -; GISEL-NEXT: s_nop 4 -; GISEL-NEXT: global_store_dwordx4 v[20:21], a[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 7 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: global_store_dwordx4 v[12:13], a[16:19], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; 
GISEL-NEXT: global_store_dwordx4 v[22:23], a[20:23], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], a[20:23], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], a[24:27], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[16:17], a[24:27], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], a[28:31], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[18:19], a[28:31], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[20:21], v[8:11], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[12:13], v[8:11], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[22:23], v[12:15], off sc0 sc1 +; GISEL-NEXT: global_store_dwordx4 v[14:15], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[24:25], v[16:19], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[16:17] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[18:19] +; GISEL-NEXT: global_store_dwordx4 v[16:17], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) -; GISEL-NEXT: global_store_dwordx4 v[26:27], v[0:3], off sc0 sc1 +; GISEL-NEXT: s_nop 0 +; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[20:21] +; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[22:23] +; GISEL-NEXT: global_store_dwordx4 v[18:19], v[0:3], off sc0 sc1 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_endpgm ; @@ -3514,47 +3529,47 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; HEURRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; HEURRC-NEXT: v_mov_b64_e32 v[0:1], 48 ; HEURRC-NEXT: v_mov_b64_e32 v[2:3], 32 -; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16 ; HEURRC-NEXT: s_waitcnt lgkmcnt(0) -; HEURRC-NEXT: v_mov_b32_e32 v8, s24 -; HEURRC-NEXT: v_mov_b32_e32 v9, s25 -; HEURRC-NEXT: v_mov_b32_e32 v10, s26 -; HEURRC-NEXT: v_mov_b32_e32 v11, s27 -; HEURRC-NEXT: v_accvgpr_write_b32 a0, s8 -; HEURRC-NEXT: v_mov_b32_e32 v12, s28 -; HEURRC-NEXT: v_mov_b32_e32 v13, s29 -; HEURRC-NEXT: v_mov_b32_e32 v14, s30 -; HEURRC-NEXT: v_mov_b32_e32 v15, s31 -; HEURRC-NEXT: v_accvgpr_write_b32 a1, s9 -; HEURRC-NEXT: v_accvgpr_write_b32 a2, s10 -; HEURRC-NEXT: v_accvgpr_write_b32 a3, s11 -; HEURRC-NEXT: v_accvgpr_write_b32 a4, s12 -; HEURRC-NEXT: v_accvgpr_write_b32 a5, s13 -; HEURRC-NEXT: v_accvgpr_write_b32 a6, s14 -; HEURRC-NEXT: v_accvgpr_write_b32 a7, s15 -; HEURRC-NEXT: v_accvgpr_write_b32 a8, s16 -; HEURRC-NEXT: v_accvgpr_write_b32 a9, s17 -; HEURRC-NEXT: v_accvgpr_write_b32 a10, s18 -; HEURRC-NEXT: v_accvgpr_write_b32 a11, s19 -; HEURRC-NEXT: v_accvgpr_write_b32 a12, s20 -; HEURRC-NEXT: v_accvgpr_write_b32 a13, s21 -; HEURRC-NEXT: v_accvgpr_write_b32 a14, s22 -; HEURRC-NEXT: v_accvgpr_write_b32 a15, s23 +; HEURRC-NEXT: v_mov_b32_e32 v4, s24 +; HEURRC-NEXT: v_mov_b32_e32 v5, s25 +; HEURRC-NEXT: v_mov_b32_e32 v6, s26 +; HEURRC-NEXT: v_mov_b32_e32 v7, s27 +; HEURRC-NEXT: v_accvgpr_write_b32 a31, s23 +; HEURRC-NEXT: v_mov_b32_e32 v8, s28 +; HEURRC-NEXT: v_mov_b32_e32 v9, s29 +; HEURRC-NEXT: v_mov_b32_e32 v10, s30 +; HEURRC-NEXT: v_mov_b32_e32 v11, s31 +; HEURRC-NEXT: v_accvgpr_write_b32 a30, s22 +; HEURRC-NEXT: v_accvgpr_write_b32 a29, s21 +; HEURRC-NEXT: v_accvgpr_write_b32 a28, s20 +; HEURRC-NEXT: v_accvgpr_write_b32 a27, s19 +; HEURRC-NEXT: v_accvgpr_write_b32 a26, s18 +; HEURRC-NEXT: v_accvgpr_write_b32 a25, s17 +; HEURRC-NEXT: v_accvgpr_write_b32 a24, s16 +; HEURRC-NEXT: v_accvgpr_write_b32 a23, s15 +; HEURRC-NEXT: v_accvgpr_write_b32 a22, s14 +; HEURRC-NEXT: v_accvgpr_write_b32 a21, 
s13 +; HEURRC-NEXT: v_accvgpr_write_b32 a20, s12 +; HEURRC-NEXT: v_accvgpr_write_b32 a19, s11 +; HEURRC-NEXT: v_accvgpr_write_b32 a18, s10 +; HEURRC-NEXT: v_accvgpr_write_b32 a17, s9 +; HEURRC-NEXT: v_accvgpr_write_b32 a16, s8 +; HEURRC-NEXT: s_nop 1 +; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[4:7], v[8:11], a[16:31] cbsz:2 abid:3 blgp:1 +; HEURRC-NEXT: v_mov_b64_e32 v[4:5], 16 ; HEURRC-NEXT: v_mov_b64_e32 v[6:7], 0 -; HEURRC-NEXT: s_nop 0 -; HEURRC-NEXT: v_mfma_i32_32x32x32_i8 a[16:31], v[8:11], v[12:15], a[0:15] cbsz:2 abid:3 blgp:1 ; HEURRC-NEXT: v_mov_b32_e32 v8, s16 ; HEURRC-NEXT: v_mov_b32_e32 v9, s17 ; HEURRC-NEXT: v_mov_b32_e32 v10, s18 ; HEURRC-NEXT: v_mov_b32_e32 v11, s19 -; HEURRC-NEXT: s_nop 7 -; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[28:31], off sc0 sc1 +; HEURRC-NEXT: s_nop 5 +; HEURRC-NEXT: global_store_dwordx4 v[0:1], a[12:15], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[24:27], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[2:3], a[8:11], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[20:23], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[4:5], a[4:7], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) -; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[16:19], off sc0 sc1 +; HEURRC-NEXT: global_store_dwordx4 v[6:7], a[0:3], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) ; HEURRC-NEXT: global_store_dwordx4 v[2:3], v[8:11], off sc0 sc1 ; HEURRC-NEXT: s_waitcnt vmcnt(0) @@ -3585,37 +3600,40 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; VGPRRC-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x64 ; VGPRRC-NEXT: v_mov_b64_e32 v[32:33], 48 ; VGPRRC-NEXT: v_mov_b64_e32 v[34:35], 32 -; VGPRRC-NEXT: v_mov_b64_e32 v[36:37], 16 ; VGPRRC-NEXT: s_waitcnt lgkmcnt(0) -; VGPRRC-NEXT: v_mov_b32_e32 v40, s24 -; VGPRRC-NEXT: v_mov_b32_e32 v41, s25 -; VGPRRC-NEXT: v_mov_b32_e32 v42, s26 -; VGPRRC-NEXT: v_mov_b32_e32 v43, s27 -; VGPRRC-NEXT: v_mov_b64_e32 v[0:1], s[8:9] -; VGPRRC-NEXT: v_mov_b32_e32 v44, s28 -; VGPRRC-NEXT: v_mov_b32_e32 v45, s29 -; VGPRRC-NEXT: v_mov_b32_e32 v46, s30 -; VGPRRC-NEXT: v_mov_b32_e32 v47, s31 -; VGPRRC-NEXT: v_mov_b64_e32 v[2:3], s[10:11] -; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], s[12:13] -; VGPRRC-NEXT: v_mov_b64_e32 v[6:7], s[14:15] -; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], s[16:17] -; VGPRRC-NEXT: v_mov_b64_e32 v[10:11], s[18:19] -; VGPRRC-NEXT: v_mov_b64_e32 v[12:13], s[20:21] -; VGPRRC-NEXT: v_mov_b64_e32 v[14:15], s[22:23] -; VGPRRC-NEXT: v_mov_b64_e32 v[38:39], 0 -; VGPRRC-NEXT: s_nop 0 -; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[16:31], v[40:43], v[44:47], v[0:15] cbsz:2 abid:3 blgp:1 +; VGPRRC-NEXT: v_mov_b32_e32 v36, s24 +; VGPRRC-NEXT: v_mov_b32_e32 v37, s25 +; VGPRRC-NEXT: v_mov_b32_e32 v38, s26 +; VGPRRC-NEXT: v_mov_b32_e32 v39, s27 +; VGPRRC-NEXT: v_mov_b64_e32 v[30:31], s[22:23] +; VGPRRC-NEXT: v_mov_b32_e32 v40, s28 +; VGPRRC-NEXT: v_mov_b32_e32 v41, s29 +; VGPRRC-NEXT: v_mov_b32_e32 v42, s30 +; VGPRRC-NEXT: v_mov_b32_e32 v43, s31 +; VGPRRC-NEXT: v_mov_b64_e32 v[28:29], s[20:21] +; VGPRRC-NEXT: v_mov_b64_e32 v[26:27], s[18:19] +; VGPRRC-NEXT: v_mov_b64_e32 v[24:25], s[16:17] +; VGPRRC-NEXT: v_mov_b64_e32 v[22:23], s[14:15] +; VGPRRC-NEXT: v_mov_b64_e32 v[20:21], s[12:13] +; VGPRRC-NEXT: v_mov_b64_e32 v[18:19], s[10:11] +; VGPRRC-NEXT: v_mov_b64_e32 v[16:17], s[8:9] +; VGPRRC-NEXT: s_nop 1 +; VGPRRC-NEXT: v_mfma_i32_32x32x32_i8 v[0:15], v[36:39], v[40:43], v[16:31] cbsz:2 abid:3 blgp:1 ; VGPRRC-NEXT: s_nop 7 ; VGPRRC-NEXT: s_nop 
3 -; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[28:31], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[32:33], v[12:15], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[24:27], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[34:35], v[8:11], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[20:23], off sc0 sc1 +; VGPRRC-NEXT: s_nop 0 +; VGPRRC-NEXT: v_mov_b64_e32 v[8:9], 16 +; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[4:7], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) -; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[16:19], off sc0 sc1 +; VGPRRC-NEXT: s_nop 0 +; VGPRRC-NEXT: v_mov_b64_e32 v[4:5], 0 +; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) +; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: v_mov_b32_e32 v0, s16 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s17 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s18 @@ -3634,14 +3652,14 @@ define amdgpu_kernel void @test_mfma_i32_32x32x32_i8__flags(<4 x i32> %arg0, <4 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s9 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s10 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s11 -; VGPRRC-NEXT: global_store_dwordx4 v[38:39], v[0:3], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[4:5], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_nop 0 ; VGPRRC-NEXT: v_mov_b32_e32 v0, s12 ; VGPRRC-NEXT: v_mov_b32_e32 v1, s13 ; VGPRRC-NEXT: v_mov_b32_e32 v2, s14 ; VGPRRC-NEXT: v_mov_b32_e32 v3, s15 -; VGPRRC-NEXT: global_store_dwordx4 v[36:37], v[0:3], off sc0 sc1 +; VGPRRC-NEXT: global_store_dwordx4 v[8:9], v[0:3], off sc0 sc1 ; VGPRRC-NEXT: s_waitcnt vmcnt(0) ; VGPRRC-NEXT: s_endpgm ; AGPR-LABEL: test_mfma_i32_32x32x32_i8__flags: diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll index 0b2818f38149d..24af3fa5ff9b7 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll @@ -4784,8 +4784,8 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x ; GISEL-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0x0 ; GISEL-NEXT: s_load_dwordx16 s[8:23], s[4:5], 0x40 ; GISEL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x80 +; GISEL-NEXT: v_mov_b64_e32 v[16:17], 0 ; GISEL-NEXT: v_mov_b64_e32 v[18:19], 16 -; GISEL-NEXT: v_mov_b64_e32 v[20:21], 32 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[36:37] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[38:39] @@ -4811,16 +4811,16 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x ; GISEL-NEXT: v_accvgpr_write_b32 a13, s21 ; GISEL-NEXT: v_accvgpr_write_b32 a14, s22 ; GISEL-NEXT: v_accvgpr_write_b32 a15, s23 -; GISEL-NEXT: v_mov_b32_e32 v16, s1 +; GISEL-NEXT: v_mov_b32_e32 v20, s1 ; GISEL-NEXT: v_mov_b64_e32 v[22:23], 48 ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v16 op_sel_hi:[0,0,0] +; GISEL-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v20 op_sel_hi:[0,0,0] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[16:17], 0 ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[10:11] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[20:21] +; GISEL-NEXT: v_mov_b64_e32 v[20:21], 32 ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[14:15] ; GISEL-NEXT: 
v_mov_b64_e32 v[10:11], s[18:19] ; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[22:23]
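
For reviewers skimming past the regenerated test output above: the functional change is the AMDGPUPrepareAGPRAlloc.cpp hunk at the top of the patch. Below is a condensed, non-compilable sketch of that new 64-bit path, repeated here only for convenience; the names (AVImmPseudo64, TII.isLegalAV64PseudoImm, isAV64Imm) come from the patch itself, and the surrounding instruction loop plus the AGPR-reserved early return are omitted.

  // Rewriting a 64-bit immediate move to AV_MOV_B64_IMM_PSEUDO lets the
  // register allocator later choose either a VGPR or an AGPR destination
  // for the value, mirroring what the pass already did for 32-bit moves.
  bool AMDGPUPrepareAGPRAllocImpl::isAV64Imm(const MachineOperand &MO) const {
    // Only plain immediates the 64-bit AV pseudo can encode qualify; frame
    // indexes and register operands are left alone.
    return MO.isImm() && TII.isLegalAV64PseudoImm(MO.getImm());
  }

  // Inside the per-instruction loop, after the existing 32-bit case:
  if ((MI.getOpcode() == AMDGPU::V_MOV_B64_e64 ||
       MI.getOpcode() == AMDGPU::V_MOV_B64_PSEUDO) &&
      isAV64Imm(MI.getOperand(1))) {
    MI.setDesc(AVImmPseudo64); // i.e. TII.get(AMDGPU::AV_MOV_B64_IMM_PSEUDO)
    Changed = true;
  }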