llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

<details>
<summary>Changes</summary>

---

Full diff: https://github.com/llvm/llvm-project/pull/154500.diff

5 Files Affected:

- (modified) llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp (+20-2)
- (modified) llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir (+31-22)
- (modified) llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll (+27-25)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll (+8-8)
- (modified) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll (+4-4)


``````````diff
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
index 3b06e9b00ac69..0137b3f5943d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPrepareAGPRAlloc.cpp
@@ -34,6 +34,8 @@ class AMDGPUPrepareAGPRAllocImpl {
   const SIInstrInfo &TII;
   MachineRegisterInfo &MRI;
 
+  bool isAV64Imm(const MachineOperand &MO) const;
+
 public:
   AMDGPUPrepareAGPRAllocImpl(const GCNSubtarget &ST, MachineRegisterInfo &MRI)
       : TII(*ST.getInstrInfo()), MRI(MRI) {}
@@ -85,11 +87,16 @@ AMDGPUPrepareAGPRAllocPass::run(MachineFunction &MF,
   return PreservedAnalyses::all();
 }
 
+bool AMDGPUPrepareAGPRAllocImpl::isAV64Imm(const MachineOperand &MO) const {
+  return MO.isImm() && TII.isLegalAV64PseudoImm(MO.getImm());
+}
+
 bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
   if (MRI.isReserved(AMDGPU::AGPR0))
     return false;
 
-  const MCInstrDesc &AVImmPseudo = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+  const MCInstrDesc &AVImmPseudo32 = TII.get(AMDGPU::AV_MOV_B32_IMM_PSEUDO);
+  const MCInstrDesc &AVImmPseudo64 = TII.get(AMDGPU::AV_MOV_B64_IMM_PSEUDO);
   bool Changed = false;
 
   for (MachineBasicBlock &MBB : MF) {
@@ -98,8 +105,19 @@ bool AMDGPUPrepareAGPRAllocImpl::run(MachineFunction &MF) {
            TII.isInlineConstant(MI, 1)) ||
           (MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 &&
            MI.getOperand(1).isImm())) {
-        MI.setDesc(AVImmPseudo);
+        MI.setDesc(AVImmPseudo32);
+        Changed = true;
+        continue;
+      }
+
+      // TODO: If only half of the value is rewritable, is it worth splitting it
+      // up?
+      if ((MI.getOpcode() == AMDGPU::V_MOV_B64_e64 ||
+           MI.getOpcode() == AMDGPU::V_MOV_B64_PSEUDO) &&
+          isAV64Imm(MI.getOperand(1))) {
+        MI.setDesc(AVImmPseudo64);
         Changed = true;
+        continue;
       }
     }
   }
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
index d277c8104fe44..aaacf1d6f793b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-prepare-agpr-alloc.mir
@@ -114,22 +114,22 @@ body: |
     ; HAS-AGPR-NEXT: liveins: $vgpr0_vgpr1
     ; HAS-AGPR-NEXT: {{ $}}
     ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 $vgpr0_vgpr1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 64, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B7:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B8:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B9:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 54, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 64, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 %stack.0, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 65, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874240, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 279172874305, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_3:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_4:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B5:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO $vgpr0_vgpr1, implicit $exec
+    ; HAS-AGPR-NEXT: [[V_MOV_B6:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO %stack.0, implicit $exec
     ; HAS-AGPR-NEXT: {{ $}}
     ; HAS-AGPR-NEXT: bb.1:
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_4:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 3, implicit $exec
+    ; HAS-AGPR-NEXT: [[AV_MOV_5:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 3, implicit $exec
     ;
     ; NO-AGPR-LABEL: name: func64
     ; NO-AGPR: bb.0:
@@ -181,14 +181,23 @@ tracksRegLiveness: true
 body: |
   bb.0:
     liveins: $vgpr0
-    ; HAS-AGPR-LABEL: name: func64_no_agprs
-    ; HAS-AGPR: liveins: $vgpr0
-    ; HAS-AGPR-NEXT: {{ $}}
-    ; HAS-AGPR-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
-    ; HAS-AGPR-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+    ; GFX90A-LABEL: name: func64_no_agprs
+    ; GFX90A: liveins: $vgpr0
+    ; GFX90A-NEXT: {{ $}}
+    ; GFX90A-NEXT: [[V_MOV_B64_e64_:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_e64 1, implicit $exec
+    ; GFX90A-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; GFX90A-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; GFX90A-NEXT: [[V_MOV_B2:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 1042479491, implicit $exec
+    ; GFX90A-NEXT: [[V_MOV_B3:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4477415320595726336, implicit $exec
+    ;
+    ; GFX908-LABEL: name: func64_no_agprs
+    ; GFX908: liveins: $vgpr0
+    ; GFX908-NEXT: {{ $}}
+    ; GFX908-NEXT: [[AV_MOV_:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1, implicit $exec
+    ; GFX908-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO 4290672329938, implicit $exec
+    ; GFX908-NEXT: [[V_MOV_B1:%[0-9]+]]:vreg_64_align2 = V_MOV_B64_PSEUDO -9223372036854775808, implicit $exec
+    ; GFX908-NEXT: [[AV_MOV_1:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 1042479491, implicit $exec
+    ; GFX908-NEXT: [[AV_MOV_2:%[0-9]+]]:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 4477415320595726336, implicit $exec
     ;
     ; NO-AGPR-LABEL: name: func64_no_agprs
     ; NO-AGPR: liveins: $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
index 89fe0ab526a8a..614b1e38a530f 100644
--- a/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
+++ b/llvm/test/CodeGen/AMDGPU/av-split-dead-valno-crash.ll
@@ -16,10 +16,12 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    s_bitcmp1_b32 s0, 8
 ; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; CHECK-NEXT:    s_xor_b64 s[20:21], s[2:3], -1
 ; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 1, v0
-; CHECK-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x9037ab78
+; CHECK-NEXT:    v_accvgpr_write_b32 a3, v1
+; CHECK-NEXT:    s_xor_b64 s[20:21], s[2:3], -1
+; CHECK-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
+; CHECK-NEXT:    v_accvgpr_write_b32 a2, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, 0xbe927e4f
 ; CHECK-NEXT:    v_mov_b32_e32 v4, 0x19f4ec90
 ; CHECK-NEXT:    v_mov_b32_e32 v5, 0x3efa01a0
@@ -34,14 +36,14 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    v_mov_b32_e32 v14, 0x8427b883
 ; CHECK-NEXT:    v_mov_b32_e32 v15, 0x3fae1bb4
 ; CHECK-NEXT:    s_mov_b64 s[22:23], 0
-; CHECK-NEXT:    v_mov_b32_e32 v16, 0x57b87036
-; CHECK-NEXT:    v_mov_b32_e32 v17, 0x3fb3b136
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0x57b87036
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0x3fb3b136
 ; CHECK-NEXT:    s_and_b64 s[4:5], exec, s[16:17]
 ; CHECK-NEXT:    v_mov_b32_e32 v18, 0x55555523
 ; CHECK-NEXT:    v_mov_b32_e32 v19, 0xbfd55555
 ; CHECK-NEXT:    s_and_b64 s[6:7], exec, s[18:19]
 ; CHECK-NEXT:    v_mov_b32_e32 v20, 0
-; CHECK-NEXT:    ; implicit-def: $agpr0_agpr1
+; CHECK-NEXT:    ; implicit-def: $vgpr30_vgpr31
 ; CHECK-NEXT:    ; implicit-def: $vgpr22_vgpr23
 ; CHECK-NEXT:    s_branch .LBB0_2
 ; CHECK-NEXT:  .LBB0_1: ; %Flow9
@@ -61,9 +63,12 @@
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    v_mov_b64_e32 v[24:25], s[14:15]
 ; CHECK-NEXT:    flat_load_dwordx2 v[24:25], v[24:25]
-; CHECK-NEXT:    v_mov_b64_e32 v[26:27], v[0:1]
+; CHECK-NEXT:    v_accvgpr_read_b32 v27, a3
+; CHECK-NEXT:    v_accvgpr_read_b32 v26, a2
 ; CHECK-NEXT:    v_mov_b64_e32 v[28:29], v[2:3]
-; CHECK-NEXT:    v_mov_b64_e32 v[30:31], v[16:17]
+; CHECK-NEXT:    v_mov_b64_e32 v[16:17], v[0:1]
+; CHECK-NEXT:    v_accvgpr_write_b32 a0, 0
+; CHECK-NEXT:    v_accvgpr_write_b32 a1, 0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_fmac_f64_e32 v[26:27], 0, v[24:25]
 ; CHECK-NEXT:    v_fmac_f64_e32 v[28:29], 0, v[26:27]
@@ -79,10 +84,9 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:    v_fmac_f64_e32 v[26:27], 0, v[28:29]
 ; CHECK-NEXT:    v_mov_b64_e32 v[28:29], v[14:15]
 ; CHECK-NEXT:    v_fmac_f64_e32 v[28:29], 0, v[26:27]
-; CHECK-NEXT:    v_fmac_f64_e32 v[30:31], 0, v[28:29]
+; CHECK-NEXT:    v_fmac_f64_e32 v[16:17], 0, v[28:29]
 ; CHECK-NEXT:    v_mov_b64_e32 v[26:27], v[18:19]
-; CHECK-NEXT:    v_fmac_f64_e32 v[26:27], 0, v[30:31]
-; CHECK-NEXT:    v_mov_b64_e32 v[30:31], 0
+; CHECK-NEXT:    v_fmac_f64_e32 v[26:27], 0, v[16:17]
 ; CHECK-NEXT:    s_branch .LBB0_6
 ; CHECK-NEXT:  .LBB0_5: ; %Flow
 ; CHECK-NEXT:    ; in Loop: Header=BB0_6 Depth=2
@@ -91,30 +95,30 @@
 ; CHECK-NEXT:  .LBB0_6: ; %.preheader1855.i.i.i3329
 ; CHECK-NEXT:    ; Parent Loop BB0_2 Depth=1
 ; CHECK-NEXT:    ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT:    v_mov_b64_e32 v[28:29], v[30:31]
+; CHECK-NEXT:    v_accvgpr_read_b32 v29, a1
+; CHECK-NEXT:    v_accvgpr_read_b32 v28, a0
 ; CHECK-NEXT:    s_mov_b64 s[24:25], -1
 ; CHECK-NEXT:    s_mov_b64 s[8:9], -1
 ; CHECK-NEXT:    s_mov_b64 vcc, s[2:3]
-; CHECK-NEXT:    ; implicit-def: $vgpr30_vgpr31
+; CHECK-NEXT:    ; implicit-def: $agpr0_agpr1
 ; CHECK-NEXT:    s_cbranch_vccz .LBB0_5
 ; CHECK-NEXT:  ; %bb.7: ; %.lr.ph2070.i.i.i3291
 ; CHECK-NEXT:    ; in Loop: Header=BB0_6 Depth=2
-; CHECK-NEXT:    v_accvgpr_read_b32 v31, a1
-; CHECK-NEXT:    v_accvgpr_read_b32 v30, a0
+; CHECK-NEXT:    v_accvgpr_write_b32 a0, v30
+; CHECK-NEXT:    v_accvgpr_write_b32 a1, v31
 ; CHECK-NEXT:    s_mov_b64 s[8:9], s[18:19]
 ; CHECK-NEXT:    s_mov_b64 vcc, s[6:7]
 ; CHECK-NEXT:    s_cbranch_vccz .LBB0_5
 ; CHECK-NEXT:  ; %bb.8: ; %.preheader1856.preheader.i.i.i3325
 ; CHECK-NEXT:    ; in Loop: Header=BB0_6 Depth=2
+; CHECK-NEXT:    v_accvgpr_write_b32 a0, v26
 ; CHECK-NEXT:    s_mov_b64 s[24:25], 0
-; CHECK-NEXT:    v_mov_b64_e32 v[30:31], v[26:27]
+; CHECK-NEXT:    v_accvgpr_write_b32 a1, v27
 ; CHECK-NEXT:    s_mov_b64 s[8:9], 0
 ; CHECK-NEXT:    s_branch .LBB0_5
 ; CHECK-NEXT:  .LBB0_9: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    v_mov_b64_e32 v[24:25], s[10:11]
-; CHECK-NEXT:    v_accvgpr_write_b32 a0, v24
 ; CHECK-NEXT:    s_mov_b64 s[22:23], 0
-; CHECK-NEXT:    v_accvgpr_write_b32 a1, v25
+; CHECK-NEXT:    v_mov_b64_e32 v[30:31], s[10:11]
 ; CHECK-NEXT:    s_mov_b64 s[8:9], s[20:21]
 ; CHECK-NEXT:    s_branch .LBB0_15
 ; CHECK-NEXT:  .LBB0_10: ; in Loop: Header=BB0_2 Depth=1
@@ -128,24 +132,22 @@ define amdgpu_kernel void @vgpr_mfma_pass_av_split_crash(double %arg1, i1 %arg2,
 ; CHECK-NEXT:  ; %bb.12: ; %._crit_edge2105.i.i.i2330.loopexit
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    v_cmp_nlg_f64_e64 s[8:9], 0, v[28:29]
-; CHECK-NEXT:    v_accvgpr_write_b32 a0, v24
 ; CHECK-NEXT:    v_cndmask_b32_e64 v23, v23, 0, s[16:17]
-; CHECK-NEXT:    v_cndmask_b32_e64 v26, 0, 1, s[8:9]
-; CHECK-NEXT:    v_mov_b32_e32 v27, v26
-; CHECK-NEXT:    s_and_b64 s[8:9], exec, s[16:17]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v22, v22, 0, s[16:17]
-; CHECK-NEXT:    global_store_dwordx2 v20, v[26:27], s[12:13]
+; CHECK-NEXT:    v_cndmask_b32_e64 v16, 0, 1, s[8:9]
+; CHECK-NEXT:    v_mov_b32_e32 v17, v16
+; CHECK-NEXT:    s_and_b64 s[8:9], exec, s[16:17]
+; CHECK-NEXT:    global_store_dwordx2 v20, v[16:17], s[12:13]
 ; CHECK-NEXT:    s_cselect_b32 s23, s23, 0
 ; CHECK-NEXT:    s_cselect_b32 s22, s22, 0
 ; CHECK-NEXT:    s_mov_b64 s[8:9], -1
 ; CHECK-NEXT:    s_branch .LBB0_14
 ; CHECK-NEXT:  .LBB0_13: ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    v_accvgpr_write_b32 a0, v24
 ; CHECK-NEXT:    s_mov_b64 s[8:9], 0
 ; CHECK-NEXT:    v_mov_b64_e32 v[22:23], 0
 ; CHECK-NEXT:  .LBB0_14: ; %Flow6
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    v_accvgpr_write_b32 a1, v25
+; CHECK-NEXT:    v_mov_b64_e32 v[30:31], v[24:25]
 ; CHECK-NEXT:  .LBB0_15: ; %Flow6
 ; CHECK-NEXT:    ; in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    s_mov_b64 s[24:25], -1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
index 8081a15b53bb7..284ced1727b7e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.gfx950.bf16.ll
@@ -39,16 +39,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x
 ; GCN-NEXT:    v_accvgpr_write_b32 a13, s21
 ; GCN-NEXT:    v_accvgpr_write_b32 a14, s22
 ; GCN-NEXT:    v_accvgpr_write_b32 a15, s23
-; GCN-NEXT:    v_mov_b64_e32 v[14:15], 0
 ; GCN-NEXT:    v_mov_b32_e32 v16, s16
+; GCN-NEXT:    v_mov_b32_e32 v17, s17
 ; GCN-NEXT:    v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15]
+; GCN-NEXT:    v_mov_b32_e32 v18, s18
+; GCN-NEXT:    v_mov_b32_e32 v19, s19
 ; GCN-NEXT:    v_mov_b32_e32 v0, s20
 ; GCN-NEXT:    v_mov_b32_e32 v1, s21
 ; GCN-NEXT:    v_mov_b32_e32 v2, s22
 ; GCN-NEXT:    v_mov_b32_e32 v3, s23
-; GCN-NEXT:    v_mov_b32_e32 v17, s17
-; GCN-NEXT:    v_mov_b32_e32 v18, s18
-; GCN-NEXT:    v_mov_b32_e32 v19, s19
+; GCN-NEXT:    v_mov_b64_e32 v[14:15], 0
 ; GCN-NEXT:    s_nop 4
 ; GCN-NEXT:    global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
@@ -112,16 +112,16 @@ define amdgpu_kernel void @test_mfma_f32_32x32x16_bf16__flags(<8 x bfloat> %arg0
 ; GCN-NEXT:    v_accvgpr_write_b32 a13, s21
 ; GCN-NEXT:    v_accvgpr_write_b32 a14, s22
 ; GCN-NEXT:    v_accvgpr_write_b32 a15, s23
-; GCN-NEXT:    v_mov_b64_e32 v[14:15], 0
 ; GCN-NEXT:    v_mov_b32_e32 v16, s16
+; GCN-NEXT:    v_mov_b32_e32 v17, s17
 ; GCN-NEXT:    v_mfma_f32_32x32x16_bf16 a[16:31], v[0:3], v[4:7], a[0:15] cbsz:2 abid:3 blgp:1
+; GCN-NEXT:    v_mov_b32_e32 v18, s18
+; GCN-NEXT:    v_mov_b32_e32 v19, s19
 ; GCN-NEXT:    v_mov_b32_e32 v0, s20
 ; GCN-NEXT:    v_mov_b32_e32 v1, s21
 ; GCN-NEXT:    v_mov_b32_e32 v2, s22
 ; GCN-NEXT:    v_mov_b32_e32 v3, s23
-; GCN-NEXT:    v_mov_b32_e32 v17, s17
-; GCN-NEXT:    v_mov_b32_e32 v18, s18
-; GCN-NEXT:    v_mov_b32_e32 v19, s19
+; GCN-NEXT:    v_mov_b64_e32 v[14:15], 0
 ; GCN-NEXT:    s_nop 4
 ; GCN-NEXT:    global_store_dwordx4 v[8:9], a[28:31], off sc0 sc1
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
index 0b2818f38149d..24af3fa5ff9b7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.ll
@@ -4784,8 +4784,8 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x
 ; GISEL-NEXT:    s_load_dwordx16 s[36:51], s[4:5], 0x0
 ; GISEL-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x40
 ; GISEL-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x80
+; GISEL-NEXT:    v_mov_b64_e32 v[16:17], 0
 ; GISEL-NEXT:    v_mov_b64_e32 v[18:19], 16
-; GISEL-NEXT:    v_mov_b64_e32 v[20:21], 32
 ; GISEL-NEXT:    s_waitcnt lgkmcnt(0)
 ; GISEL-NEXT:    v_mov_b64_e32 v[0:1], s[36:37]
 ; GISEL-NEXT:    v_mov_b64_e32 v[2:3], s[38:39]
@@ -4811,16 +4811,16 @@ define amdgpu_kernel void @test_mfma_scale_f32_32x32x64_f8f6f4_0_0__nonmac(<8 x
 ; GISEL-NEXT:    v_accvgpr_write_b32 a13, s21
 ; GISEL-NEXT:    v_accvgpr_write_b32 a14, s22
 ; GISEL-NEXT:    v_accvgpr_write_b32 a15, s23
-; GISEL-NEXT:    v_mov_b32_e32 v16, s1
+; GISEL-NEXT:    v_mov_b32_e32 v20, s1
 ; GISEL-NEXT:    v_mov_b64_e32 v[22:23], 48
 ; GISEL-NEXT:    s_nop 0
-; GISEL-NEXT:    v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v16 op_sel_hi:[0,0,0]
+; GISEL-NEXT:    v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], s0, v20 op_sel_hi:[0,0,0]
 ; GISEL-NEXT:    v_mov_b64_e32 v[0:1], s[8:9]
-; GISEL-NEXT:    v_mov_b64_e32 v[16:17], 0
 ; GISEL-NEXT:    v_mov_b64_e32 v[2:3], s[10:11]
 ; GISEL-NEXT:    v_mov_b64_e32 v[4:5], s[12:13]
 ; GISEL-NEXT:    v_mov_b64_e32 v[8:9], s[16:17]
 ; GISEL-NEXT:    v_mov_b64_e32 v[12:13], s[20:21]
+; GISEL-NEXT:    v_mov_b64_e32 v[20:21], 32
 ; GISEL-NEXT:    v_mov_b64_e32 v[6:7], s[14:15]
 ; GISEL-NEXT:    v_mov_b64_e32 v[10:11], s[18:19]
 ; GISEL-NEXT:    v_mov_b64_e32 v[14:15], s[22:23]
``````````

</details>

https://github.com/llvm/llvm-project/pull/154500
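For readers skimming the diff, a minimal before/after sketch of the new rewrite, in MIR, drawn from the amdgpu-prepare-agpr-alloc.mir checks above (the virtual register name is illustrative; the precise immediate test is `TII.isLegalAV64PseudoImm`):

```
  ; Input: a 64-bit move of an immediate that the AV pseudo can also encode.
  %0:vreg_64_align2 = V_MOV_B64_PSEUDO 54, implicit $exec

  ; After AMDGPUPrepareAGPRAlloc (the pass exits early if AGPR0 is reserved):
  ; only the opcode changes, leaving the register allocator free to place the
  ; value in either an AGPR or a VGPR.
  %0:vreg_64_align2 = AV_MOV_B64_IMM_PSEUDO 54, implicit $exec
```

V_MOV_B64_e64 with a legal immediate is rewritten the same way, while moves of registers, frame indexes, or immediates that fail the legality check are left untouched, as the func64 checks show.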