https://github.com/petar-avramovic updated https://github.com/llvm/llvm-project/pull/128702
>From 8e33358f5f3ca6167cc585acb7661d7a5f4a8568 Mon Sep 17 00:00:00 2001
From: Petar Avramovic <petar.avramo...@amd.com>
Date: Fri, 28 Feb 2025 15:54:55 +0100
Subject: [PATCH] AMDGPU/GlobalISel: Update divergence lowering tests

In preparation for implementing temporal divergence lowering for
global-isel, switch llvm-ir tests for amdgpu divergence lowering to the
new reg bank select. This requires adding a few simple regbanklegalize
rules for these tests to work.
---
 .../Target/AMDGPU/AMDGPURegBankLegalize.cpp   |   6 +
 .../AMDGPU/AMDGPURegBankLegalizeHelper.cpp    |  28 +-
 .../AMDGPU/AMDGPURegBankLegalizeRules.cpp     |  39 +-
 .../AMDGPU/AMDGPURegBankLegalizeRules.h       |   5 +
 ...-divergent-i1-phis-no-lane-mask-merging.ll |  97 ++---
 .../GlobalISel/divergence-structurizer.ll     | 409 ++++++++++--------
 .../divergence-temporal-divergent-i1.ll       | 398 ++++++++++++++---
 .../divergence-temporal-divergent-i1.mir      | 400 +++++++++++++++++
 .../divergence-temporal-divergent-reg.ll      |  57 ++-
 .../divergence-temporal-divergent-reg.mir     |  71 +++
 10 files changed, 1182 insertions(+), 328 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
index 75eb440d99c03..d5a83903e2b13 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalize.cpp
@@ -311,6 +311,12 @@ bool AMDGPURegBankLegalize::runOnMachineFunction(MachineFunction &MF) {
     }

     // Opcodes that also support S1.
+    if (Opc == G_FREEZE &&
+        MRI.getType(MI->getOperand(0).getReg()) != LLT::scalar(1)) {
+      RBLHelper.applyMappingTrivial(*MI);
+      continue;
+    }
+
     if ((Opc == AMDGPU::G_CONSTANT || Opc == AMDGPU::G_FCONSTANT ||
          Opc == AMDGPU::G_IMPLICIT_DEF)) {
       Register Dst = MI->getOperand(0).getReg();
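The AMDGPURegBankLegalize change above is pure routing: a G_FREEZE whose result is not S1 gets the trivial mapping (all operands stay on the bank of the destination), while S1 freezes fall through to the regbanklegalize rules, where a divergent S1 is assigned to vcc. A minimal host-side sketch of that routing, using hypothetical stand-in types rather than the real MachineInstr/LLT API:

#include <cstdio>

// Hypothetical stand-ins for illustration; only the routing decision
// mirrors the patch.
enum Opcode { G_FREEZE, G_CONSTANT, G_OTHER };

struct Instr {
  Opcode Opc;
  bool DstIsS1; // destination has type LLT::scalar(1)
};

// Non-S1 G_FREEZE takes the trivial same-bank mapping; S1 is left to the
// S1-aware rule table (uniform -> sgpr, divergent -> vcc).
const char *route(const Instr &MI) {
  if (MI.Opc == G_FREEZE && !MI.DstIsS1)
    return "applyMappingTrivial";
  return "RegBankLegalizeRules";
}

int main() {
  std::printf("%s\n", route({G_FREEZE, false})); // applyMappingTrivial
  std::printf("%s\n", route({G_FREEZE, true}));  // RegBankLegalizeRules
  return 0;
}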
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 3c007987b8494..3383175fc1bdb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -134,6 +134,26 @@ void RegBankLegalizeHelper::lower(MachineInstr &MI,
   switch (Mapping.LoweringMethod) {
   case DoNotLower:
     return;
+  case VccExtToSel: {
+    LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+    Register Src = MI.getOperand(1).getReg();
+    unsigned Opc = MI.getOpcode();
+    if (Ty == S32 || Ty == S16) {
+      auto True = B.buildConstant({VgprRB, Ty}, Opc == G_SEXT ? -1 : 1);
+      auto False = B.buildConstant({VgprRB, Ty}, 0);
+      B.buildSelect(MI.getOperand(0).getReg(), Src, True, False);
+    }
+    if (Ty == S64) {
+      auto True = B.buildConstant({VgprRB, S32}, Opc == G_SEXT ? -1 : 1);
+      auto False = B.buildConstant({VgprRB, S32}, 0);
+      auto Sel = B.buildSelect({VgprRB, S32}, Src, True, False);
+      B.buildMergeValues(
+          MI.getOperand(0).getReg(),
+          {Sel.getReg(0), Opc == G_SEXT ? Sel.getReg(0) : False.getReg(0)});
+    }
+    MI.eraseFromParent();
+    return;
+  }
   case UniExtToSel: {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
     auto True = B.buildConstant({SgprRB, Ty},
@@ -276,6 +296,8 @@ LLT RegBankLegalizeHelper::getTyFromID(RegBankLLTMappingApplyID ID) {
   case Sgpr64:
   case Vgpr64:
     return LLT::scalar(64);
+  case VgprP0:
+    return LLT::pointer(0, 64);
   case SgprP1:
   case VgprP1:
     return LLT::pointer(1, 64);
@@ -383,6 +405,7 @@ RegBankLegalizeHelper::getRegBankFromID(RegBankLLTMappingApplyID ID) {
     return SgprRB;
   case Vgpr32:
   case Vgpr64:
+  case VgprP0:
   case VgprP1:
   case VgprP3:
   case VgprP4:
@@ -425,6 +448,7 @@ void RegBankLegalizeHelper::applyMappingDst(
   case SgprV4S32:
   case Vgpr32:
   case Vgpr64:
+  case VgprP0:
   case VgprP1:
   case VgprP3:
   case VgprP4:
@@ -555,6 +579,7 @@ void RegBankLegalizeHelper::applyMappingSrc(
   // vgpr scalars, pointers and vectors
   case Vgpr32:
   case Vgpr64:
+  case VgprP0:
   case VgprP1:
   case VgprP3:
   case VgprP4:
@@ -653,7 +678,8 @@ void RegBankLegalizeHelper::applyMappingPHI(MachineInstr &MI) {
   // We accept all types that can fit in some register class.
   // Uniform G_PHIs have all sgpr registers.
   // Divergent G_PHIs have vgpr dst but inputs can be sgpr or vgpr.
-  if (Ty == LLT::scalar(32) || Ty == LLT::pointer(4, 64)) {
+  if (Ty == LLT::scalar(32) || Ty == LLT::pointer(1, 64) ||
+      Ty == LLT::pointer(4, 64)) {
     return;
   }
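The new VccExtToSel lowering above turns G_SEXT/G_ZEXT of a divergent i1 (a lane mask held in vcc) into a select between constants: -1/0 for sext, 1/0 for zext. For an S64 destination the result is assembled from two 32-bit halves, and sext simply reuses the selected low half as the high half. A self-contained sketch of the per-lane value this produces (plain C++ standing in for the MIRBuilder sequence; the helper name is made up):

#include <cstdint>
#include <cstdio>

// Per-lane result of VccExtToSel: a select of constants on the low
// 32 (or 16) bits; for s64, sext replicates the low half into the high
// half and zext zero-fills it (the buildMergeValues of {Sel, Hi}).
uint64_t vccExtToSel(bool Lane, bool IsSext, unsigned DstBits) {
  uint32_t Lo = Lane ? (IsSext ? ~0u : 1u) : 0u;
  if (DstBits == 16)
    return Lo & 0xffffu;
  if (DstBits == 32)
    return Lo;
  uint32_t Hi = IsSext ? Lo : 0u;
  return ((uint64_t)Hi << 32) | Lo;
}

int main() {
  std::printf("%llx\n", (unsigned long long)vccExtToSel(true, true, 64));  // ffffffffffffffff
  std::printf("%llx\n", (unsigned long long)vccExtToSel(true, false, 64)); // 1
  return 0;
}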
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index 33018ae9677a3..6ee15709d2fa6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -50,6 +50,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
     return MRI.getType(Reg) == LLT::scalar(32);
   case S64:
     return MRI.getType(Reg) == LLT::scalar(64);
+  case P0:
+    return MRI.getType(Reg) == LLT::pointer(0, 64);
   case P1:
     return MRI.getType(Reg) == LLT::pointer(1, 64);
   case P3:
@@ -58,6 +60,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
     return MRI.getType(Reg) == LLT::pointer(3, 32);
   case P4:
     return MRI.getType(Reg) == LLT::pointer(4, 64);
   case P5:
     return MRI.getType(Reg) == LLT::pointer(5, 32);
+  case V4S32:
+    return MRI.getType(Reg) == LLT::fixed_vector(4, 32);
   case B32:
     return MRI.getType(Reg).getSizeInBits() == 32;
   case B64:
@@ -78,6 +82,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
     return MRI.getType(Reg) == LLT::scalar(32) && MUI.isUniform(Reg);
   case UniS64:
     return MRI.getType(Reg) == LLT::scalar(64) && MUI.isUniform(Reg);
+  case UniP0:
+    return MRI.getType(Reg) == LLT::pointer(0, 64) && MUI.isUniform(Reg);
   case UniP1:
     return MRI.getType(Reg) == LLT::pointer(1, 64) && MUI.isUniform(Reg);
   case UniP3:
@@ -104,6 +110,8 @@ bool matchUniformityAndLLT(Register Reg, UniformityLLTOpPredicateID UniID,
     return MRI.getType(Reg) == LLT::scalar(32) && MUI.isDivergent(Reg);
   case DivS64:
     return MRI.getType(Reg) == LLT::scalar(64) && MUI.isDivergent(Reg);
+  case DivP0:
+    return MRI.getType(Reg) == LLT::pointer(0, 64) && MUI.isDivergent(Reg);
   case DivP1:
     return MRI.getType(Reg) == LLT::pointer(1, 64) && MUI.isDivergent(Reg);
   case DivP3:
@@ -433,16 +441,21 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
   addRulesForGOpcs({G_XOR, G_OR, G_AND}, StandardB)
       .Any({{UniS1}, {{Sgpr32Trunc}, {Sgpr32AExt, Sgpr32AExt}}})
       .Any({{DivS1}, {{Vcc}, {Vcc, Vcc}}})
+      .Div(B32, {{VgprB32}, {VgprB32, VgprB32}})
+      .Uni(B64, {{SgprB64}, {SgprB64, SgprB64}})
       .Div(B64, {{VgprB64}, {VgprB64, VgprB64}, SplitTo32});

   addRulesForGOpcs({G_SHL}, Standard)
+      .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
       .Uni(S64, {{Sgpr64}, {Sgpr64, Sgpr32}})
       .Div(S64, {{Vgpr64}, {Vgpr64, Vgpr32}});

   // Note: we only write S1 rules for G_IMPLICIT_DEF, G_CONSTANT, G_FCONSTANT
   // and G_FREEZE here, rest is trivially regbankselected earlier
+  addRulesForGOpcs({G_IMPLICIT_DEF}).Any({{UniS1}, {{Sgpr32Trunc}, {}}});
   addRulesForGOpcs({G_CONSTANT})
       .Any({{UniS1, _}, {{Sgpr32Trunc}, {None}, UniCstExt}});
+  addRulesForGOpcs({G_FREEZE}).Any({{DivS1}, {{Vcc}, {Vcc}}});

   addRulesForGOpcs({G_ICMP})
       .Any({{UniS1, _, S32}, {{Sgpr32Trunc}, {None, Sgpr32, Sgpr32}}})
@@ -473,6 +486,7 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,

   addRulesForGOpcs({G_ZEXT, G_SEXT})
       .Any({{UniS32, S1}, {{Sgpr32}, {Sgpr32AExtBoolInReg}, UniExtToSel}})
+      .Any({{DivS32, S1}, {{Vgpr32}, {Vcc}, VccExtToSel}})
       .Any({{UniS64, S32}, {{Sgpr64}, {Sgpr32}, Ext32To64}})
       .Any({{DivS64, S32}, {{Vgpr64}, {Vgpr32}, Ext32To64}});

@@ -527,9 +541,12 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
   // clang-format off
   addRulesForGOpcs({G_LOAD})
+      .Any({{DivB32, DivP0}, {{VgprB32}, {VgprP0}}})
+      .Any({{DivB32, DivP1}, {{VgprB32}, {VgprP1}}})

       .Any({{{UniB256, UniP1}, isAlign4 && isUL}, {{SgprB256}, {SgprP1}}})
       .Any({{{UniB512, UniP1}, isAlign4 && isUL}, {{SgprB512}, {SgprP1}}})
+      .Any({{{UniB32, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB32}, {SgprP1}}})
       .Any({{{UniB256, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB256}, {VgprP1}, SplitLoad}})
       .Any({{{UniB512, UniP1}, !isAlign4 || !isUL}, {{UniInVgprB512}, {VgprP1}, SplitLoad}})

@@ -558,15 +575,26 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
   // clang-format on

   addRulesForGOpcs({G_AMDGPU_BUFFER_LOAD}, Vector)
+      .Div(S32, {{Vgpr32}, {SgprV4S32, Vgpr32, Vgpr32, Sgpr32}})
+      .Uni(S32, {{UniInVgprS32}, {SgprV4S32, Vgpr32, Vgpr32, Sgpr32}})
       .Div(V4S32, {{VgprV4S32}, {SgprV4S32, Vgpr32, Vgpr32, Sgpr32}})
       .Uni(V4S32, {{UniInVgprV4S32}, {SgprV4S32, Vgpr32, Vgpr32, Sgpr32}});

   addRulesForGOpcs({G_STORE})
+      .Any({{S32, P0}, {{}, {Vgpr32, VgprP0}}})
       .Any({{S32, P1}, {{}, {Vgpr32, VgprP1}}})
       .Any({{S64, P1}, {{}, {Vgpr64, VgprP1}}})
       .Any({{V4S32, P1}, {{}, {VgprV4S32, VgprP1}}});

-  addRulesForGOpcs({G_PTR_ADD}).Any({{DivP1}, {{VgprP1}, {VgprP1, Vgpr64}}});
+  addRulesForGOpcs({G_AMDGPU_BUFFER_STORE})
+      .Any({{S32}, {{}, {Vgpr32, SgprV4S32, Vgpr32, Vgpr32, Sgpr32}}});
+
+  addRulesForGOpcs({G_PTR_ADD})
+      .Any({{UniP1}, {{SgprP1}, {SgprP1, Sgpr64}}})
+      .Any({{DivP1}, {{VgprP1}, {VgprP1, Vgpr64}}})
+      .Any({{DivP0}, {{VgprP0}, {VgprP0, Vgpr64}}});
+
+  addRulesForGOpcs({G_INTTOPTR}).Any({{UniP4}, {{SgprP4}, {Sgpr64}}});

   addRulesForGOpcs({G_ABS}, Standard).Uni(S16, {{Sgpr32Trunc}, {Sgpr32SExt}});

@@ -582,15 +610,24 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
       .Any({{UniS32, S32}, {{UniInVgprS32}, {Vgpr32}}}, !hasSALUFloat);

   addRulesForGOpcs({G_UITOFP})
+      .Any({{DivS32, S32}, {{Vgpr32}, {Vgpr32}}})
       .Any({{UniS32, S32}, {{Sgpr32}, {Sgpr32}}}, hasSALUFloat)
       .Any({{UniS32, S32}, {{UniInVgprS32}, {Vgpr32}}}, !hasSALUFloat);

   using namespace Intrinsic;

+  addRulesForIOpcs({amdgcn_s_getpc}).Any({{UniS64, _}, {{Sgpr64}, {None}}});
+
   // This is "intrinsic lane mask" it was set to i32/i64 in llvm-ir.
addRulesForIOpcs({amdgcn_end_cf}).Any({{_, S32}, {{}, {None, Sgpr32}}}); addRulesForIOpcs({amdgcn_if_break}, Standard) .Uni(S32, {{Sgpr32}, {IntrId, Vcc, Sgpr32}}); + addRulesForIOpcs({amdgcn_mbcnt_lo, amdgcn_mbcnt_hi}, Standard) + .Div(S32, {{}, {Vgpr32, None, Vgpr32, Vgpr32}}); + + addRulesForIOpcs({amdgcn_readfirstlane}) + .Any({{UniS32, _, DivS32}, {{}, {Sgpr32, None, Vgpr32}}}); + } // end initialize rules diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h index 8280751e1dbdd..6bde7f2cd676d 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h @@ -50,16 +50,19 @@ enum UniformityLLTOpPredicateID { DivS64, // pointers + P0, P1, P3, P4, P5, + UniP0, UniP1, UniP3, UniP4, UniP5, + DivP0, DivP1, DivP3, DivP4, @@ -124,6 +127,7 @@ enum RegBankLLTMappingApplyID { // vgpr scalars, pointers, vectors and B-types Vgpr32, Vgpr64, + VgprP0, VgprP1, VgprP3, VgprP4, @@ -162,6 +166,7 @@ enum RegBankLLTMappingApplyID { // vgpr. Lower it to two S32 vgpr ANDs. enum LoweringMethodID { DoNotLower, + VccExtToSel, UniExtToSel, VgprToVccCopy, SplitTo32, diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll index c5ded11c7d323..65c96a3db5bbf 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -new-reg-bank-select -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s ; Divergent phis that don't require lowering using lane mask merging @@ -101,27 +101,23 @@ define void @divergent_i1_phi_used_inside_loop(float %val, ptr %addr) { ; GFX10-LABEL: divergent_i1_phi_used_inside_loop: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v3, 1 -; GFX10-NEXT: v_mov_b32_e32 v4, s5 -; GFX10-NEXT: ; implicit-def: $sgpr6 +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s5, 1 +; GFX10-NEXT: s_mov_b32 s6, 0 ; GFX10-NEXT: .LBB2_1: ; %loop ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_xor_b32_e32 v3, 1, v3 -; GFX10-NEXT: v_cvt_f32_u32_e32 v5, v4 -; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v4 -; GFX10-NEXT: v_and_b32_e32 v6, 1, v3 -; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v5, v0 -; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v6 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 -; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo -; GFX10-NEXT: s_and_b32 s4, exec_lo, s4 -; GFX10-NEXT: s_or_b32 s6, s6, s4 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s6 +; GFX10-NEXT: s_xor_b32 s5, s5, 1 +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB2_1 ; GFX10-NEXT: ; %bb.2: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cmp_lg_u32 s5, 0 
+; GFX10-NEXT: s_cselect_b32 s4, exec_lo, 0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s4 ; GFX10-NEXT: flat_store_dword v[1:2], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -147,29 +143,25 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa ; GFX10-LABEL: divergent_i1_phi_used_inside_loop_bigger_loop_body: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v1 +; GFX10-NEXT: v_cmp_lt_f32_e64 s4, 1.0, v1 ; GFX10-NEXT: v_mov_b32_e32 v1, 0x3e8 -; GFX10-NEXT: v_mov_b32_e32 v8, s4 -; GFX10-NEXT: ; implicit-def: $sgpr6 +; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: s_mov_b32 s6, 0 ; GFX10-NEXT: s_branch .LBB3_2 ; GFX10-NEXT: .LBB3_1: ; %loop_body ; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1 -; GFX10-NEXT: v_cvt_f32_u32_e32 v9, v8 -; GFX10-NEXT: s_xor_b32 s5, s5, -1 -; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8 -; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v9, v0 -; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 -; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo -; GFX10-NEXT: s_and_b32 s7, exec_lo, s5 -; GFX10-NEXT: s_or_b32 s6, s6, s7 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: v_cvt_f32_u32_e32 v8, s6 +; GFX10-NEXT: s_xor_b32 s4, s4, exec_lo +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v8, v0 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execz .LBB3_6 ; GFX10-NEXT: .LBB3_2: ; %loop_start ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_cmpk_le_i32 s6, 0x3e8 ; GFX10-NEXT: s_mov_b32 s7, 1 -; GFX10-NEXT: v_cmp_ge_i32_e32 vcc_lo, 0x3e8, v8 -; GFX10-NEXT: s_cbranch_vccz .LBB3_4 +; GFX10-NEXT: s_cbranch_scc0 .LBB3_4 ; GFX10-NEXT: ; %bb.3: ; %else ; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1 ; GFX10-NEXT: s_mov_b32 s7, 0 @@ -177,7 +169,6 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa ; GFX10-NEXT: .LBB3_4: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB3_2 Depth=1 ; GFX10-NEXT: s_xor_b32 s7, s7, 1 -; GFX10-NEXT: s_and_b32 s7, s7, 1 ; GFX10-NEXT: s_cmp_lg_u32 s7, 0 ; GFX10-NEXT: s_cbranch_scc1 .LBB3_1 ; GFX10-NEXT: ; %bb.5: ; %if @@ -185,8 +176,8 @@ define void @divergent_i1_phi_used_inside_loop_bigger_loop_body(float %val, floa ; GFX10-NEXT: flat_store_dword v[4:5], v1 ; GFX10-NEXT: s_branch .LBB3_1 ; GFX10-NEXT: .LBB3_6: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s4 ; GFX10-NEXT: flat_store_dword v[2:3], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -234,45 +225,47 @@ define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 ; GFX10-NEXT: s_mov_b32 s1, 0 ; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0 ; GFX10-NEXT: s_or_b64 s[12:13], s[4:5], s[0:1] -; GFX10-NEXT: s_mov_b32 s3, -1 ; GFX10-NEXT: s_load_dwordx8 s[4:11], s[12:13], 0x0 ; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1 ; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1 -; GFX10-NEXT: v_xor_b32_e32 v3, 1, v1 -; GFX10-NEXT: v_and_b32_e32 v3, 1, v3 +; GFX10-NEXT: v_and_b32_e32 v3, 1, v1 ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX10-NEXT: ; implicit-def: $vgpr3 +; GFX10-NEXT: s_xor_b32 s3, vcc_lo, exec_lo ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen +; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3 ; GFX10-NEXT: 
s_waitcnt vmcnt(0) ; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2 ; GFX10-NEXT: s_cbranch_vccnz .LBB4_4 ; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader -; GFX10-NEXT: v_mov_b32_e32 v3, s1 -; GFX10-NEXT: v_mov_b32_e32 v4, s1 +; GFX10-NEXT: s_mov_b32 s3, 0 ; GFX10-NEXT: .LBB4_2: ; %.preheader ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: buffer_load_dword v5, v3, s[4:7], 0 offen +; GFX10-NEXT: v_mov_b32_e32 v3, s1 ; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 4, v3 +; GFX10-NEXT: s_add_i32 s1, s1, 4 +; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v4, v5, v4 +; GFX10-NEXT: v_readfirstlane_b32 s12, v3 +; GFX10-NEXT: s_add_i32 s3, s12, s3 ; GFX10-NEXT: s_cbranch_vccnz .LBB4_2 ; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge -; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2 -; GFX10-NEXT: s_mov_b32 s3, 0 +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2 ; GFX10-NEXT: s_or_b32 s1, s0, vcc_lo -; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s1 -; GFX10-NEXT: .LBB4_4: ; %Flow -; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s3 +; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1 +; GFX10-NEXT: s_branch .LBB4_6 +; GFX10-NEXT: .LBB4_4: +; GFX10-NEXT: s_mov_b32 s1, exec_lo +; GFX10-NEXT: ; implicit-def: $vgpr1 +; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s1 ; GFX10-NEXT: s_cbranch_vccz .LBB4_6 ; GFX10-NEXT: ; %bb.5: ; %.19 ; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0 -; GFX10-NEXT: v_or_b32_e32 v3, 2, v1 +; GFX10-NEXT: v_or_b32_e32 v1, 2, v1 ; GFX10-NEXT: .LBB4_6: ; %.22 ; GFX10-NEXT: v_add_lshl_u32 v0, v0, s2, 2 -; GFX10-NEXT: buffer_store_dword v3, v0, s[8:11], 0 offen +; GFX10-NEXT: buffer_store_dword v1, v0, s[8:11], 0 offen ; GFX10-NEXT: s_endpgm .entry: %.0 = call i64 @llvm.amdgcn.s.getpc() diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll index ba30a4bd97684..b1cc9ef0c5737 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -new-reg-bank-select -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s ; Simples case, if - then, that requires lane mask merging, ; %phi lane mask will hold %val_A at %A. 
Lanes that are active in %B @@ -41,9 +41,10 @@ exit: define amdgpu_ps void @divergent_i1_phi_if_else(ptr addrspace(1) %out, i32 %tid, i32 %cond) { ; GFX10-LABEL: divergent_i1_phi_if_else: ; GFX10: ; %bb.0: ; %entry -; GFX10-NEXT: s_and_b32 s0, 1, s0 +; GFX10-NEXT: s_and_b32 s0, s0, 1 ; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3 -; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0 +; GFX10-NEXT: s_cmp_lg_u32 s0, 0 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 ; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo ; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1 ; GFX10-NEXT: ; %bb.1: ; %B @@ -105,46 +106,51 @@ exit: define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a) { ; GFX10-LABEL: loop_with_1break: ; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 -; GFX10-NEXT: ; implicit-def: $sgpr1 -; GFX10-NEXT: v_mov_b32_e32 v4, s0 +; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB2_2 ; GFX10-NEXT: .LBB2_1: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB2_2 Depth=1 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2 -; GFX10-NEXT: s_and_b32 s2, exec_lo, s1 -; GFX10-NEXT: s_or_b32 s0, s2, s0 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX10-NEXT: s_and_b32 s1, exec_lo, s5 +; GFX10-NEXT: s_or_b32 s4, s1, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execz .LBB2_4 ; GFX10-NEXT: .LBB2_2: ; %A ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_ashrrev_i32_e32 v5, 31, v4 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s2, exec_lo, -1 -; GFX10-NEXT: s_or_b32 s1, s1, s2 -; GFX10-NEXT: v_lshlrev_b64 v[5:6], 2, v[4:5] -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v2, v5 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, v3, v6, vcc_lo -; GFX10-NEXT: global_load_dword v7, v[7:8], off +; GFX10-NEXT: s_ashr_i32 s1, s0, 31 +; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 2 +; GFX10-NEXT: s_andn2_b32 s1, s5, exec_lo +; GFX10-NEXT: v_mov_b32_e32 v5, s3 +; GFX10-NEXT: v_mov_b32_e32 v4, s2 +; GFX10-NEXT: s_and_b32 s5, exec_lo, exec_lo +; GFX10-NEXT: s_or_b32 s5, s1, s5 +; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v2, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v3, v5, vcc_lo +; GFX10-NEXT: global_load_dword v4, v[4:5], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v7 -; GFX10-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4 +; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB2_1 ; GFX10-NEXT: ; %bb.3: ; %loop.body ; GFX10-NEXT: ; in Loop: Header=BB2_2 Depth=1 -; GFX10-NEXT: v_add_co_u32 v5, vcc_lo, v0, v5 -; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, v1, v6, vcc_lo -; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v4 -; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x64, v4 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: global_load_dword v7, v[5:6], off -; GFX10-NEXT: v_mov_b32_e32 v4, v8 -; GFX10-NEXT: s_and_b32 s3, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s1, s1, s3 +; GFX10-NEXT: v_mov_b32_e32 v5, s3 +; GFX10-NEXT: v_mov_b32_e32 v4, s2 +; GFX10-NEXT: s_add_i32 s2, s0, 1 +; GFX10-NEXT: s_cmpk_lt_u32 s0, 0x64 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 +; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v0, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v1, v5, vcc_lo +; GFX10-NEXT: s_andn2_b32 s3, s5, exec_lo +; GFX10-NEXT: s_and_b32 s0, exec_lo, s0 +; GFX10-NEXT: s_or_b32 s5, s3, s0 +; GFX10-NEXT: global_load_dword v6, v[4:5], off +; GFX10-NEXT: s_mov_b32 s0, s2 ; 
GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v7, 1, v7 -; GFX10-NEXT: global_store_dword v[5:6], v7, off +; GFX10-NEXT: v_add_nc_u32_e32 v6, 1, v6 +; GFX10-NEXT: global_store_dword v[4:5], v6, off ; GFX10-NEXT: s_branch .LBB2_1 ; GFX10-NEXT: .LBB2_4: ; %exit ; GFX10-NEXT: s_endpgm @@ -174,62 +180,69 @@ exit: define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b) { ; GFX10-LABEL: loop_with_2breaks: ; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 -; GFX10-NEXT: ; implicit-def: $sgpr1 -; GFX10-NEXT: v_mov_b32_e32 v6, s0 +; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB3_3 ; GFX10-NEXT: .LBB3_1: ; %Flow3 ; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s3, exec_lo, s4 -; GFX10-NEXT: s_or_b32 s1, s1, s3 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s7 +; GFX10-NEXT: s_andn2_b32 s2, s5, exec_lo +; GFX10-NEXT: s_and_b32 s3, exec_lo, s6 +; GFX10-NEXT: s_or_b32 s5, s2, s3 ; GFX10-NEXT: .LBB3_2: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2 -; GFX10-NEXT: s_and_b32 s2, exec_lo, s1 -; GFX10-NEXT: s_or_b32 s0, s2, s0 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX10-NEXT: s_and_b32 s1, exec_lo, s5 +; GFX10-NEXT: s_or_b32 s4, s1, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execz .LBB3_6 ; GFX10-NEXT: .LBB3_3: ; %A ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_ashrrev_i32_e32 v7, 31, v6 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s2, exec_lo, -1 -; GFX10-NEXT: s_or_b32 s1, s1, s2 -; GFX10-NEXT: v_lshlrev_b64 v[7:8], 2, v[6:7] -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v2, v7 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v3, v8, vcc_lo -; GFX10-NEXT: global_load_dword v9, v[9:10], off +; GFX10-NEXT: s_ashr_i32 s1, s0, 31 +; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 2 +; GFX10-NEXT: s_andn2_b32 s1, s5, exec_lo +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: v_mov_b32_e32 v6, s2 +; GFX10-NEXT: s_and_b32 s5, exec_lo, exec_lo +; GFX10-NEXT: s_or_b32 s5, s1, s5 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v2, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v3, v7, vcc_lo +; GFX10-NEXT: global_load_dword v6, v[6:7], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9 -; GFX10-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6 +; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB3_2 ; GFX10-NEXT: ; %bb.4: ; %B ; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1 -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v4, v7 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo -; GFX10-NEXT: s_mov_b32 s4, -1 -; GFX10-NEXT: global_load_dword v9, v[9:10], off +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: v_mov_b32_e32 v6, s2 +; GFX10-NEXT: s_mov_b32 s6, exec_lo +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v4, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v5, v7, vcc_lo +; GFX10-NEXT: global_load_dword v6, v[6:7], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9 -; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6 +; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB3_1 ; GFX10-NEXT: ; %bb.5: ; %loop.body 
; GFX10-NEXT: ; in Loop: Header=BB3_3 Depth=1 -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v0, v7 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, v1, v8, vcc_lo -; GFX10-NEXT: v_add_nc_u32_e32 v10, 1, v6 -; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x64, v6 -; GFX10-NEXT: s_andn2_b32 s4, -1, exec_lo -; GFX10-NEXT: global_load_dword v9, v[7:8], off -; GFX10-NEXT: v_mov_b32_e32 v6, v10 -; GFX10-NEXT: s_and_b32 s5, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s4, s4, s5 +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: v_mov_b32_e32 v6, s2 +; GFX10-NEXT: s_add_i32 s2, s0, 1 +; GFX10-NEXT: s_cmpk_lt_u32 s0, 0x64 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v0, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v1, v7, vcc_lo +; GFX10-NEXT: s_andn2_b32 s3, s6, exec_lo +; GFX10-NEXT: s_and_b32 s0, exec_lo, s0 +; GFX10-NEXT: s_or_b32 s6, s3, s0 +; GFX10-NEXT: global_load_dword v8, v[6:7], off +; GFX10-NEXT: s_mov_b32 s0, s2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 1, v9 -; GFX10-NEXT: global_store_dword v[7:8], v9, off +; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8 +; GFX10-NEXT: global_store_dword v[6:7], v8, off ; GFX10-NEXT: s_branch .LBB3_1 ; GFX10-NEXT: .LBB3_6: ; %exit ; GFX10-NEXT: s_endpgm @@ -265,78 +278,87 @@ exit: define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c) { ; GFX10-LABEL: loop_with_3breaks: ; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 -; GFX10-NEXT: ; implicit-def: $sgpr1 -; GFX10-NEXT: v_mov_b32_e32 v8, s0 +; GFX10-NEXT: ; implicit-def: $sgpr5 ; GFX10-NEXT: s_branch .LBB4_4 ; GFX10-NEXT: .LBB4_1: ; %Flow5 ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: s_andn2_b32 s4, -1, exec_lo -; GFX10-NEXT: s_and_b32 s5, exec_lo, s5 -; GFX10-NEXT: s_or_b32 s4, s4, s5 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s9 +; GFX10-NEXT: s_andn2_b32 s2, s6, exec_lo +; GFX10-NEXT: s_and_b32 s3, exec_lo, s8 +; GFX10-NEXT: s_or_b32 s6, s2, s3 ; GFX10-NEXT: .LBB4_2: ; %Flow4 ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s3, exec_lo, s4 -; GFX10-NEXT: s_or_b32 s1, s1, s3 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s7 +; GFX10-NEXT: s_andn2_b32 s2, s5, exec_lo +; GFX10-NEXT: s_and_b32 s3, exec_lo, s6 +; GFX10-NEXT: s_or_b32 s5, s2, s3 ; GFX10-NEXT: .LBB4_3: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2 -; GFX10-NEXT: s_and_b32 s2, exec_lo, s1 -; GFX10-NEXT: s_or_b32 s0, s2, s0 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX10-NEXT: s_and_b32 s1, exec_lo, s5 +; GFX10-NEXT: s_or_b32 s4, s1, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execz .LBB4_8 ; GFX10-NEXT: .LBB4_4: ; %A ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_ashrrev_i32_e32 v9, 31, v8 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s2, exec_lo, -1 -; GFX10-NEXT: s_or_b32 s1, s1, s2 -; GFX10-NEXT: v_lshlrev_b64 v[9:10], 2, v[8:9] -; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v2, v9 -; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v3, v10, vcc_lo -; GFX10-NEXT: global_load_dword v11, v[11:12], off +; GFX10-NEXT: s_ashr_i32 s1, s0, 31 +; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 2 +; GFX10-NEXT: s_andn2_b32 s1, s5, 
exec_lo +; GFX10-NEXT: v_mov_b32_e32 v9, s3 +; GFX10-NEXT: v_mov_b32_e32 v8, s2 +; GFX10-NEXT: s_and_b32 s5, exec_lo, exec_lo +; GFX10-NEXT: s_or_b32 s5, s1, s5 +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v2, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v3, v9, vcc_lo +; GFX10-NEXT: global_load_dword v8, v[8:9], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11 -; GFX10-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8 +; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB4_3 ; GFX10-NEXT: ; %bb.5: ; %B ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 -; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v4, v9 -; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v5, v10, vcc_lo -; GFX10-NEXT: s_mov_b32 s4, -1 -; GFX10-NEXT: global_load_dword v11, v[11:12], off +; GFX10-NEXT: v_mov_b32_e32 v9, s3 +; GFX10-NEXT: v_mov_b32_e32 v8, s2 +; GFX10-NEXT: s_mov_b32 s6, exec_lo +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v4, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v5, v9, vcc_lo +; GFX10-NEXT: global_load_dword v8, v[8:9], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11 -; GFX10-NEXT: s_and_saveexec_b32 s3, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8 +; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB4_2 ; GFX10-NEXT: ; %bb.6: ; %C ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 -; GFX10-NEXT: v_add_co_u32 v11, vcc_lo, v6, v9 -; GFX10-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, v7, v10, vcc_lo -; GFX10-NEXT: s_mov_b32 s5, -1 -; GFX10-NEXT: global_load_dword v11, v[11:12], off +; GFX10-NEXT: v_mov_b32_e32 v9, s3 +; GFX10-NEXT: v_mov_b32_e32 v8, s2 +; GFX10-NEXT: s_mov_b32 s8, exec_lo +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v6, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v7, v9, vcc_lo +; GFX10-NEXT: global_load_dword v8, v[8:9], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v11 -; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8 +; GFX10-NEXT: s_and_saveexec_b32 s9, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB4_1 ; GFX10-NEXT: ; %bb.7: ; %loop.body ; GFX10-NEXT: ; in Loop: Header=BB4_4 Depth=1 -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v0, v9 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v1, v10, vcc_lo -; GFX10-NEXT: v_add_nc_u32_e32 v12, 1, v8 -; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x64, v8 -; GFX10-NEXT: s_andn2_b32 s5, -1, exec_lo -; GFX10-NEXT: global_load_dword v11, v[9:10], off -; GFX10-NEXT: v_mov_b32_e32 v8, v12 -; GFX10-NEXT: s_and_b32 s6, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s5, s5, s6 +; GFX10-NEXT: v_mov_b32_e32 v9, s3 +; GFX10-NEXT: v_mov_b32_e32 v8, s2 +; GFX10-NEXT: s_add_i32 s2, s0, 1 +; GFX10-NEXT: s_cmpk_lt_u32 s0, 0x64 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v0, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v1, v9, vcc_lo +; GFX10-NEXT: s_andn2_b32 s3, s8, exec_lo +; GFX10-NEXT: s_and_b32 s0, exec_lo, s0 +; GFX10-NEXT: s_or_b32 s8, s3, s0 +; GFX10-NEXT: global_load_dword v10, v[8:9], off +; GFX10-NEXT: s_mov_b32 s0, s2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v11, 1, v11 -; GFX10-NEXT: global_store_dword v[9:10], v11, off +; GFX10-NEXT: v_add_nc_u32_e32 v10, 1, v10 +; GFX10-NEXT: global_store_dword v[8:9], v10, off ; GFX10-NEXT: s_branch .LBB4_1 ; GFX10-NEXT: .LBB4_8: ; %exit ; GFX10-NEXT: s_endpgm @@ -382,60 +404,63 @@ exit: define amdgpu_cs void @loop_with_div_break_with_body(ptr 
addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) { ; GFX10-LABEL: loop_with_div_break_with_body: ; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_mov_b32 s4, 0 ; GFX10-NEXT: s_mov_b32 s0, 0 -; GFX10-NEXT: ; implicit-def: $sgpr1 -; GFX10-NEXT: ; implicit-def: $sgpr2 -; GFX10-NEXT: ; implicit-def: $sgpr3 -; GFX10-NEXT: v_mov_b32_e32 v6, s0 +; GFX10-NEXT: ; implicit-def: $sgpr5 +; GFX10-NEXT: ; implicit-def: $sgpr6 ; GFX10-NEXT: s_branch .LBB5_2 ; GFX10-NEXT: .LBB5_1: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB5_2 Depth=1 ; GFX10-NEXT: s_waitcnt_depctr 0xffe3 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: s_and_b32 s4, exec_lo, s2 -; GFX10-NEXT: s_or_b32 s0, s4, s0 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s4, exec_lo, s3 -; GFX10-NEXT: s_or_b32 s1, s1, s4 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s1 +; GFX10-NEXT: s_and_b32 s1, exec_lo, s5 +; GFX10-NEXT: s_or_b32 s4, s1, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execz .LBB5_4 ; GFX10-NEXT: .LBB5_2: ; %A ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_ashrrev_i32_e32 v7, 31, v6 -; GFX10-NEXT: s_andn2_b32 s3, s3, exec_lo -; GFX10-NEXT: s_and_b32 s4, exec_lo, -1 -; GFX10-NEXT: s_andn2_b32 s2, s2, exec_lo -; GFX10-NEXT: s_or_b32 s3, s3, s4 -; GFX10-NEXT: v_lshlrev_b64 v[7:8], 2, v[6:7] -; GFX10-NEXT: s_or_b32 s2, s2, s4 -; GFX10-NEXT: v_add_co_u32 v9, vcc_lo, v2, v7 -; GFX10-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v3, v8, vcc_lo -; GFX10-NEXT: global_load_dword v9, v[9:10], off +; GFX10-NEXT: s_ashr_i32 s1, s0, 31 +; GFX10-NEXT: s_mov_b32 s7, exec_lo +; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 2 +; GFX10-NEXT: s_andn2_b32 s1, s6, exec_lo +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: v_mov_b32_e32 v6, s2 +; GFX10-NEXT: s_and_b32 s6, exec_lo, s7 +; GFX10-NEXT: s_andn2_b32 s5, s5, exec_lo +; GFX10-NEXT: s_and_b32 s7, exec_lo, exec_lo +; GFX10-NEXT: s_or_b32 s6, s1, s6 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v2, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v3, v7, vcc_lo +; GFX10-NEXT: s_or_b32 s5, s5, s7 +; GFX10-NEXT: global_load_dword v6, v[6:7], off ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v9 -; GFX10-NEXT: s_and_saveexec_b32 s4, vcc_lo +; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v6 +; GFX10-NEXT: s_and_saveexec_b32 s1, vcc_lo ; GFX10-NEXT: s_cbranch_execz .LBB5_1 ; GFX10-NEXT: ; %bb.3: ; %loop.body ; GFX10-NEXT: ; in Loop: Header=BB5_2 Depth=1 -; GFX10-NEXT: v_add_co_u32 v7, vcc_lo, v0, v7 -; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, v1, v8, vcc_lo -; GFX10-NEXT: v_add_nc_u32_e32 v10, 1, v6 -; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 0x64, v6 -; GFX10-NEXT: s_andn2_b32 s3, s3, exec_lo -; GFX10-NEXT: global_load_dword v9, v[7:8], off -; GFX10-NEXT: s_and_b32 s5, exec_lo, 0 -; GFX10-NEXT: v_mov_b32_e32 v6, v10 -; GFX10-NEXT: s_andn2_b32 s2, s2, exec_lo -; GFX10-NEXT: s_and_b32 s6, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s3, s3, s5 -; GFX10-NEXT: s_or_b32 s2, s2, s6 +; GFX10-NEXT: v_mov_b32_e32 v7, s3 +; GFX10-NEXT: v_mov_b32_e32 v6, s2 +; GFX10-NEXT: s_add_i32 s2, s0, 1 +; GFX10-NEXT: s_cmpk_lt_u32 s0, 0x64 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v0, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v1, v7, vcc_lo +; GFX10-NEXT: s_andn2_b32 s3, s6, exec_lo +; GFX10-NEXT: s_and_b32 s6, exec_lo, 0 +; GFX10-NEXT: s_andn2_b32 s5, s5, exec_lo +; GFX10-NEXT: global_load_dword v8, v[6:7], off +; GFX10-NEXT: s_and_b32 s0, 
exec_lo, s0 +; GFX10-NEXT: s_or_b32 s6, s3, s6 +; GFX10-NEXT: s_or_b32 s5, s5, s0 +; GFX10-NEXT: s_mov_b32 s0, s2 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v9, 1, v9 -; GFX10-NEXT: global_store_dword v[7:8], v9, off +; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8 +; GFX10-NEXT: global_store_dword v[6:7], v8, off ; GFX10-NEXT: s_branch .LBB5_1 ; GFX10-NEXT: .LBB5_4: ; %loop.exit.guard -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX10-NEXT: s_and_saveexec_b32 s0, s1 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_and_saveexec_b32 s0, s6 ; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX10-NEXT: s_cbranch_execz .LBB5_6 ; GFX10-NEXT: ; %bb.5: ; %break.body @@ -494,71 +519,71 @@ define amdgpu_ps i32 @irreducible_cfg(i32 %x, i32 %y, i32 %a0, i32 %a1, i32 %a2, ; GFX10-LABEL: irreducible_cfg: ; GFX10: ; %bb.0: ; %.entry ; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, v4, v1 -; GFX10-NEXT: s_and_b32 s0, 1, s0 -; GFX10-NEXT: ; implicit-def: $sgpr2 -; GFX10-NEXT: v_cmp_ne_u32_e64 s3, 0, s0 -; GFX10-NEXT: s_mov_b32 s0, 0 -; GFX10-NEXT: s_xor_b32 s1, vcc_lo, -1 -; GFX10-NEXT: s_mov_b32 s4, s1 +; GFX10-NEXT: s_mov_b32 s0, exec_lo +; GFX10-NEXT: s_mov_b32 s2, 0 +; GFX10-NEXT: s_and_b32 s3, s0, 1 +; GFX10-NEXT: s_xor_b32 s1, vcc_lo, s0 +; GFX10-NEXT: s_cmp_lg_u32 s3, 0 +; GFX10-NEXT: s_cselect_b32 s0, exec_lo, 0 ; GFX10-NEXT: s_branch .LBB6_2 ; GFX10-NEXT: .LBB6_1: ; %Flow2 ; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: s_and_b32 s5, exec_lo, s7 -; GFX10-NEXT: s_or_b32 s0, s5, s0 -; GFX10-NEXT: s_andn2_b32 s2, s2, exec_lo -; GFX10-NEXT: s_and_b32 s5, exec_lo, s3 -; GFX10-NEXT: s_or_b32 s2, s2, s5 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s0 +; GFX10-NEXT: s_and_b32 s3, exec_lo, s4 +; GFX10-NEXT: s_mov_b32 s1, exec_lo +; GFX10-NEXT: s_or_b32 s2, s3, s2 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s2 ; GFX10-NEXT: s_cbranch_execz .LBB6_8 ; GFX10-NEXT: .LBB6_2: ; %irr.guard ; GFX10-NEXT: ; =>This Loop Header: Depth=1 ; GFX10-NEXT: ; Child Loop BB6_6 Depth 2 -; GFX10-NEXT: s_andn2_b32 s5, s3, exec_lo -; GFX10-NEXT: s_and_b32 s3, exec_lo, s3 -; GFX10-NEXT: s_mov_b32 s6, -1 -; GFX10-NEXT: s_or_b32 s3, s5, s3 -; GFX10-NEXT: s_and_saveexec_b32 s5, s4 -; GFX10-NEXT: s_xor_b32 s4, exec_lo, s5 +; GFX10-NEXT: s_andn2_b32 s4, s0, exec_lo +; GFX10-NEXT: s_and_b32 s0, exec_lo, s0 +; GFX10-NEXT: s_mov_b32 s3, exec_lo +; GFX10-NEXT: s_or_b32 s0, s4, s0 +; GFX10-NEXT: s_and_saveexec_b32 s4, s1 +; GFX10-NEXT: s_xor_b32 s4, exec_lo, s4 ; GFX10-NEXT: ; %bb.3: ; %.loopexit ; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1 -; GFX10-NEXT: v_cmp_gt_i32_e32 vcc_lo, v5, v0 +; GFX10-NEXT: v_cmp_gt_i32_e64 s1, v5, v0 +; GFX10-NEXT: s_mov_b32 s5, exec_lo +; GFX10-NEXT: s_mov_b32 s6, exec_lo +; GFX10-NEXT: s_xor_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo +; GFX10-NEXT: s_or_b32 s5, s1, s5 +; GFX10-NEXT: s_and_b32 s1, exec_lo, s1 +; GFX10-NEXT: s_xor_b32 s5, s5, s6 ; GFX10-NEXT: s_andn2_b32 s3, s3, exec_lo -; GFX10-NEXT: s_andn2_b32 s7, -1, exec_lo -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s1 -; GFX10-NEXT: s_and_b32 s6, exec_lo, vcc_lo -; GFX10-NEXT: s_xor_b32 s5, s5, -1 -; GFX10-NEXT: s_or_b32 s3, s3, s6 ; GFX10-NEXT: s_and_b32 s5, exec_lo, s5 -; GFX10-NEXT: s_or_b32 s6, s7, s5 +; GFX10-NEXT: s_or_b32 s0, s0, s1 +; GFX10-NEXT: s_or_b32 s3, s3, s5 ; GFX10-NEXT: ; %bb.4: ; %Flow1 ; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1 ; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: s_mov_b32 s4, -1 -; GFX10-NEXT: s_mov_b32 s7, -1 -; GFX10-NEXT: 
s_and_saveexec_b32 s5, s6 +; GFX10-NEXT: s_mov_b32 s4, exec_lo +; GFX10-NEXT: s_and_saveexec_b32 s5, s3 ; GFX10-NEXT: s_cbranch_execz .LBB6_1 ; GFX10-NEXT: ; %bb.5: ; %.preheader ; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1 -; GFX10-NEXT: s_mov_b32 s6, 0 -; GFX10-NEXT: v_cmp_le_i32_e32 vcc_lo, v4, v0 +; GFX10-NEXT: v_cmp_le_i32_e64 s1, v4, v0 +; GFX10-NEXT: s_mov_b32 s3, 0 ; GFX10-NEXT: .LBB6_6: ; %.inner_loop ; GFX10-NEXT: ; Parent Loop BB6_2 Depth=1 ; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 -; GFX10-NEXT: s_and_b32 s7, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s6, s7, s6 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s6 +; GFX10-NEXT: s_and_b32 s6, exec_lo, s1 +; GFX10-NEXT: s_or_b32 s3, s6, s3 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s3 ; GFX10-NEXT: s_cbranch_execnz .LBB6_6 ; GFX10-NEXT: ; %bb.7: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB6_2 Depth=1 -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s6 -; GFX10-NEXT: s_andn2_b32 s6, -1, exec_lo -; GFX10-NEXT: s_and_b32 s7, exec_lo, 0 -; GFX10-NEXT: s_or_b32 s7, s6, s7 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s3 +; GFX10-NEXT: s_andn2_b32 s1, s4, exec_lo +; GFX10-NEXT: s_and_b32 s3, exec_lo, 0 +; GFX10-NEXT: s_or_b32 s4, s1, s3 ; GFX10-NEXT: s_branch .LBB6_1 ; GFX10-NEXT: .LBB6_8: ; %.exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s0 -; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, v3, s2 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s2 +; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, v3, s0 ; GFX10-NEXT: v_readfirstlane_b32 s0, v0 ; GFX10-NEXT: ; return to shader part epilog .entry: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll index 1855ede0483de..1caecb599ffed 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll @@ -1,31 +1,28 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -new-reg-bank-select -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s define void @temporal_divergent_i1_phi(float %val, ptr %addr) { ; GFX10-LABEL: temporal_divergent_i1_phi: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s6, 1 ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v3, 1 -; GFX10-NEXT: v_mov_b32_e32 v4, s5 -; GFX10-NEXT: ; implicit-def: $sgpr6 ; GFX10-NEXT: .LBB0_1: ; %loop ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_cvt_f32_u32_e32 v5, v4 -; GFX10-NEXT: v_and_b32_e32 v6, 1, v3 -; GFX10-NEXT: v_xor_b32_e32 v3, 1, v3 -; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v4 -; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v5, v0 -; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v6 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 -; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo -; GFX10-NEXT: s_and_b32 s4, exec_lo, s4 -; GFX10-NEXT: s_or_b32 s6, s6, s4 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s5 +; GFX10-NEXT: s_mov_b32 s7, s6 +; GFX10-NEXT: s_add_i32 s5, s5, 1 +; GFX10-NEXT: s_xor_b32 s6, s6, 1 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB0_1 ; GFX10-NEXT: ; 
%bb.2: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cmp_lg_u32 s7, 0 +; GFX10-NEXT: s_cselect_b32 s4, exec_lo, 0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s4 ; GFX10-NEXT: flat_store_dword v[1:2], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -51,27 +48,23 @@ define void @temporal_divergent_i1_non_phi(float %val, ptr %addr) { ; GFX10-LABEL: temporal_divergent_i1_non_phi: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v3, 1 -; GFX10-NEXT: v_mov_b32_e32 v4, s5 -; GFX10-NEXT: ; implicit-def: $sgpr6 +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s5, 1 +; GFX10-NEXT: s_mov_b32 s6, 0 ; GFX10-NEXT: .LBB1_1: ; %loop ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_xor_b32_e32 v3, 1, v3 -; GFX10-NEXT: v_cvt_f32_u32_e32 v5, v4 -; GFX10-NEXT: v_add_nc_u32_e32 v4, 1, v4 -; GFX10-NEXT: v_and_b32_e32 v6, 1, v3 -; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v5, v0 -; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v6 -; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 -; GFX10-NEXT: s_andn2_b32 s6, s6, exec_lo -; GFX10-NEXT: s_and_b32 s4, exec_lo, s4 -; GFX10-NEXT: s_or_b32 s6, s6, s4 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s6 +; GFX10-NEXT: s_xor_b32 s5, s5, 1 +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 ; GFX10-NEXT: s_cbranch_execnz .LBB1_1 ; GFX10-NEXT: ; %bb.2: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 -; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s6 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cmp_lg_u32 s5, 0 +; GFX10-NEXT: s_cselect_b32 s4, exec_lo, 0 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1.0, s4 ; GFX10-NEXT: flat_store_dword v[1:2], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] @@ -98,60 +91,60 @@ exit: define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, i32 %x.size, ptr addrspace(1) inreg %a, ptr addrspace(1) inreg %a.break) { ; GFX10-LABEL: loop_with_1break: ; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: v_mov_b32_e32 v3, 0 +; GFX10-NEXT: s_mov_b32 s8, 0 ; GFX10-NEXT: s_mov_b32 s4, 0 -; GFX10-NEXT: v_mov_b32_e32 v4, s1 -; GFX10-NEXT: v_mov_b32_e32 v3, s0 -; GFX10-NEXT: v_mov_b32_e32 v5, s4 -; GFX10-NEXT: ; implicit-def: $sgpr0 -; GFX10-NEXT: ; implicit-def: $sgpr1 +; GFX10-NEXT: ; implicit-def: $sgpr9 ; GFX10-NEXT: s_branch .LBB2_3 ; GFX10-NEXT: .LBB2_1: ; %loop.body ; GFX10-NEXT: ; in Loop: Header=BB2_3 Depth=1 -; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v0, v6 -; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v1, v7, vcc_lo -; GFX10-NEXT: v_add_nc_u32_e32 v9, 1, v5 -; GFX10-NEXT: v_cmp_lt_u32_e32 vcc_lo, v5, v2 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: global_load_dword v8, v[6:7], off +; GFX10-NEXT: v_mov_b32_e32 v4, s6 +; GFX10-NEXT: v_mov_b32_e32 v5, s7 +; GFX10-NEXT: s_andn2_b32 s6, s9, exec_lo ; GFX10-NEXT: s_mov_b32 s5, 0 -; GFX10-NEXT: v_mov_b32_e32 v5, v9 -; GFX10-NEXT: s_and_b32 s6, exec_lo, vcc_lo -; GFX10-NEXT: s_or_b32 s1, s1, s6 +; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v0, v4 +; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v1, v5, vcc_lo +; GFX10-NEXT: v_cmp_lt_u32_e32 vcc_lo, s4, v2 +; GFX10-NEXT: s_add_i32 s4, s4, 1 +; GFX10-NEXT: global_load_dword v6, v[4:5], off +; GFX10-NEXT: s_and_b32 s7, exec_lo, vcc_lo 
+; GFX10-NEXT: s_or_b32 s9, s6, s7 ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_add_nc_u32_e32 v8, 1, v8 -; GFX10-NEXT: global_store_dword v[6:7], v8, off +; GFX10-NEXT: v_add_nc_u32_e32 v6, 1, v6 +; GFX10-NEXT: global_store_dword v[4:5], v6, off ; GFX10-NEXT: .LBB2_2: ; %Flow ; GFX10-NEXT: ; in Loop: Header=BB2_3 Depth=1 -; GFX10-NEXT: s_and_b32 s5, 1, s5 -; GFX10-NEXT: s_and_b32 s6, exec_lo, s1 -; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, s5 -; GFX10-NEXT: s_or_b32 s4, s6, s4 -; GFX10-NEXT: s_andn2_b32 s0, s0, exec_lo -; GFX10-NEXT: s_and_b32 s5, exec_lo, s5 -; GFX10-NEXT: s_or_b32 s0, s0, s5 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cmp_lg_u32 s5, 0 +; GFX10-NEXT: s_cselect_b32 s5, exec_lo, 0 +; GFX10-NEXT: s_and_b32 s6, exec_lo, s9 +; GFX10-NEXT: s_or_b32 s8, s6, s8 +; GFX10-NEXT: s_waitcnt_depctr 0xffe3 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8 ; GFX10-NEXT: s_cbranch_execz .LBB2_5 ; GFX10-NEXT: .LBB2_3: ; %A ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5 -; GFX10-NEXT: s_andn2_b32 s1, s1, exec_lo -; GFX10-NEXT: s_and_b32 s5, exec_lo, -1 -; GFX10-NEXT: s_or_b32 s1, s1, s5 -; GFX10-NEXT: v_lshlrev_b64 v[6:7], 2, v[5:6] -; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v3, v6 -; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v4, v7, vcc_lo -; GFX10-NEXT: global_load_dword v8, v[8:9], off +; GFX10-NEXT: s_ashr_i32 s5, s4, 31 +; GFX10-NEXT: s_lshl_b64 s[6:7], s[4:5], 2 +; GFX10-NEXT: s_add_u32 s10, s0, s6 +; GFX10-NEXT: s_addc_u32 s11, s1, s7 +; GFX10-NEXT: global_load_dword v4, v3, s[10:11] ; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8 -; GFX10-NEXT: s_cbranch_vccnz .LBB2_1 +; GFX10-NEXT: v_readfirstlane_b32 s5, v4 +; GFX10-NEXT: s_cmp_lg_u32 s5, 0 +; GFX10-NEXT: s_cselect_b32 s5, 1, 0 +; GFX10-NEXT: s_andn2_b32 s9, s9, exec_lo +; GFX10-NEXT: s_and_b32 s10, exec_lo, exec_lo +; GFX10-NEXT: s_or_b32 s9, s9, s10 +; GFX10-NEXT: s_cmp_lg_u32 s5, 0 +; GFX10-NEXT: s_cbranch_scc1 .LBB2_1 ; GFX10-NEXT: ; %bb.4: ; in Loop: Header=BB2_3 Depth=1 ; GFX10-NEXT: s_mov_b32 s5, 1 -; GFX10-NEXT: ; implicit-def: $vgpr5 +; GFX10-NEXT: ; implicit-def: $sgpr4 ; GFX10-NEXT: s_branch .LBB2_2 ; GFX10-NEXT: .LBB2_5: ; %loop.exit.guard -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: s_and_saveexec_b32 s1, s0 -; GFX10-NEXT: s_xor_b32 s1, exec_lo, s1 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8 +; GFX10-NEXT: s_and_saveexec_b32 s0, s5 +; GFX10-NEXT: s_xor_b32 s0, exec_lo, s0 ; GFX10-NEXT: s_cbranch_execz .LBB2_7 ; GFX10-NEXT: ; %bb.6: ; %break.body ; GFX10-NEXT: v_mov_b32_e32 v0, 10 @@ -186,3 +179,264 @@ exit: ret void } +; Temporal divergence i1 across inner and outer nested loops +define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n.i, ptr %mat, ptr %mat.oe, ptr %arr) { +; GFX10-LABEL: nested_loops_temporal_divergence_inner: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 +; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: .LBB3_1: ; %OuterHeader +; GFX10-NEXT: ; =>This Loop Header: Depth=1 +; GFX10-NEXT: ; Child Loop BB3_2 Depth 2 +; GFX10-NEXT: s_ashr_i32 s7, s6, 31 +; GFX10-NEXT: s_mov_b32 s9, s8 +; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: v_mov_b32_e32 v6, s10 +; GFX10-NEXT: v_mov_b32_e32 v7, s11 +; GFX10-NEXT: s_mov_b32 s10, 0 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v2, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v3, v7, 
vcc_lo +; GFX10-NEXT: flat_load_dword v0, v[6:7] +; GFX10-NEXT: .LBB3_2: ; %InnerHeader +; GFX10-NEXT: ; Parent Loop BB3_1 Depth=1 +; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 +; GFX10-NEXT: v_cvt_f32_u32_e32 v6, s10 +; GFX10-NEXT: s_add_i32 s10, s10, 1 +; GFX10-NEXT: s_xor_b32 s9, s9, exec_lo +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v6, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cbranch_execnz .LBB3_2 +; GFX10-NEXT: ; %bb.3: ; %UseInst +; GFX10-NEXT: ; in Loop: Header=BB3_1 Depth=1 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: v_mov_b32_e32 v6, s6 +; GFX10-NEXT: v_mov_b32_e32 v7, s7 +; GFX10-NEXT: v_cmp_lt_u32_e32 vcc_lo, s6, v1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s9 +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6 +; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: flat_store_byte v[6:7], v0 +; GFX10-NEXT: s_waitcnt_depctr 0xffe3 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_cbranch_execnz .LBB3_1 +; GFX10-NEXT: ; %bb.4: ; %exit +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_setpc_b64 s[30:31] +entry: + %pre.cond = fcmp ogt float %pre.cond.val, 1.0 + br label %OuterHeader + +OuterHeader: + %i = phi i32 [ 0, %entry ], [ %i.plus.1, %OuterLatch ] + %mat.i = getelementptr float, ptr %mat, i32 %i + %val.i = load float, ptr %mat.i + br label %InnerHeader + +InnerHeader: + %j = phi i32 [ 0, %OuterHeader ], [ %j.plus.1, %InnerHeader ] + %bool.counter = phi i1 [ %pre.cond, %OuterHeader ], [ %odd.even.counter, %InnerHeader ] + + %odd.even.counter = xor i1 %bool.counter, true + + %f.j = uitofp i32 %j to float + %j.plus.1 = add i32 %j, 1 + %cond.j = fcmp ogt float %f.j, %val.i + br i1 %cond.j, label %UseInst, label %InnerHeader + +UseInst: + %mat.oe.i = getelementptr i1, ptr %mat.oe, i32 %i + ; mat.oe[i] = oddOReven(mat[i]) + store i1 %odd.even.counter, ptr %mat.oe.i + br label %OuterLatch + +OuterLatch: + %cond.i = icmp ult i32 %i, %n.i + %i.plus.1 = add i32 %i, 1 + br i1 %cond.i, label %exit, label %OuterHeader + +exit: + ret void +} + +define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n.i, ptr %mat, ptr %mat.oe, ptr %arr) { +; GFX10-LABEL: nested_loops_temporal_divergence_outer: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 +; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: .LBB4_1: ; %OuterHeader +; GFX10-NEXT: ; =>This Loop Header: Depth=1 +; GFX10-NEXT: ; Child Loop BB4_2 Depth 2 +; GFX10-NEXT: s_ashr_i32 s7, s6, 31 +; GFX10-NEXT: s_mov_b32 s9, s8 +; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: v_mov_b32_e32 v6, s10 +; GFX10-NEXT: v_mov_b32_e32 v7, s11 +; GFX10-NEXT: s_mov_b32 s10, 0 +; GFX10-NEXT: v_add_co_u32 v6, vcc_lo, v2, v6 +; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v3, v7, vcc_lo +; GFX10-NEXT: flat_load_dword v0, v[6:7] +; GFX10-NEXT: .LBB4_2: ; %InnerHeader +; GFX10-NEXT: ; Parent Loop BB4_1 Depth=1 +; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 +; GFX10-NEXT: v_cvt_f32_u32_e32 v6, s10 +; GFX10-NEXT: s_add_i32 s10, s10, 1 +; GFX10-NEXT: s_xor_b32 s9, s9, exec_lo +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v6, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: 
s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cbranch_execnz .LBB4_2 +; GFX10-NEXT: ; %bb.3: ; %UseInst +; GFX10-NEXT: ; in Loop: Header=BB4_1 Depth=1 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: v_mov_b32_e32 v6, s6 +; GFX10-NEXT: v_mov_b32_e32 v7, s7 +; GFX10-NEXT: v_cmp_lt_u32_e32 vcc_lo, s6, v1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s9 +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6 +; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: flat_store_byte v[6:7], v0 +; GFX10-NEXT: s_waitcnt_depctr 0xffe3 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_cbranch_execnz .LBB4_1 +; GFX10-NEXT: ; %bb.4: ; %exit +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_setpc_b64 s[30:31] +entry: + %pre.cond = fcmp ogt float %pre.cond.val, 1.0 + br label %OuterHeader + +OuterHeader: + %i = phi i32 [ 0, %entry ], [ %i.plus.1, %OuterLatch ] + %mat.i = getelementptr float, ptr %mat, i32 %i + %val.i = load float, ptr %mat.i + br label %InnerHeader + +InnerHeader: + %j = phi i32 [ 0, %OuterHeader ], [ %j.plus.1, %InnerHeader ] + %bool.counter = phi i1 [ %pre.cond, %OuterHeader ], [ %odd.even.counter, %InnerHeader ] + + %odd.even.counter = xor i1 %bool.counter, true + + %f.j = uitofp i32 %j to float + %j.plus.1 = add i32 %j, 1 + %cond.j = fcmp ogt float %f.j, %val.i + br i1 %cond.j, label %UseInst, label %InnerHeader + +UseInst: + %mat.oe.i = getelementptr i1, ptr %mat.oe, i32 %i + ; mat.oe[i] = oddOReven(mat[i]) + store i1 %odd.even.counter, ptr %mat.oe.i + br label %OuterLatch + +OuterLatch: + %cond.i = icmp ult i32 %i, %n.i + %i.plus.1 = add i32 %i, 1 + br i1 %cond.i, label %exit, label %OuterHeader + +exit: + ret void +} + +define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i, ptr %mat, ptr %mat.oe, ptr %arr) { +; GFX10-LABEL: nested_loops_temporal_divergence_both: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0 +; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: s_mov_b32 s6, 0 +; GFX10-NEXT: .LBB5_1: ; %OuterHeader +; GFX10-NEXT: ; =>This Loop Header: Depth=1 +; GFX10-NEXT: ; Child Loop BB5_2 Depth 2 +; GFX10-NEXT: s_ashr_i32 s7, s6, 31 +; GFX10-NEXT: s_mov_b32 s9, s8 +; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2 +; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: v_mov_b32_e32 v8, s10 +; GFX10-NEXT: v_mov_b32_e32 v9, s11 +; GFX10-NEXT: s_mov_b32 s10, 0 +; GFX10-NEXT: v_add_co_u32 v8, vcc_lo, v2, v8 +; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, v3, v9, vcc_lo +; GFX10-NEXT: flat_load_dword v0, v[8:9] +; GFX10-NEXT: .LBB5_2: ; %InnerHeader +; GFX10-NEXT: ; Parent Loop BB5_1 Depth=1 +; GFX10-NEXT: ; => This Inner Loop Header: Depth=2 +; GFX10-NEXT: v_cvt_f32_u32_e32 v8, s10 +; GFX10-NEXT: s_add_i32 s10, s10, 1 +; GFX10-NEXT: s_xor_b32 s9, s9, exec_lo +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v8, v0 +; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_cbranch_execnz .LBB5_2 +; GFX10-NEXT: ; %bb.3: ; %UseInst +; GFX10-NEXT: ; in Loop: Header=BB5_1 Depth=1 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: v_mov_b32_e32 v9, s7 +; GFX10-NEXT: v_mov_b32_e32 v8, s6 +; GFX10-NEXT: v_cmp_lt_u32_e32 vcc_lo, s6, v1 +; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s9 +; GFX10-NEXT: s_add_i32 s6, s6, 1 +; GFX10-NEXT: v_add_co_u32 v8, s4, v4, v8 +; GFX10-NEXT: 
v_add_co_ci_u32_e64 v9, s4, v5, v9, s4 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: flat_store_byte v[8:9], v0 +; GFX10-NEXT: s_waitcnt_depctr 0xffe3 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_cbranch_execnz .LBB5_1 +; GFX10-NEXT: ; %bb.4: ; %exit +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: flat_store_byte v[6:7], v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_setpc_b64 s[30:31] +entry: + %pre.cond = fcmp ogt float %pre.cond.val, 1.0 + br label %OuterHeader + +OuterHeader: + %i = phi i32 [ 0, %entry ], [ %i.plus.1, %OuterLatch ] + %mat.i = getelementptr float, ptr %mat, i32 %i + %val.i = load float, ptr %mat.i + br label %InnerHeader + +InnerHeader: + %j = phi i32 [ 0, %OuterHeader ], [ %j.plus.1, %InnerHeader ] + %bool.counter = phi i1 [ %pre.cond, %OuterHeader ], [ %odd.even.counter, %InnerHeader ] + + %odd.even.counter = xor i1 %bool.counter, true + + %f.j = uitofp i32 %j to float + %j.plus.1 = add i32 %j, 1 + %cond.j = fcmp ogt float %f.j, %val.i + br i1 %cond.j, label %UseInst, label %InnerHeader + +UseInst: + %mat.oe.i = getelementptr i1, ptr %mat.oe, i32 %i + ; mat.oe[i] = oddOReven(mat[i]) + store i1 %odd.even.counter, ptr %mat.oe.i + br label %OuterLatch + +OuterLatch: + %cond.i = icmp ult i32 %i, %n.i + %i.plus.1 = add i32 %i, 1 + br i1 %cond.i, label %exit, label %OuterHeader + +exit: + ; arr = oddOReven(mat[n.i - 1]) + store i1 %odd.even.counter, ptr %arr + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.mir index e9a415c3da7ee..d50e32906c7a9 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.mir @@ -323,3 +323,403 @@ body: | %34:sreg_32_xm0_xexec(s32) = SI_IF %35(s1), %bb.4, implicit-def $exec, implicit-def $scc, implicit $exec G_BR %bb.2 ... 
+ +--- +name: nested_loops_temporal_divergence_inner +legalized: true +tracksRegLiveness: true +body: | + ; GFX10-LABEL: name: nested_loops_temporal_divergence_inner + ; GFX10: bb.0: + ; GFX10-NEXT: successors: %bb.1(0x80000000) + ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5 + ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32) + ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY]](s32), [[C]] + ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.1: + ; GFX10-NEXT: successors: %bb.2(0x80000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %12(s32), %bb.3, [[C1]](s32), %bb.0 + ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C1]](s32), %bb.0, %14(s32), %bb.3 + ; GFX10-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[PHI1]](s32) + ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C2]](s32) + ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV]], [[SHL]](s64) + ; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32)) + ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.2: + ; GFX10-NEXT: successors: %bb.3(0x04000000), %bb.2(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.1, %36(s1), %bb.2 + ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C3]](s32), %bb.1, %22(s32), %bb.2 + ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %24(s32), %bb.2, [[C3]](s32), %bb.1 + ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1) + ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true + ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY7]], [[C4]] + ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI4]](s32) + ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI4]], [[C5]] + ; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[LOAD]] + ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI3]](s32) + ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1) + ; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.3 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.3: + ; GFX10-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32) + ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV1]], [[SEXT]](s64) + ; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1) + ; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: G_STORE [[ZEXT]](s32), [[PTR_ADD1]](p0) :: (store (s8)) + ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C6]] + ; GFX10-NEXT: 
[[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI1]](s32), [[COPY1]] + ; GFX10-NEXT: [[INT1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ICMP]](s1), [[PHI]](s32) + ; GFX10-NEXT: SI_LOOP [[INT1]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.4 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.4: + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s32) + ; GFX10-NEXT: SI_RETURN + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = COPY $vgpr3 + %4:_(p0) = G_MERGE_VALUES %2(s32), %3(s32) + %5:_(s32) = COPY $vgpr4 + %6:_(s32) = COPY $vgpr5 + %7:_(p0) = G_MERGE_VALUES %5(s32), %6(s32) + %8:_(s32) = G_FCONSTANT float 1.000000e+00 + %9:_(s1) = G_FCMP floatpred(ogt), %0(s32), %8 + %10:_(s32) = G_CONSTANT i32 0 + + bb.1: + %11:_(s32) = G_PHI %12(s32), %bb.3, %10(s32), %bb.0 + %13:_(s32) = G_PHI %10(s32), %bb.0, %14(s32), %bb.3 + %15:_(s64) = G_SEXT %13(s32) + %16:_(s32) = G_CONSTANT i32 2 + %17:_(s64) = G_SHL %15, %16(s32) + %18:_(p0) = G_PTR_ADD %4, %17(s64) + %19:_(s32) = G_LOAD %18(p0) :: (load (s32)) + %20:_(s32) = G_CONSTANT i32 0 + + bb.2: + successors: %bb.3(0x04000000), %bb.2(0x7c000000) + + %21:_(s32) = G_PHI %20(s32), %bb.1, %22(s32), %bb.2 + %23:_(s32) = G_PHI %24(s32), %bb.2, %20(s32), %bb.1 + %25:_(s1) = G_PHI %26(s1), %bb.2, %9(s1), %bb.1 + %27:_(s1) = G_CONSTANT i1 true + %26:_(s1) = G_XOR %25, %27 + %28:_(s32) = G_UITOFP %23(s32) + %29:_(s32) = G_CONSTANT i32 1 + %24:_(s32) = G_ADD %23, %29 + %30:_(s1) = G_FCMP floatpred(ogt), %28(s32), %19 + %22:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %30(s1), %21(s32) + SI_LOOP %22(s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.3 + + bb.3: + successors: %bb.4(0x04000000), %bb.1(0x7c000000) + + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %22(s32) + %31:_(p0) = G_PTR_ADD %7, %15(s64) + %32:_(s32) = G_ZEXT %26(s1) + %33:_(s32) = G_CONSTANT i32 1 + G_STORE %32(s32), %31(p0) :: (store (s8)) + %14:_(s32) = G_ADD %13, %33 + %34:_(s1) = G_ICMP intpred(ult), %13(s32), %1 + %12:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %34(s1), %11(s32) + SI_LOOP %12(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.4 + + bb.4: + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %12(s32) + SI_RETURN +... 
+ +--- +name: nested_loops_temporal_divergence_outer +legalized: true +tracksRegLiveness: true +body: | + ; GFX10-LABEL: name: nested_loops_temporal_divergence_outer + ; GFX10: bb.0: + ; GFX10-NEXT: successors: %bb.1(0x80000000) + ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5 + ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32) + ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY]](s32), [[C]] + ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.1: + ; GFX10-NEXT: successors: %bb.2(0x80000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %12(s32), %bb.3, [[C1]](s32), %bb.0 + ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C1]](s32), %bb.0, %14(s32), %bb.3 + ; GFX10-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[PHI1]](s32) + ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C2]](s32) + ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV]], [[SHL]](s64) + ; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32)) + ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.2: + ; GFX10-NEXT: successors: %bb.3(0x04000000), %bb.2(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.1, %36(s1), %bb.2 + ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C3]](s32), %bb.1, %22(s32), %bb.2 + ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %24(s32), %bb.2, [[C3]](s32), %bb.1 + ; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1) + ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true + ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY7]], [[C4]] + ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI4]](s32) + ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI4]], [[C5]] + ; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[LOAD]] + ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI3]](s32) + ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1) + ; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.3 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.3: + ; GFX10-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32) + ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV1]], [[SEXT]](s64) + ; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1) + ; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: G_STORE [[ZEXT]](s32), [[PTR_ADD1]](p0) :: (store (s8)) + ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C6]] + ; GFX10-NEXT: 
[[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI1]](s32), [[COPY1]] + ; GFX10-NEXT: [[INT1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ICMP]](s1), [[PHI]](s32) + ; GFX10-NEXT: SI_LOOP [[INT1]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.4 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.4: + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s32) + ; GFX10-NEXT: SI_RETURN + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = COPY $vgpr3 + %4:_(p0) = G_MERGE_VALUES %2(s32), %3(s32) + %5:_(s32) = COPY $vgpr4 + %6:_(s32) = COPY $vgpr5 + %7:_(p0) = G_MERGE_VALUES %5(s32), %6(s32) + %8:_(s32) = G_FCONSTANT float 1.000000e+00 + %9:_(s1) = G_FCMP floatpred(ogt), %0(s32), %8 + %10:_(s32) = G_CONSTANT i32 0 + + bb.1: + %11:_(s32) = G_PHI %12(s32), %bb.3, %10(s32), %bb.0 + %13:_(s32) = G_PHI %10(s32), %bb.0, %14(s32), %bb.3 + %15:_(s64) = G_SEXT %13(s32) + %16:_(s32) = G_CONSTANT i32 2 + %17:_(s64) = G_SHL %15, %16(s32) + %18:_(p0) = G_PTR_ADD %4, %17(s64) + %19:_(s32) = G_LOAD %18(p0) :: (load (s32)) + %20:_(s32) = G_CONSTANT i32 0 + + bb.2: + successors: %bb.3(0x04000000), %bb.2(0x7c000000) + + %21:_(s32) = G_PHI %20(s32), %bb.1, %22(s32), %bb.2 + %23:_(s32) = G_PHI %24(s32), %bb.2, %20(s32), %bb.1 + %25:_(s1) = G_PHI %26(s1), %bb.2, %9(s1), %bb.1 + %27:_(s1) = G_CONSTANT i1 true + %26:_(s1) = G_XOR %25, %27 + %28:_(s32) = G_UITOFP %23(s32) + %29:_(s32) = G_CONSTANT i32 1 + %24:_(s32) = G_ADD %23, %29 + %30:_(s1) = G_FCMP floatpred(ogt), %28(s32), %19 + %22:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %30(s1), %21(s32) + SI_LOOP %22(s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.3 + + bb.3: + successors: %bb.4(0x04000000), %bb.1(0x7c000000) + + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %22(s32) + %31:_(p0) = G_PTR_ADD %7, %15(s64) + %32:_(s32) = G_ZEXT %26(s1) + %33:_(s32) = G_CONSTANT i32 1 + G_STORE %32(s32), %31(p0) :: (store (s8)) + %14:_(s32) = G_ADD %13, %33 + %34:_(s1) = G_ICMP intpred(ult), %13(s32), %1 + %12:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %34(s1), %11(s32) + SI_LOOP %12(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.4 + + bb.4: + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %12(s32) + SI_RETURN +... 
+ +--- +name: nested_loops_temporal_divergence_both +legalized: true +tracksRegLiveness: true +body: | + ; GFX10-LABEL: name: nested_loops_temporal_divergence_both + ; GFX10: bb.0: + ; GFX10-NEXT: successors: %bb.1(0x80000000) + ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32) + ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5 + ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32) + ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6 + ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7 + ; GFX10-NEXT: [[MV2:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32) + ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00 + ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY]](s32), [[C]] + ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.1: + ; GFX10-NEXT: successors: %bb.2(0x80000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.3, [[C1]](s32), %bb.0 + ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C1]](s32), %bb.0, %17(s32), %bb.3 + ; GFX10-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[PHI1]](s32) + ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C2]](s32) + ; GFX10-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV]], [[SHL]](s64) + ; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[PTR_ADD]](p0) :: (load (s32)) + ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[FCMP]](s1) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.2: + ; GFX10-NEXT: successors: %bb.3(0x04000000), %bb.2(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY8]](s1), %bb.1, %40(s1), %bb.2 + ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[C3]](s32), %bb.1, %25(s32), %bb.2 + ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %27(s32), %bb.2, [[C3]](s32), %bb.1 + ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1) + ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true + ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[COPY9]], [[C4]] + ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI4]](s32) + ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI4]], [[C5]] + ; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[LOAD]] + ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI3]](s32) + ; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[XOR]](s1) + ; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.3 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.3: + ; GFX10-NEXT: successors: %bb.4(0x04000000), %bb.1(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32) + ; GFX10-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[MV1]], [[SEXT]](s64) + ; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1) + ; GFX10-NEXT: 
[[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: G_STORE [[ZEXT]](s32), [[PTR_ADD1]](p0) :: (store (s8)) + ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C6]] + ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[PHI1]](s32), [[COPY1]] + ; GFX10-NEXT: [[INT1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ICMP]](s1), [[PHI]](s32) + ; GFX10-NEXT: SI_LOOP [[INT1]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.4 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.4: + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s32) + ; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[XOR]](s1) + ; GFX10-NEXT: G_STORE [[ZEXT1]](s32), [[MV2]](p0) :: (store (s8)) + ; GFX10-NEXT: SI_RETURN + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7 + + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = COPY $vgpr3 + %4:_(p0) = G_MERGE_VALUES %2(s32), %3(s32) + %5:_(s32) = COPY $vgpr4 + %6:_(s32) = COPY $vgpr5 + %7:_(p0) = G_MERGE_VALUES %5(s32), %6(s32) + %8:_(s32) = COPY $vgpr6 + %9:_(s32) = COPY $vgpr7 + %10:_(p0) = G_MERGE_VALUES %8(s32), %9(s32) + %11:_(s32) = G_FCONSTANT float 1.000000e+00 + %12:_(s1) = G_FCMP floatpred(ogt), %0(s32), %11 + %13:_(s32) = G_CONSTANT i32 0 + + bb.1: + %14:_(s32) = G_PHI %15(s32), %bb.3, %13(s32), %bb.0 + %16:_(s32) = G_PHI %13(s32), %bb.0, %17(s32), %bb.3 + %18:_(s64) = G_SEXT %16(s32) + %19:_(s32) = G_CONSTANT i32 2 + %20:_(s64) = G_SHL %18, %19(s32) + %21:_(p0) = G_PTR_ADD %4, %20(s64) + %22:_(s32) = G_LOAD %21(p0) :: (load (s32)) + %23:_(s32) = G_CONSTANT i32 0 + + bb.2: + successors: %bb.3(0x04000000), %bb.2(0x7c000000) + + %24:_(s32) = G_PHI %23(s32), %bb.1, %25(s32), %bb.2 + %26:_(s32) = G_PHI %27(s32), %bb.2, %23(s32), %bb.1 + %28:_(s1) = G_PHI %29(s1), %bb.2, %12(s1), %bb.1 + %30:_(s1) = G_CONSTANT i1 true + %29:_(s1) = G_XOR %28, %30 + %31:_(s32) = G_UITOFP %26(s32) + %32:_(s32) = G_CONSTANT i32 1 + %27:_(s32) = G_ADD %26, %32 + %33:_(s1) = G_FCMP floatpred(ogt), %31(s32), %22 + %25:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %33(s1), %24(s32) + SI_LOOP %25(s32), %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.3 + + bb.3: + successors: %bb.4(0x04000000), %bb.1(0x7c000000) + + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %25(s32) + %34:_(p0) = G_PTR_ADD %7, %18(s64) + %35:_(s32) = G_ZEXT %29(s1) + %36:_(s32) = G_CONSTANT i32 1 + G_STORE %35(s32), %34(p0) :: (store (s8)) + %17:_(s32) = G_ADD %16, %36 + %37:_(s1) = G_ICMP intpred(ult), %16(s32), %1 + %15:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %37(s1), %14(s32) + SI_LOOP %15(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.4 + + bb.4: + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %15(s32) + %38:_(s32) = G_ZEXT %29(s1) + G_STORE %38(s32), %10(p0) :: (store (s8)) + SI_RETURN +... 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll index 1934958ea8f37..bff3ed9228e05 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.ll @@ -1,24 +1,24 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 -; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s +; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -new-reg-bank-select -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s define void @temporal_divergent_i32(float %val, ptr %addr) { ; GFX10-LABEL: temporal_divergent_i32: ; GFX10: ; %bb.0: ; %entry ; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; GFX10-NEXT: s_mov_b32 s4, -1 -; GFX10-NEXT: v_mov_b32_e32 v3, s4 -; GFX10-NEXT: s_mov_b32 s4, 0 +; GFX10-NEXT: s_mov_b32 s5, 0 ; GFX10-NEXT: .LBB0_1: ; %loop ; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 -; GFX10-NEXT: v_add_nc_u32_e32 v3, 1, v3 -; GFX10-NEXT: v_cvt_f32_u32_e32 v4, v3 -; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v4, v0 -; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4 -; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4 +; GFX10-NEXT: s_add_i32 s4, s4, 1 +; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s4 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 ; GFX10-NEXT: s_cbranch_execnz .LBB0_1 ; GFX10-NEXT: ; %bb.2: ; %exit -; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4 -; GFX10-NEXT: flat_store_dword v[1:2], v3 +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: flat_store_dword v[1:2], v0 ; GFX10-NEXT: s_waitcnt lgkmcnt(0) ; GFX10-NEXT: s_setpc_b64 s[30:31] entry: @@ -35,3 +35,40 @@ exit: store i32 %counter, ptr %addr ret void } + +define void @temporal_divergent_i32_multiple_use(float %val, ptr %addr, ptr %addr2) { +; GFX10-LABEL: temporal_divergent_i32_multiple_use: +; GFX10: ; %bb.0: ; %entry +; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_mov_b32 s4, -1 +; GFX10-NEXT: s_mov_b32 s5, 0 +; GFX10-NEXT: .LBB1_1: ; %loop +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_add_i32 s4, s4, 1 +; GFX10-NEXT: v_cvt_f32_u32_e32 v5, s4 +; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v5, v0 +; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5 +; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: s_cbranch_execnz .LBB1_1 +; GFX10-NEXT: ; %bb.2: ; %exit +; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5 +; GFX10-NEXT: v_mov_b32_e32 v0, s4 +; GFX10-NEXT: flat_store_dword v[1:2], v0 +; GFX10-NEXT: flat_store_dword v[3:4], v0 +; GFX10-NEXT: s_waitcnt lgkmcnt(0) +; GFX10-NEXT: s_setpc_b64 s[30:31] +entry: + br label %loop + +loop: + %counter = phi i32 [ 0, %entry ], [ %counter.plus.1, %loop ] + %f.counter = uitofp i32 %counter to float + %cond = fcmp ogt float %f.counter, %val + %counter.plus.1 = add i32 %counter, 1 + br i1 %cond, label %exit, label %loop + +exit: + store i32 %counter, ptr %addr + store i32 %counter, ptr %addr2 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir index 996815e2d38fc..aad15b3e525b4 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir +++ 
b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-reg.mir @@ -64,3 +64,74 @@ body: | G_STORE %9(s32), %3(p0) :: (store (s32)) SI_RETURN ... + +--- +name: temporal_divergent_i32_multiple_use +legalized: true +tracksRegLiveness: true +body: | + ; GFX10-LABEL: name: temporal_divergent_i32_multiple_use + ; GFX10: bb.0: + ; GFX10-NEXT: successors: %bb.1(0x80000000) + ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32) + ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4 + ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32) + ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1 + ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.1: + ; GFX10-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000) + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %10(s32), %bb.1, [[C1]](s32), %bb.0 + ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %12(s32), %bb.1 + ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C2]] + ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[ADD]](s32) + ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]] + ; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP]](s1), [[PHI]](s32) + ; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + ; GFX10-NEXT: G_BR %bb.2 + ; GFX10-NEXT: {{ $}} + ; GFX10-NEXT: bb.2: + ; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32) + ; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV]](p0) :: (store (s32)) + ; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV1]](p0) :: (store (s32)) + ; GFX10-NEXT: SI_RETURN + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4 + + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(p0) = G_MERGE_VALUES %1(s32), %2(s32) + %4:_(s32) = COPY $vgpr3 + %5:_(s32) = COPY $vgpr4 + %6:_(p0) = G_MERGE_VALUES %4(s32), %5(s32) + %7:_(s32) = G_CONSTANT i32 -1 + %8:_(s32) = G_CONSTANT i32 0 + + bb.1: + successors: %bb.2(0x04000000), %bb.1(0x7c000000) + + %9:_(s32) = G_PHI %10(s32), %bb.1, %8(s32), %bb.0 + %11:_(s32) = G_PHI %7(s32), %bb.0, %12(s32), %bb.1 + %13:_(s32) = G_CONSTANT i32 1 + %12:_(s32) = G_ADD %11, %13 + %14:_(s32) = G_UITOFP %12(s32) + %15:_(s1) = G_FCMP floatpred(ogt), %14(s32), %0 + %10:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), %15(s1), %9(s32) + SI_LOOP %10(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec + G_BR %bb.2 + + bb.2: + G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %10(s32) + G_STORE %12(s32), %3(p0) :: (store (s32)) + G_STORE %12(s32), %6(p0) :: (store (s32)) + SI_RETURN +...