https://github.com/petar-avramovic updated https://github.com/llvm/llvm-project/pull/168411
From 73f2bf84bb2bcff3cd20aa207116f214cde943f9 Mon Sep 17 00:00:00 2001
From: Petar Avramovic <[email protected]>
Date: Mon, 17 Nov 2025 18:47:58 +0100
Subject: [PATCH] AMDGPU/GlobalISel: RegBankLegalize rules for G_FABS and
 G_FNEG

---
 .../AMDGPU/AMDGPURegBankLegalizeHelper.cpp    |  26 +-
 .../AMDGPU/AMDGPURegBankLegalizeHelper.h      |   1 +
 .../AMDGPU/AMDGPURegBankLegalizeRules.cpp     |  19 +
 llvm/test/CodeGen/AMDGPU/GlobalISel/fabs.ll   | 340 ++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/GlobalISel/fneg.ll   | 303 ++++++++++++++++
 5 files changed, 683 insertions(+), 6 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/fabs.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/fneg.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 1765d054a3c0d..123fc5bf37a19 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -437,6 +437,13 @@ std::pair<Register, Register> RegBankLegalizeHelper::unpackAExt(Register Reg) {
   return {Lo.getReg(0), Hi.getReg(0)};
 }
 
+std::pair<Register, Register>
+RegBankLegalizeHelper::unpackAExtTruncS16(Register Reg) {
+  auto [Lo32, Hi32] = unpackAExt(Reg);
+  return {B.buildTrunc(SgprRB_S16, Lo32).getReg(0),
+          B.buildTrunc(SgprRB_S16, Hi32).getReg(0)};
+}
+
 void RegBankLegalizeHelper::lowerUnpackBitShift(MachineInstr &MI) {
   Register Lo, Hi;
   switch (MI.getOpcode()) {
@@ -629,14 +636,21 @@ void RegBankLegalizeHelper::lowerSplitTo32(MachineInstr &MI) {
 void RegBankLegalizeHelper::lowerSplitTo16(MachineInstr &MI) {
   Register Dst = MI.getOperand(0).getReg();
   assert(MRI.getType(Dst) == V2S16);
-  auto [Op1Lo32, Op1Hi32] = unpackAExt(MI.getOperand(1).getReg());
-  auto [Op2Lo32, Op2Hi32] = unpackAExt(MI.getOperand(2).getReg());
   unsigned Opc = MI.getOpcode();
   auto Flags = MI.getFlags();
-  auto Op1Lo = B.buildTrunc(SgprRB_S16, Op1Lo32);
-  auto Op1Hi = B.buildTrunc(SgprRB_S16, Op1Hi32);
-  auto Op2Lo = B.buildTrunc(SgprRB_S16, Op2Lo32);
-  auto Op2Hi = B.buildTrunc(SgprRB_S16, Op2Hi32);
+
+  if (MI.getNumOperands() == 2) {
+    auto [Op1Lo, Op1Hi] = unpackAExtTruncS16(MI.getOperand(1).getReg());
+    auto Lo = B.buildInstr(Opc, {SgprRB_S16}, {Op1Lo}, Flags);
+    auto Hi = B.buildInstr(Opc, {SgprRB_S16}, {Op1Hi}, Flags);
+    B.buildMergeLikeInstr(Dst, {Lo, Hi});
+    MI.eraseFromParent();
+    return;
+  }
+
+  assert(MI.getNumOperands() == 3);
+  auto [Op1Lo, Op1Hi] = unpackAExtTruncS16(MI.getOperand(1).getReg());
+  auto [Op2Lo, Op2Hi] = unpackAExtTruncS16(MI.getOperand(2).getReg());
   auto Lo = B.buildInstr(Opc, {SgprRB_S16}, {Op1Lo, Op2Lo}, Flags);
   auto Hi = B.buildInstr(Opc, {SgprRB_S16}, {Op1Hi, Op2Hi}, Flags);
   B.buildMergeLikeInstr(Dst, {Lo, Hi});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
index e7598f888e4b5..4f1c3c02fa5d6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
@@ -118,6 +118,7 @@ class RegBankLegalizeHelper {
   std::pair<Register, Register> unpackZExt(Register Reg);
   std::pair<Register, Register> unpackSExt(Register Reg);
   std::pair<Register, Register> unpackAExt(Register Reg);
+  std::pair<Register, Register> unpackAExtTruncS16(Register Reg);
   void lowerUnpackBitShift(MachineInstr &MI);
   void lowerV_BFE(MachineInstr &MI);
   void lowerS_BFE(MachineInstr &MI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index b81a08de383d9..4051dc8495f6f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -951,6 +951,25 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
       .Any({{UniV2S32}, {{UniInVgprV2S32}, {VgprV2S32, VgprV2S32}}})
       .Any({{DivV2S32}, {{VgprV2S32}, {VgprV2S32, VgprV2S32}}});
 
+  // FNEG and FABS are either folded as source modifiers or selected as
+  // bitwise XOR and AND with a sign-bit mask. XOR and AND are available on
+  // the SALU, but for targets without SALU float we still select them as
+  // VGPR since there would be no real SGPR use.
+  addRulesForGOpcs({G_FNEG, G_FABS}, Standard)
+      .Uni(S16, {{UniInVgprS16}, {Vgpr16}}, !hasSALUFloat)
+      .Uni(S16, {{Sgpr16}, {Sgpr16}}, hasSALUFloat)
+      .Div(S16, {{Vgpr16}, {Vgpr16}})
+      .Uni(S32, {{UniInVgprS32}, {Vgpr32}}, !hasSALUFloat)
+      .Uni(S32, {{Sgpr32}, {Sgpr32}}, hasSALUFloat)
+      .Div(S32, {{Vgpr32}, {Vgpr32}})
+      .Uni(S64, {{UniInVgprS64}, {Vgpr64}})
+      .Div(S64, {{Vgpr64}, {Vgpr64}})
+      .Uni(V2S16, {{UniInVgprV2S16}, {VgprV2S16}}, !hasSALUFloat)
+      .Uni(V2S16, {{SgprV2S16}, {SgprV2S16}, ScalarizeToS16}, hasSALUFloat)
+      .Div(V2S16, {{VgprV2S16}, {VgprV2S16}})
+      .Any({{UniV2S32}, {{UniInVgprV2S32}, {VgprV2S32}}})
+      .Any({{DivV2S32}, {{VgprV2S32}, {VgprV2S32}}});
+
   addRulesForGOpcs({G_FPTOUI})
       .Any({{UniS32, S32}, {{Sgpr32}, {Sgpr32}}}, hasSALUFloat)
       .Any({{UniS32, S32}, {{UniInVgprS32}, {Vgpr32}}}, !hasSALUFloat);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fabs.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fabs.ll
new file mode 100644
index 0000000000000..96cf528056cb1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fabs.ll
@@ -0,0 +1,340 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1100 -o - %s | FileCheck -check-prefixes=GCN,GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1200 -o - %s | FileCheck -check-prefixes=GCN,GFX12 %s
+
+define amdgpu_ps void @v_fabs_f16(half %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; GCN-NEXT:    global_store_b16 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fabs = call half @llvm.fabs.f16(half %in)
+  store half %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f16(half inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fff, s0
+; GFX11-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_f16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call half @llvm.fabs.f16(half %in)
+  store half %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f16_salu_use(half inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f16_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fff, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_f16_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call half @llvm.fabs.f16(half %in)
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, half %fabs, half 0.0
+  store half %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fabs_f32(float %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    global_store_b32 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fabs = call float @llvm.fabs.f32(float %in)
+  store float %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f32(float inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_f32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_bitset0_b32 s0, 31
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call float @llvm.fabs.f32(float %in)
+  store float %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f32_salu_use(float inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f32_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_f32_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_bitset0_b32 s0, 31
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call float @llvm.fabs.f32(float %in)
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, float %fabs, float 0.0
+  store float %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fabs_f64(double %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v1
+; GCN-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT:    s_endpgm
+  %fabs = call double @llvm.fabs.f64(double %in)
+  store double %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f64(double inreg %in, ptr addrspace(1) %out) {
+; GCN-LABEL: s_fabs_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_and_b32_e32 v3, 0x7fffffff, v3
+; GCN-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GCN-NEXT:    s_endpgm
+  %fabs = call double @llvm.fabs.f64(double %in)
+  store double %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_f64_salu_use(double inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f64_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_and_b32_e32 v3, 0x7fffffff, v3
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_f64_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_and_b32_e32 v3, 0x7fffffff, v3
+; GFX12-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX12-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call double @llvm.fabs.f64(double %in)
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, double %fabs, double 0.0
+  store double %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fabs_v2f16(<2 x half> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_v2f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff7fff, v0
+; GCN-NEXT:    global_store_b32 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+  store <2 x half> %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_v2f16(<2 x half> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fff7fff, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX12-NEXT:    s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT:    s_and_b32 s1, s1, 0x7fff
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s1
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+  store <2 x half> %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_v2f16_salu_use(<2 x half> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f16_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fff7fff, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f16_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX12-NEXT:    s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT:    s_and_b32 s2, s2, 0x7fff
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, <2 x half> %fabs, <2 x half> <half 0.0, half 0.0>
+  store <2 x half> %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fabs_v2f32(<2 x float> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_v2f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT:    v_and_b32_e32 v1, 0x7fffffff, v1
+; GCN-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT:    s_endpgm
+  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+  store <2 x float> %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_v2f32(<2 x float> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT:    v_and_b32_e64 v3, 0x7fffffff, s1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_bitset0_b32 s0, 31
+; GFX12-NEXT:    s_bitset0_b32 s1, 31
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+  store <2 x float> %fabs, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_v2f32_salu_use(<2 x float> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f32_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT:    v_and_b32_e64 v3, 0x7fffffff, s1
+; GFX11-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f32_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_bitset0_b32 s0, 31
+; GFX12-NEXT:    s_bitset0_b32 s1, 31
+; GFX12-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX12-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, <2 x float> %fabs, <2 x float> <float 0.0, float 0.0>
+  store <2 x float> %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fabs_fneg_f32(float %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_fneg_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_or_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    global_store_b32 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fabs = call float @llvm.fabs.f32(float %in)
+  %fneg = fneg float %fabs
+  store float %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fabs_fneg_f32(float inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_fneg_f32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_or_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fabs_fneg_f32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_bitset1_b32 s0, 31
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fabs = call float @llvm.fabs.f32(float %in)
+  %fneg = fneg float %fabs
+  store float %fneg, ptr addrspace(1) %out
+  ret void
+}
+
+declare half @llvm.fabs.f16(half)
+declare float @llvm.fabs.f32(float)
+declare double @llvm.fabs.f64(double)
+declare <2 x half> @llvm.fabs.v2f16(<2 x half>)
+declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fneg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fneg.ll
new file mode 100644
index 0000000000000..8a260926d0a4f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fneg.ll
@@ -0,0 +1,303 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1100 -o - %s | FileCheck -check-prefixes=GCN,GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1200 -o - %s | FileCheck -check-prefixes=GCN,GFX12 %s
+
+define amdgpu_ps void @v_fneg_f16(half %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fneg_f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
+; GCN-NEXT:    global_store_b16 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg half %in
+  store half %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f16(half inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_f16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x8000, s0
+; GFX11-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_f16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x8000
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg half %in
+  store half %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f16_salu_use(half inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_f16_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x8000, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_f16_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x8000
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b16 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg half %in
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, half %fneg, half 0.0
+  store half %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fneg_f32(float %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fneg_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    global_store_b32 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg float %in
+  store float %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f32(float inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_f32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_f32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg float %in
+  store float %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f32_salu_use(float inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_f32_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_f32_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg float %in
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, float %fneg, float 0.0
+  store float %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fneg_f64(double %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fneg_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg double %in
+  store double %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f64(double inreg %in, ptr addrspace(1) %out) {
+; GCN-LABEL: s_fneg_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GCN-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT:    v_xor_b32_e32 v3, 0x80000000, v3
+; GCN-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg double %in
+  store double %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_f64_salu_use(double inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_f64_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_xor_b32_e32 v3, 0x80000000, v3
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_f64_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT:    v_xor_b32_e32 v3, 0x80000000, v3
+; GFX12-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX12-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT:    s_wait_alu 0xfffe
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg double %in
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, double %fneg, double 0.0
+  store double %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fneg_v2f16(<2 x half> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fneg_v2f16:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
+; GCN-NEXT:    global_store_b32 v[1:2], v0, off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg <2 x half> %in
+  store <2 x half> %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_v2f16(<2 x half> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_v2f16:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80008000, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_v2f16:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_lshr_b32 s1, s0, 16
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x8000
+; GFX12-NEXT:    s_xor_b32 s1, s1, 0x8000
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s1
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg <2 x half> %in
+  store <2 x half> %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_v2f16_salu_use(<2 x half> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_v2f16_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80008000, s0
+; GFX11-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX11-NEXT:    v_mov_b32_e32 v2, s0
+; GFX11-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_v2f16_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_lshr_b32 s2, s0, 16
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x8000
+; GFX12-NEXT:    s_xor_b32 s2, s2, 0x8000
+; GFX12-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX12-NEXT:    s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT:    s_cselect_b32 s0, s0, 0
+; GFX12-NEXT:    v_mov_b32_e32 v2, s0
+; GFX12-NEXT:    global_store_b32 v[0:1], v2, off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg <2 x half> %in
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, <2 x half> %fneg, <2 x half> <half 0.0, half 0.0>
+  store <2 x half> %sel, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_ps void @v_fneg_v2f32(<2 x float> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fneg_v2f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; GCN-NEXT:    global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT:    s_endpgm
+  %fneg = fneg <2 x float> %in
+  store <2 x float> %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_v2f32(<2 x float> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_v2f32:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT:    v_xor_b32_e64 v3, 0x80000000, s1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_v2f32:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX12-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg <2 x float> %in
+  store <2 x float> %fneg, ptr addrspace(1) %out
+  ret void
+}
+define amdgpu_ps void @s_fneg_v2f32_salu_use(<2 x float> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fneg_v2f32_salu_use:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    v_xor_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT:    v_xor_b32_e64 v3, 0x80000000, s1
+; GFX11-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v3
+; GFX11-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT:    s_endpgm
+;
+; GFX12-LABEL: s_fneg_v2f32_salu_use:
+; GFX12:       ; %bb.0:
+; GFX12-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; GFX12-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; GFX12-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX12-NEXT:    s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT:    v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT:    global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT:    s_endpgm
+  %fneg = fneg <2 x float> %in
+  %cond = icmp eq i32 %val, 0
+  %sel = select i1 %cond, <2 x float> %fneg, <2 x float> <float 0.0, float 0.0>
+  store <2 x float> %sel, ptr addrspace(1) %out
+  ret void
+}

_______________________________________________
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
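
A note for readers following the new rules above: G_FNEG and G_FABS only touch the IEEE-754 sign bit, which is why every check line in the tests is a single XOR/AND/OR or an s_bitset. The standalone C++ sketch below (illustrative only, not part of the patch; the function names are made up) shows the bit patterns the selected instructions implement:

// Illustrative sketch only: the bit manipulation behind the selected
// AND/XOR/OR instructions. Assumes IEEE-754 half/float layouts.
#include <cstdint>
#include <cstdio>

// fneg flips the sign bit, fabs clears it (bit 15 for f16, bit 31 for f32).
uint16_t fneg_f16_bits(uint16_t X) { return X ^ 0x8000; }      // s_xor_b32 / v_xor_b32
uint16_t fabs_f16_bits(uint16_t X) { return X & 0x7fff; }      // s_and_b32 / v_and_b32
uint32_t fneg_f32_bits(uint32_t X) { return X ^ 0x80000000u; } // s_xor_b32 / v_xor_b32
uint32_t fabs_f32_bits(uint32_t X) { return X & 0x7fffffffu; } // s_bitset0_b32 / v_and_b32
uint32_t fneg_fabs_f32_bits(uint32_t X) { return X | 0x80000000u; } // s_bitset1_b32 / v_or_b32

// For f64 the sign bit lives in the high 32-bit half, which is why the f64
// tests only touch v1/v3, the high register of each 64-bit pair.

// v2f16 packs two halves into one 32-bit register. The VALU/GFX11 form uses
// one combined mask (0x7fff7fff or 0x80008000); the gfx1200 SALU-float path
// (the ScalarizeToS16 rule, lowered via lowerSplitTo16) unpacks, operates
// per half, and repacks, matching the s_lshr_b32 + s_and_b32/s_xor_b32 +
// s_pack_ll_b32_b16 sequences in the GFX12 checks.
uint32_t fabs_v2f16_packed(uint32_t X) { return X & 0x7fff7fffu; }
uint32_t fabs_v2f16_scalarized(uint32_t X) {
  uint32_t Lo = X & 0x7fff;         // s_and_b32
  uint32_t Hi = (X >> 16) & 0x7fff; // s_lshr_b32 + s_and_b32
  return (Hi << 16) | Lo;           // s_pack_ll_b32_b16
}

int main() {
  uint32_t MinusTwo = 0xc0000000u; // -2.0f
  printf("fneg: 0x%08x fabs: 0x%08x\n", fneg_f32_bits(MinusTwo),
         fabs_f32_bits(MinusTwo));
  // Both v2f16 forms agree, here on <-1.0, -1.0> (0xbc00 per half):
  printf("%d\n", fabs_v2f16_packed(0xbc00bc00u) ==
                     fabs_v2f16_scalarized(0xbc00bc00u));
}

Both forms compute the same result; the rules simply pick whichever unit (SALU or VALU) can legally hold a uniform value on the given subtarget, falling back to UniInVgpr* when there is no SALU float support.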
