================
@@ -0,0 +1,340 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1100 -o - %s | FileCheck -check-prefixes=GCN,GFX11 %s
+; RUN: llc -global-isel -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mattr=-real-true16 -mcpu=gfx1200 -o - %s | FileCheck -check-prefixes=GCN,GFX12 %s
+
+define amdgpu_ps void @v_fabs_f16(half %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f16:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fff, v0
+; GCN-NEXT: global_store_b16 v[1:2], v0, off
+; GCN-NEXT: s_endpgm
+ %fabs = call half @llvm.fabs.f16(half %in)
+ store half %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f16(half inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fff, s0
+; GFX11-NEXT: global_store_b16 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b16 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call half @llvm.fabs.f16(half %in)
+ store half %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f16_salu_use(half inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f16_salu_use:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fff, s0
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: s_cselect_b32 s0, s0, 0
+; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: global_store_b16 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_f16_salu_use:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT: s_cmp_eq_u32 s1, 0
+; GFX12-NEXT: s_cselect_b32 s0, s0, 0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b16 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call half @llvm.fabs.f16(half %in)
+ %cond = icmp eq i32 %val, 0
+ %sel = select i1 %cond, half %fabs, half 0.0
+ store half %sel, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @v_fabs_f32(float %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT: global_store_b32 v[1:2], v0, off
+; GCN-NEXT: s_endpgm
+ %fabs = call float @llvm.fabs.f32(float %in)
+ store float %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f32(float inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_bitset0_b32 s0, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call float @llvm.fabs.f32(float %in)
+ store float %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f32_salu_use(float inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f32_salu_use:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: s_cselect_b32 s0, s0, 0
+; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_f32_salu_use:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_bitset0_b32 s0, 31
+; GFX12-NEXT: s_cmp_eq_u32 s1, 0
+; GFX12-NEXT: s_cselect_b32 s0, s0, 0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call float @llvm.fabs.f32(float %in)
+ %cond = icmp eq i32 %val, 0
+ %sel = select i1 %cond, float %fabs, float 0.0
+ store float %sel, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @v_fabs_f64(double %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_f64:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
+; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT: s_endpgm
+ %fabs = call double @llvm.fabs.f64(double %in)
+ store double %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f64(double inreg %in, ptr addrspace(1) %out) {
+; GCN-LABEL: s_fabs_f64:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GCN-NEXT: v_and_b32_e32 v3, 0x7fffffff, v3
+; GCN-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GCN-NEXT: s_endpgm
+ %fabs = call double @llvm.fabs.f64(double %in)
+ store double %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_f64_salu_use(double inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_f64_salu_use:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: s_cmp_eq_u32 s2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_and_b32_e32 v3, 0x7fffffff, v3
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_f64_salu_use:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT: s_cmp_eq_u32 s2, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_and_b32_e32 v3, 0x7fffffff, v3
+; GFX12-NEXT: v_readfirstlane_b32 s0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_readfirstlane_b32 s1, v3
+; GFX12-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %fabs = call double @llvm.fabs.f64(double %in)
+ %cond = icmp eq i32 %val, 0
+ %sel = select i1 %cond, double %fabs, double 0.0
+ store double %sel, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @v_fabs_v2f16(<2 x half> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_v2f16:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fff7fff, v0
+; GCN-NEXT: global_store_b32 v[1:2], v0, off
+; GCN-NEXT: s_endpgm
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ store <2 x half> %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_v2f16(<2 x half> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f16:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fff7fff, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_lshr_b32 s1, s0, 16
+; GFX12-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT: s_and_b32 s1, s1, 0x7fff
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ store <2 x half> %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_v2f16_salu_use(<2 x half> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f16_salu_use:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fff7fff, s0
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: s_cselect_b32 s0, s0, 0
+; GFX11-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f16_salu_use:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_lshr_b32 s2, s0, 16
+; GFX12-NEXT: s_and_b32 s0, s0, 0x7fff
+; GFX12-NEXT: s_and_b32 s2, s2, 0x7fff
+; GFX12-NEXT: s_cmp_eq_u32 s1, 0
+; GFX12-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_cselect_b32 s0, s0, 0
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %in)
+ %cond = icmp eq i32 %val, 0
+ %sel = select i1 %cond, <2 x half> %fabs, <2 x half> <half 0.0, half 0.0>
+ store <2 x half> %sel, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @v_fabs_v2f32(<2 x float> %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_v2f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
+; GCN-NEXT: global_store_b64 v[2:3], v[0:1], off
+; GCN-NEXT: s_endpgm
+ %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+ store <2 x float> %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_v2f32(<2 x float> inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT: v_and_b32_e64 v3, 0x7fffffff, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_bitset0_b32 s0, 31
+; GFX12-NEXT: s_bitset0_b32 s1, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+ store <2 x float> %fabs, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_v2f32_salu_use(<2 x float> inreg %in, i32 inreg %val, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_v2f32_salu_use:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_and_b32_e64 v2, 0x7fffffff, s0
+; GFX11-NEXT: v_and_b32_e64 v3, 0x7fffffff, s1
+; GFX11-NEXT: s_cmp_eq_u32 s2, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_v2f32_salu_use:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_bitset0_b32 s0, 31
+; GFX12-NEXT: s_bitset0_b32 s1, 31
+; GFX12-NEXT: s_cmp_eq_u32 s2, 0
+; GFX12-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_dual_mov_b32 v3, s1 :: v_dual_mov_b32 v2, s0
+; GFX12-NEXT: global_store_b64 v[0:1], v[2:3], off
+; GFX12-NEXT: s_endpgm
+ %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %in)
+ %cond = icmp eq i32 %val, 0
+ %sel = select i1 %cond, <2 x float> %fabs, <2 x float> <float 0.0, float 0.0>
+ store <2 x float> %sel, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_ps void @v_fabs_fneg_f32(float %in, ptr addrspace(1) %out) {
+; GCN-LABEL: v_fabs_fneg_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: global_store_b32 v[1:2], v0, off
+; GCN-NEXT: s_endpgm
+ %fabs = call float @llvm.fabs.f32(float %in)
+ %fneg = fneg float %fabs
+ store float %fneg, ptr addrspace(1) %out
+ ret void
+}
+define amdgpu_ps void @s_fabs_fneg_f32(float inreg %in, ptr addrspace(1) %out) {
+; GFX11-LABEL: s_fabs_fneg_f32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: v_or_b32_e64 v2, 0x80000000, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: s_fabs_fneg_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_bitset1_b32 s0, 31
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_endpgm
+ %fabs = call float @llvm.fabs.f32(float %in)
+ %fneg = fneg float %fabs
+ store float %fneg, ptr addrspace(1) %out
+ ret void
+}
----------------
petar-avramovic wrote:
Some fneg+fabs tests have been added; what exactly are we trying to test here?
https://github.com/llvm/llvm-project/pull/168411
_______________________________________________
llvm-branch-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits