https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/153026
>From bc0f27ae4ec17649dff589e1dc3468b8ad0f4e45 Mon Sep 17 00:00:00 2001 From: Matt Arsenault <matthew.arsena...@amd.com> Date: Mon, 11 Aug 2025 19:12:49 +0900 Subject: [PATCH 1/2] AMDGPU: Add tests for every mfma intrinsic v-to-a mapping Make sure the MFMA VGPR to AGPR InstrMapping table is complete. I think I got everything, except the full cross product of input types with the mfma scale intrinsics. Also makes sure we have coverage for smfmac and mfma_scale cases. --- .../rewrite-vgpr-mfma-to-agpr.gfx90a.ll | 141 +++ .../rewrite-vgpr-mfma-to-agpr.gfx950.ll | 664 ++++++++++++++ .../AMDGPU/rewrite-vgpr-mfma-to-agpr.ll | 867 ++++++++++++++++++ 3 files changed, 1672 insertions(+) create mode 100644 llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx90a.ll create mode 100644 llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx950.ll diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx90a.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx90a.ll new file mode 100644 index 0000000000000..7d00b12e7334a --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx90a.ll @@ -0,0 +1,141 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mcpu=gfx90a -amdgpu-mfma-vgpr-form < %s | FileCheck %s + +target triple = "amdgcn-amd-amdhsa" + +define void @test_rewrite_mfma_i32_32x32x8i8(i32 %arg0, i32 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_32x32x8i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_32x32x8i8 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x i32>, ptr addrspace(1) %ptr + %mai = call <16 x i32> @llvm.amdgcn.mfma.i32.32x32x8i8(i32 %arg0, i32 %arg1, <16 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_16x16x16i8(i32 %arg0, i32 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_16x16x16i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_16x16x16i8 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x i32>, ptr addrspace(1) %ptr + %mai = call <4 x i32> @llvm.amdgcn.mfma.i32.16x16x16i8(i32 %arg0, i32 %arg1, <4 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x2bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], 
v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x2bf16 a[0:31], v0, v1, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x2bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x2bf16 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_4x4x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_4x4x2bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_4x4x2bf16 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.4x4x2bf16(<2 x i16> %arg0, <2 x i16> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x4bf16(<2 x i16> %arg0, <2 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x4bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x4bf16 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4bf16(<2 x i16> %arg0, <2 x i16> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x8bf16(<2 x i16> %arg0, <2 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x8bf16: +; CHECK: ; %bb.0: 
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x8bf16 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8bf16(<2 x i16> %arg0, <2 x i16> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" } diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx950.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx950.ll new file mode 100644 index 0000000000000..b2465b02f2eee --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.gfx950.ll @@ -0,0 +1,664 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mcpu=gfx950 -amdgpu-mfma-vgpr-form < %s | FileCheck %s + +target triple = "amdgcn-amd-amdhsa" + +define void @test_rewrite_mfma_f32_16x16x32_f16(<8 x half> %arg0, <8 x half> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x32_f16 a[0:3], v[0:3], v[4:7], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.f16(<8 x half> %arg0, <8 x half> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x16_f16(<8 x half> %arg0, <8 x half> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x16_f16 a[0:15], v[0:3], v[4:7], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.f16(<8 x half> %arg0, <8 x half> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_16x16x64_i8(<4 x i32> %arg0, <4 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_16x16x64_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_16x16x64_i8 a[0:3], v[0:3], v[4:7], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x i32>, ptr addrspace(1) %ptr + %mai = call <4 x i32> 
@llvm.amdgcn.mfma.i32.16x16x64.i8(<4 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_32x32x32_i8(<4 x i32> %arg0, <4 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_32x32x32_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_32x32x32_i8 a[0:15], v[0:3], v[4:7], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x i32>, ptr addrspace(1) %ptr + %mai = call <16 x i32> @llvm.amdgcn.mfma.i32.32x32x32.i8(<4 x i32> %arg0, <4 x i32> %arg1, <16 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x32_bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x32_bf16 a[0:3], v[0:3], v[4:7], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x16_bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x16_bf16 a[0:15], v[0:3], v[4:7], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf16(<8 x bfloat> %arg0, <8 x bfloat> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +; TODO: Full cross product of src0/src1 sizes not tested +define void @test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz0__blgp0(<8 x i32> %arg0, <8 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz0__blgp0: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[18:19], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_16x16x128_f8f6f4 a[0:3], v[0:7], v[8:15], a[0:3], v16, v17 op_sel_hi:[0,0,0] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; 
use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v8i32.v8i32(<8 x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, + i32 0, ; cbsz + i32 0, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz0__blgp0(<8 x i32> %arg0, <8 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz0__blgp0: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[16:17], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x128_f8f6f4 a[0:3], v[0:7], v[8:15], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v8i32.v8i32(<8 x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, + i32 0, ; cbsz + i32 0, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz2__blgp2(<6 x i32> %arg0, <6 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz2__blgp2: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_16x16x128_f8f6f4 a[0:3], v[0:5], v[6:11], a[0:3], v12, v13 op_sel_hi:[0,0,0] cbsz:2 blgp:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v6i32.v6i32(<6 x i32> %arg0, <6 x i32> %arg1, <4 x float> %src2, + i32 2, ; cbsz + i32 2, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz2__blgp2(<6 x i32> %arg0, <6 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz2__blgp2: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[12:13], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x128_f8f6f4 a[0:3], v[0:5], v[6:11], a[0:3] cbsz:2 blgp:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v6i32.v6i32(<6 x i32> %arg0, <6 x i32> %arg1, <4 x float> %src2, + i32 2, ; cbsz + i32 2, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz4__blgp4(<4 x i32> %arg0, <4 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_16x16x128_f8f6f4_0_0__cbsz4__blgp4: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], 
v[10:11], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_16x16x128_f8f6f4 a[0:3], v[0:3], v[4:7], a[0:3], v8, v9 op_sel_hi:[0,0,0] cbsz:4 blgp:4 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v4i32.v4i32(<4 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, + i32 4, ; cbsz + i32 4, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz4__blgp4(<4 x i32> %arg0, <4 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x128_f8f6f4_0_0__cbsz4__blgp4: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x128_f8f6f4 a[0:3], v[0:3], v[4:7], a[0:3] cbsz:4 blgp:4 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.scale.f32.16x16x128.f8f6f4.v4i32.v4i32(<4 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, + i32 4, ; cbsz + i32 4, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + + +define void @test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz0__blgp0(<8 x i32> %arg0, <8 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz0__blgp0: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[18:19], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[18:19], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[18:19], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[18:19], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15], v16, v17 op_sel_hi:[0,0,0] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v8i32.v8i32(<8 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, + i32 0, ; cbsz + i32 0, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz0__blgp0(<8 x i32> %arg0, <8 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz0__blgp0: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[16:17], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[16:17], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[16:17], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[16:17], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x64_f8f6f4 a[0:15], v[0:7], v[8:15], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> 
@llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v8i32.v8i32(<8 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, + i32 0, ; cbsz + i32 0, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz2__blgp2(<6 x i32> %arg0, <6 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz2__blgp2: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:5], v[6:11], a[0:15], v12, v13 op_sel_hi:[0,0,0] cbsz:2 blgp:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v6i32.v6i32(<6 x i32> %arg0, <6 x i32> %arg1, <16 x float> %src2, + i32 2, ; cbsz + i32 2, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz2__blgp2(<6 x i32> %arg0, <6 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz2__blgp2: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[12:13], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[12:13], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[12:13], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[12:13], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x64_f8f6f4 a[0:15], v[0:5], v[6:11], a[0:15] cbsz:2 blgp:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v6i32.v6i32(<6 x i32> %arg0, <6 x i32> %arg1, <16 x float> %src2, + i32 2, ; cbsz + i32 2, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz4__blgp4(<4 x i32> %arg0, <4 x i32> %arg1, i32 %scale0, i32 %scale1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_scale_f32_32x32x64_f8f6f4_0_0__cbsz4__blgp4: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[10:11], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[10:11], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[10:11], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[10:11], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_scale_f32_32x32x64_f8f6f4 a[0:15], v[0:3], v[4:7], a[0:15], v8, v9 op_sel_hi:[0,0,0] cbsz:4 blgp:4 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v4i32.v4i32(<4 x i32> %arg0, 
<4 x i32> %arg1, <16 x float> %src2, + i32 4, ; cbsz + i32 4, ; blgp + i32 0, i32 %scale0, i32 0, i32 %scale1) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz4__blgp4(<4 x i32> %arg0, <4 x i32> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x64_f8f6f4_0_0__cbsz4__blgp4: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x64_f8f6f4 a[0:15], v[0:3], v[4:7], a[0:15] cbsz:4 blgp:4 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.scale.f32.32x32x64.f8f6f4.v4i32.v4i32(<4 x i32> %arg0, <4 x i32> %arg1, <16 x float> %src2, + i32 4, ; cbsz + i32 4, ; blgp + i32 0, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x64_f16(<8 x half> %arg0, <16 x half> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x64_f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x64_f16 a[0:3], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.f16(<8 x half> %arg0, <16 x half> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x32_f16(<8 x half> %arg0, <16 x half> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x32_f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x32_f16 a[0:15], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.f16(<8 x half> %arg0, <16 x half> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x64_bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x64_bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, 
v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x64_bf16 a[0:3], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x32_bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x32_bf16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x32_bf16 a[0:15], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.bf16(<8 x bfloat> %arg0, <16 x bfloat> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_i32_16x16x128_i8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_i32_16x16x128_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_i32_16x16x128_i8 a[0:3], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x i32>, ptr addrspace(1) %ptr + %mai = call <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x128.i8(<4 x i32> %arg0, <8 x i32> %arg1, <4 x i32> %src2, i32 %arg2, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x i32> %mai) + ret void +} + +define void @test_rewrite_smfmac_i32_32x32x64_i8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_i32_32x32x64_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_i32_32x32x64_i8 a[0:15], v[0:3], v[4:11], v12 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x i32>, ptr addrspace(1) %ptr + %mai = call <16 x i32> @llvm.amdgcn.smfmac.i32.32x32x64.i8(<4 x i32> %arg0, <8 x i32> %arg1, <16 x i32> %src2, i32 %arg2, i32 0, i32 
0) + call void asm sideeffect "; use $0", "a"(<16 x i32> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x128_bf8_bf8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x128_bf8_bf8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x128_bf8_bf8 a[0:3], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x128.bf8.bf8(<4 x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x128_bf8_fp8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x128_bf8_fp8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x128_bf8_fp8 a[0:3], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x128.bf8.fp8(<4 x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x128_fp8_bf8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x128_fp8_bf8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x128_fp8_bf8 a[0:3], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x128.fp8.bf8(<4 x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_16x16x128_fp8_fp8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x128_fp8_fp8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_16x16x128_fp8_fp8 a[0:3], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x128.fp8.fp8(<4 
x i32> %arg0, <8 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x64_bf8_bf8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x64_bf8_bf8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x64_bf8_bf8 a[0:15], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x64.bf8.bf8(<4 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x64_bf8_fp8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x64_bf8_fp8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x64_bf8_fp8 a[0:15], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x64.bf8.fp8(<4 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x64_fp8_bf8(<4 x i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x64_fp8_bf8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x64_fp8_bf8 a[0:15], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x64.fp8.bf8(<4 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_smfmac_f32_32x32x64_fp8_fp8(<4 x 
i32> %arg0, <8 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x64_fp8_fp8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_mov_b32_e32 v15, v14 +; CHECK-NEXT: v_mov_b32_e32 v14, v13 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[14:15], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[14:15], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[14:15], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[14:15], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_smfmac_f32_32x32x64_fp8_fp8 a[0:15], v[0:3], v[4:11], v12 cbsz:1 abid:2 +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x64.fp8.fp8(<4 x i32> %arg0, <8 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 1, i32 2) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll index 6f7809f46d10a..9bbe2c2b41340 100644 --- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll +++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll @@ -884,6 +884,873 @@ define amdgpu_kernel void @test_rewrite_mfma_direct_copy_from_agpr_class_copy_ba ret void } +;--------------------------------------------------------------------- +; Comprehensively test all MFMA intrinsics are in the rewrite table +;--------------------------------------------------------------------- + +define void @test_rewrite_mfma_f32_32x32x1f32(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x1f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 a[0:31], v0, v1, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %arg0, float %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x1f32(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x1f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x1_4b_f32 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; 
CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float %arg0, float %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_4x4x1f32(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_4x4x1f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_4x4x1_16b_f32 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float %arg0, float %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x2f32(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x2f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x2_f32 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x2f32(float %arg0, float %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x4f32(float %arg0, float %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x4f32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x4_f32 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x4f32(float %arg0, float %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_4x4x4f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_4x4x4f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_4x4x4_16b_f16 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.4x4x4f16(<4 x half> %arg0, <4 x half> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void 
@test_rewrite_mfma_f32_32x32x8f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x8f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x8_f16 a[0:15], v[0:1], v[2:3], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x8f16(<4 x half> %arg0, <4 x half> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x16f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x16f16: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x16_f16 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half> %arg0, <4 x half> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_32x32x4i8(i32 %arg0, i32 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_32x32x4i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v[2:3], off offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v[2:3], off offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v[2:3], off offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v[2:3], off offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_32x32x4_2b_i8 a[0:31], v0, v1, a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <32 x i32>, ptr addrspace(1) %ptr + %mai = call <32 x i32> @llvm.amdgcn.mfma.i32.32x32x4i8(i32 %arg0, i32 %arg1, <32 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_16x16x4i8(i32 %arg0, i32 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_16x16x4i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[2:3], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[2:3], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[2:3], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_16x16x4_4b_i8 a[0:15], v0, v1, a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; 
use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x i32>, ptr addrspace(1) %ptr + %mai = call <16 x i32> @llvm.amdgcn.mfma.i32.16x16x4i8(i32 %arg0, i32 %arg1, <16 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_4x4x4i8(i32 %arg0, i32 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_4x4x4i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[2:3], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_4x4x4_16b_i8 a[0:3], v0, v1, a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x i32>, ptr addrspace(1) %ptr + %mai = call <4 x i32> @llvm.amdgcn.mfma.i32.4x4x4i8(i32 %arg0, i32 %arg1, <4 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x i32> %mai) + ret void +} + +;--------------------------------------------------------------------- +; gfx90a intrinsics +;--------------------------------------------------------------------- + +define void @test_rewrite_mfma_f32_32x32x4bf16_1k(<4 x i16> %arg0, <4 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x4bf16_1k: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[28:31], v[4:5], off offset:112 +; CHECK-NEXT: global_load_dwordx4 a[24:27], v[4:5], off offset:96 +; CHECK-NEXT: global_load_dwordx4 a[20:23], v[4:5], off offset:80 +; CHECK-NEXT: global_load_dwordx4 a[16:19], v[4:5], off offset:64 +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x4_2b_bf16 a[0:31], v[0:1], v[2:3], a[0:31] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:31] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <32 x float>, ptr addrspace(1) %ptr + %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x4bf16.1k(<4 x i16> %arg0, <4 x i16> %arg1, <32 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<32 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x4bf16_1k(<4 x i16> %arg0, <4 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x4bf16_1k: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x4_4b_bf16 a[0:15], v[0:1], v[2:3], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x4bf16.1k(<4 x i16> %arg0, <4 x i16> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_4x4x4bf16_1k(<4 x i16> %arg0, <4 x i16> %arg1, ptr addrspace(1) %ptr) 
#0 { +; CHECK-LABEL: test_rewrite_mfma_f32_4x4x4bf16_1k: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_4x4x4_16b_bf16 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.4x4x4bf16.1k(<4 x i16> %arg0, <4 x i16> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x8bf16_1k(<4 x i16> %arg0, <4 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x8bf16_1k: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_32x32x8_bf16 a[0:15], v[0:1], v[2:3], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x float>, ptr addrspace(1) %ptr + %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x8bf16.1k(<4 x i16> %arg0, <4 x i16> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x16bf16_1k(<4 x i16> %arg0, <4 x i16> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x16bf16_1k: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x16_bf16 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16bf16.1k(<4 x i16> %arg0, <4 x i16> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f64_16x16x4f64(double %arg0, double %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f64_16x16x4f64: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f64_16x16x4_f64 a[0:7], v[0:1], v[2:3], a[0:7] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:7] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x double>, ptr addrspace(1) %ptr + %mai = call <4 x double> @llvm.amdgcn.mfma.f64.16x16x4f64(double %arg0, double %arg1, <4 x double> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x double> %mai) + ret void +} + +define void @test_rewrite_mfma_f64_4x4xf64(double %arg0, double %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f64_4x4xf64: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx2 a[0:1], v[4:5], off +; CHECK-NEXT: 
s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f64_4x4x4_4b_f64 a[0:1], v[0:1], v[2:3], a[0:1] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:1] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load double, ptr addrspace(1) %ptr + %mai = call double @llvm.amdgcn.mfma.f64.4x4x4f64(double %arg0, double %arg1, double %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(double %mai) + ret void +} + +;--------------------------------------------------------------------- +; gfx942 intrinsics +;--------------------------------------------------------------------- + +define void @test_rewrite_mfma_i32_16x16x32_i8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_16x16x32_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_16x16x32_i8 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x i32>, ptr addrspace(1) %ptr + %mai = call <4 x i32> @llvm.amdgcn.mfma.i32.16x16x32.i8(i64 %arg0, i64 %arg1, <4 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_i32_32x32x16_i8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_i32_32x32x16_i8: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_i32_32x32x16_i8 a[0:15], v[0:1], v[2:3], a[0:15] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:15] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <16 x i32>, ptr addrspace(1) %ptr + %mai = call <16 x i32> @llvm.amdgcn.mfma.i32.32x32x16.i8(i64 %arg0, i64 %arg1, <16 x i32> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<16 x i32> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_16x16x8_xf32(<2 x float> %arg0, <2 x float> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_16x16x8_xf32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off +; CHECK-NEXT: s_waitcnt vmcnt(0) +; CHECK-NEXT: v_mfma_f32_16x16x8_xf32 a[0:3], v[0:1], v[2:3], a[0:3] +; CHECK-NEXT: ;;#ASMSTART +; CHECK-NEXT: ; use a[0:3] +; CHECK-NEXT: ;;#ASMEND +; CHECK-NEXT: s_setpc_b64 s[30:31] + %src2 = load <4 x float>, ptr addrspace(1) %ptr + %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8.xf32(<2 x float> %arg0, <2 x float> %arg1, <4 x float> %src2, i32 0, i32 0, i32 0) + call void asm sideeffect "; use $0", "a"(<4 x float> %mai) + ret void +} + +define void @test_rewrite_mfma_f32_32x32x4_xf32(<2 x float> %arg0, <2 x float> %arg1, ptr addrspace(1) %ptr) #0 { +; CHECK-LABEL: test_rewrite_mfma_f32_32x32x4_xf32: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48 +; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32 +; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16 +; CHECK-NEXT: global_load_dwordx4 a[0:3], 
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x4_xf32 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4.xf32(<2 x float> %arg0, <2 x float> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_16x16x32_bf8_bf8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_bf8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_16x16x32_bf8_bf8 a[0:3], v[0:1], v[2:3], a[0:3]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8(i64 %arg0, i64 %arg1, <4 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_16x16x32_bf8_fp8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_bf8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_16x16x32_bf8_fp8 a[0:3], v[0:1], v[2:3], a[0:3]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8(i64 %arg0, i64 %arg1, <4 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_16x16x32_fp8_bf8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_fp8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_16x16x32_fp8_bf8 a[0:3], v[0:1], v[2:3], a[0:3]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8(i64 %arg0, i64 %arg1, <4 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_16x16x32_fp8_fp8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_16x16x32_fp8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_16x16x32_fp8_fp8 a[0:3], v[0:1], v[2:3], a[0:3]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8(i64 %arg0, i64 %arg1, <4 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_32x32x16_bf8_bf8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_bf8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x16_bf8_bf8 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8(i64 %arg0, i64 %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_32x32x16_bf8_fp8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_bf8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x16_bf8_fp8 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8(i64 %arg0, i64 %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_32x32x16_fp8_bf8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_fp8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x16_fp8_bf8 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8(i64 %arg0, i64 %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_32x32x16_fp8_fp8(i64 %arg0, i64 %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_32x32x16_fp8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x16_fp8_fp8 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 %arg0, i64 %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_f32_16x16x32_f16(<4 x half> %arg0, <8 x half> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x32_f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x32_f16 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x32.f16(<4 x half> %arg0, <8 x half> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_f32_32x32x16_f16(<4 x half> %arg0, <8 x half> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x16_f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x16_f16 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x16.f16(<4 x half> %arg0, <8 x half> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_f32_16x16x32_bf16(<4 x i16> %arg0, <8 x i16> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_f32_16x16x32_bf16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x32_bf16 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x32.bf16(<4 x i16> %arg0, <8 x i16> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_f32_32x32x16_bf16(<4 x i16> %arg0, <8 x i16> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_f32_32x32x16_bf16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x16_bf16 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x16.bf16(<4 x i16> %arg0, <8 x i16> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_i32_16x16x64_i8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_i32_16x16x64_i8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_i32_16x16x64_i8 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x i32>, ptr addrspace(1) %ptr
+ %mai = call <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x64.i8(<2 x i32> %arg0, <4 x i32> %arg1, <4 x i32> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x i32> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_i32_32x32x32_i8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_i32_32x32x32_i8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_i32_32x32x32_i8 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x i32>, ptr addrspace(1) %ptr
+ %mai = call <16 x i32> @llvm.amdgcn.smfmac.i32.32x32x32.i8(<2 x i32> %arg0, <4 x i32> %arg1, <16 x i32> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x i32> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_16x16x64_bf8_bf8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_16x16x64_bf8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x64_bf8_bf8 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8(<2 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_16x16x64_bf8_fp8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_16x16x64_bf8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x64_bf8_fp8 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8(<2 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_16x16x64_fp8_bf8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_16x16x64_fp8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x64_fp8_bf8 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8(<2 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_16x16x64_fp8_fp8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_16x16x64_fp8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_16x16x64_fp8_fp8 a[0:3], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:3]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <4 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <4 x float> @llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8(<2 x i32> %arg0, <4 x i32> %arg1, <4 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<4 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_32x32x32_bf8_bf8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_32x32x32_bf8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x32_bf8_bf8 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8(<2 x i32> %arg0, <4 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_32x32x32_bf8_fp8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_32x32x32_bf8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x32_bf8_fp8 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8(<2 x i32> %arg0, <4 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_32x32x32_fp8_bf8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_32x32x32_fp8_bf8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x32_fp8_bf8 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8(<2 x i32> %arg0, <4 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_smfmac_32x32x32_fp8_fp8(<2 x i32> %arg0, <4 x i32> %arg1, i32 %arg2, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_smfmac_32x32x32_fp8_fp8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v9, v8
+; CHECK-NEXT: v_mov_b32_e32 v8, v7
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[8:9], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[8:9], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[8:9], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[8:9], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_smfmac_f32_32x32x32_fp8_fp8 a[0:15], v[0:1], v[2:5], v6
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = tail call <16 x float> @llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8(<2 x i32> %arg0, <4 x i32> %arg1, <16 x float> %src2, i32 %arg2, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg) #2
declare <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float, float, <16 x float>, i32 immarg, i32 immarg, i32 immarg) #2
declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #2

>From 6a92c5cb9da06e7a5b670319ea2f01fb47764dbb Mon Sep 17 00:00:00 2001
From: Matt Arsenault <matthew.arsena...@amd.com>
Date: Tue, 19 Aug 2025 00:19:34 +0900
Subject: [PATCH 2/2] Add missed cases

---
 .../AMDGPU/rewrite-vgpr-mfma-to-agpr.ll | 44 +++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 9bbe2c2b41340..5f42abbeae253 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -986,6 +986,50 @@ define void @test_rewrite_mfma_f32_16x16x4f32(float %arg0, float %arg1, ptr addr
 ret void
 }
+define void @test_rewrite_mfma_f32_32x32x4f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_32x32x4f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[28:31], v[4:5], off offset:112
+; CHECK-NEXT: global_load_dwordx4 a[24:27], v[4:5], off offset:96
+; CHECK-NEXT: global_load_dwordx4 a[20:23], v[4:5], off offset:80
+; CHECK-NEXT: global_load_dwordx4 a[16:19], v[4:5], off offset:64
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x4_2b_f16 a[0:31], v[0:1], v[2:3], a[0:31]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:31]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <32 x float>, ptr addrspace(1) %ptr
+ %mai = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x4f16(<4 x half> %arg0, <4 x half> %arg1, <32 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<32 x float> %mai)
+ ret void
+}
+
+define void @test_rewrite_mfma_f32_16x16x4f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 {
+; CHECK-LABEL: test_rewrite_mfma_f32_16x16x4f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 a[12:15], v[4:5], off offset:48
+; CHECK-NEXT: global_load_dwordx4 a[8:11], v[4:5], off offset:32
+; CHECK-NEXT: global_load_dwordx4 a[4:7], v[4:5], off offset:16
+; CHECK-NEXT: global_load_dwordx4 a[0:3], v[4:5], off
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_16x16x4_4b_f16 a[0:15], v[0:1], v[2:3], a[0:15]
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use a[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %src2 = load <16 x float>, ptr addrspace(1) %ptr
+ %mai = call <16 x float> @llvm.amdgcn.mfma.f32.16x16x4f16(<4 x half> %arg0, <4 x half> %arg1, <16 x float> %src2, i32 0, i32 0, i32 0)
+ call void asm sideeffect "; use $0", "a"(<16 x float> %mai)
+ ret void
+}
+
 define void @test_rewrite_mfma_f32_4x4x4f16(<4 x half> %arg0, <4 x half> %arg1, ptr addrspace(1) %ptr) #0 {
 ; CHECK-LABEL: test_rewrite_mfma_f32_4x4x4f16:
 ; CHECK: ; %bb.0:
_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits