https://github.com/ritter-x2a updated https://github.com/llvm/llvm-project/pull/146075
>From 395fdf948ee1864d6fc427e62db5433a5ef3eba0 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Fri, 27 Jun 2025 04:23:50 -0400
Subject: [PATCH 1/5] [AMDGPU][SDAG] DAGCombine PTRADD -> disjoint OR

If we can't fold a PTRADD's offset into its users, lowering it to a
disjoint OR is preferable: often, a 32-bit OR instruction suffices
where we'd otherwise use a pair of 32-bit additions with carry.

This needs to be a DAGCombine (and not a selection rule) because its
main purpose is to enable subsequent DAGCombines for bitwise
operations. We don't want to just turn PTRADDs into disjoint ORs
whenever that's sound, because this transform loses the information
that the operation implements pointer arithmetic, which we will soon
need in order to fold offsets into FLAT instructions. Currently,
disjoint ORs can still be used for offset folding, so that part of the
logic can't be tested yet.

The PR contains a hacky workaround for a situation where an AssertAlign
operand of a PTRADD is not DAGCombined before the PTRADD, causing the
PTRADD to be turned into a disjoint OR even though reassociating it
with the operand of the AssertAlign would be better. This wouldn't be a
problem if the DAGCombiner ensured that a node is only processed after
all its operands have been processed.

For SWDEV-516125.
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 35 ++++++++++++
 .../AMDGPU/ptradd-sdag-optimizations.ll       | 56 ++++++++++++++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 78d608556f056..ffaaef65569ae 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16145,6 +16145,41 @@ SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
     return Folded;
   }
 
+  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
+  // that transformation can't block an offset folding at any use of the ptradd.
+  // This should be done late, after legalization, so that it doesn't block
+  // other ptradd combines that could enable more offset folding.
+  bool HasIntermediateAssertAlign =
+      N0->getOpcode() == ISD::AssertAlign && N0->getOperand(0)->isAnyAdd();
+  // This is a hack to work around an ordering problem for DAGs like this:
+  // (ptradd (AssertAlign (ptradd p, c1), k), c2)
+  // If the outer ptradd is handled first by the DAGCombiner, it can be
+  // transformed into a disjoint or. Then, when the generic AssertAlign combine
+  // pushes the AssertAlign through the inner ptradd, it's too late for the
+  // ptradd reassociation to trigger.
+  if (!DCI.isBeforeLegalizeOps() && !HasIntermediateAssertAlign &&
+      DAG.haveNoCommonBitsSet(N0, N1)) {
+    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+      if (auto *LoadStore = dyn_cast<MemSDNode>(User);
+          LoadStore && LoadStore->getBasePtr().getNode() == N) {
+        unsigned AS = LoadStore->getAddressSpace();
+        // Currently, we only really need ptradds to fold offsets into flat
+        // memory instructions.
+        if (AS != AMDGPUAS::FLAT_ADDRESS)
+          return false;
+        TargetLoweringBase::AddrMode AM;
+        AM.HasBaseReg = true;
+        EVT VT = LoadStore->getMemoryVT();
+        Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
+        return isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS);
+      }
+      return false;
+    });
+
+    if (!TransformCanBreakAddrMode)
+      return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint);
+  }
+
   if (N1.getOpcode() != ISD::ADD || !N1.hasOneUse())
     return SDValue();
 
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index 199c1f61d2522..7d7fe141e5440 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -100,7 +100,7 @@ define void @baseptr_null(i64 %offset, i8 %v) {
 
 ; Taken from implicit-kernarg-backend-usage.ll, tests the PTRADD handling in the
 ; assertalign DAG combine.
-define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) #0 {
+define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) {
 ; GFX942-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX942:       ; %bb.0:
 ; GFX942-NEXT:    v_mov_b32_e32 v0, 0
@@ -415,6 +415,60 @@ entry:
   ret void
 }
 
+; Check that ptradds can be lowered to disjoint ORs.
+define ptr @gep_disjoint_or(ptr %base) {
+; GFX942-LABEL: gep_disjoint_or:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    v_and_or_b32 v0, v0, -16, 4
+; GFX942-NEXT:    s_setpc_b64 s[30:31]
+  %p = call ptr @llvm.ptrmask(ptr %base, i64 s0xf0)
+  %gep = getelementptr nuw inbounds i8, ptr %p, i64 4
+  ret ptr %gep
+}
+
+; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
+; taken from preload-implicit-kernargs.ll
+define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) #0 {
+; GFX942_PTRADD-LABEL: random_incorrect_offset:
+; GFX942_PTRADD:       ; %bb.1:
+; GFX942_PTRADD-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    s_branch .LBB21_0
+; GFX942_PTRADD-NEXT:  .p2align 8
+; GFX942_PTRADD-NEXT:  ; %bb.2:
+; GFX942_PTRADD-NEXT:  .LBB21_0:
+; GFX942_PTRADD-NEXT:    s_load_dword s0, s[0:1], 0xa
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_PTRADD-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_PTRADD-NEXT:    s_endpgm
+;
+; GFX942_LEGACY-LABEL: random_incorrect_offset:
+; GFX942_LEGACY:       ; %bb.1:
+; GFX942_LEGACY-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    s_branch .LBB21_0
+; GFX942_LEGACY-NEXT:  .p2align 8
+; GFX942_LEGACY-NEXT:  ; %bb.2:
+; GFX942_LEGACY-NEXT:  .LBB21_0:
+; GFX942_LEGACY-NEXT:    s_mov_b32 s4, 8
+; GFX942_LEGACY-NEXT:    s_load_dword s0, s[0:1], s4 offset:0x2
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_LEGACY-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_LEGACY-NEXT:    s_endpgm
+  %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
+  %load = load i32, ptr addrspace(4) %gep
+  store i32 %load, ptr addrspace(1) %out
+  ret void
+}
+
 declare void @llvm.memcpy.p0.p4.i64(ptr noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg)
 
 !0 = !{}
+
+attributes #0 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
"amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" } >From abdc12f84289eaeb6337d29dc8dfd2fd1697d171 Mon Sep 17 00:00:00 2001 From: Fabian Ritter <fabian.rit...@amd.com> Date: Mon, 30 Jun 2025 09:28:34 -0400 Subject: [PATCH 3/5] Move ptradd -> disjoint OR combine to generic combines --- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 27 ++++++++++++++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 35 ------------------- 2 files changed, 27 insertions(+), 35 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 7586c73924191..1e00df8794e94 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2774,6 +2774,33 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) { } } + // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if + // that transformation can't block an offset folding at any use of the ptradd. + // This should be done late, after legalization, so that it doesn't block + // other ptradd combines that could enable more offset folding. + if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) { + bool TransformCanBreakAddrMode = false; + if (auto *C = dyn_cast<ConstantSDNode>(N1)) { + TargetLoweringBase::AddrMode AM; + AM.HasBaseReg = true; + AM.BaseOffs = C->getSExtValue(); + TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) { + if (auto *LoadStore = dyn_cast<MemSDNode>(User); + LoadStore && LoadStore->getBasePtr().getNode() == N) { + unsigned AS = LoadStore->getAddressSpace(); + EVT AccessVT = LoadStore->getMemoryVT(); + Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext()); + return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, + AS); + } + return false; + }); + } + + if (!TransformCanBreakAddrMode) + return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint); + } + return SDValue(); } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index ffaaef65569ae..78d608556f056 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -16145,41 +16145,6 @@ SDValue SITargetLowering::performPtrAddCombine(SDNode *N, return Folded; } - // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if - // that transformation can't block an offset folding at any use of the ptradd. - // This should be done late, after legalization, so that it doesn't block - // other ptradd combines that could enable more offset folding. - bool HasIntermediateAssertAlign = - N0->getOpcode() == ISD::AssertAlign && N0->getOperand(0)->isAnyAdd(); - // This is a hack to work around an ordering problem for DAGs like this: - // (ptradd (AssertAlign (ptradd p, c1), k), c2) - // If the outer ptradd is handled first by the DAGCombiner, it can be - // transformed into a disjoint or. Then, when the generic AssertAlign combine - // pushes the AssertAlign through the inner ptradd, it's too late for the - // ptradd reassociation to trigger. 
>From abdc12f84289eaeb6337d29dc8dfd2fd1697d171 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Mon, 30 Jun 2025 09:28:34 -0400
Subject: [PATCH 3/5] Move ptradd -> disjoint OR combine to generic combines

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 27 ++++++++++++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 35 -------------------
 2 files changed, 27 insertions(+), 35 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7586c73924191..1e00df8794e94 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2774,6 +2774,33 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
     }
   }
 
+  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
+  // that transformation can't block an offset folding at any use of the ptradd.
+  // This should be done late, after legalization, so that it doesn't block
+  // other ptradd combines that could enable more offset folding.
+  if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
+    bool TransformCanBreakAddrMode = false;
+    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
+      TargetLoweringBase::AddrMode AM;
+      AM.HasBaseReg = true;
+      AM.BaseOffs = C->getSExtValue();
+      TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+        if (auto *LoadStore = dyn_cast<MemSDNode>(User);
+            LoadStore && LoadStore->getBasePtr().getNode() == N) {
+          unsigned AS = LoadStore->getAddressSpace();
+          EVT AccessVT = LoadStore->getMemoryVT();
+          Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext());
+          return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy,
+                                           AS);
+        }
+        return false;
+      });
+    }
+
+    if (!TransformCanBreakAddrMode)
+      return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);
+  }
+
   return SDValue();
 }
 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ffaaef65569ae..78d608556f056 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16145,41 +16145,6 @@ SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
     return Folded;
   }
 
-  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
-  // that transformation can't block an offset folding at any use of the ptradd.
-  // This should be done late, after legalization, so that it doesn't block
-  // other ptradd combines that could enable more offset folding.
-  bool HasIntermediateAssertAlign =
-      N0->getOpcode() == ISD::AssertAlign && N0->getOperand(0)->isAnyAdd();
-  // This is a hack to work around an ordering problem for DAGs like this:
-  // (ptradd (AssertAlign (ptradd p, c1), k), c2)
-  // If the outer ptradd is handled first by the DAGCombiner, it can be
-  // transformed into a disjoint or. Then, when the generic AssertAlign combine
-  // pushes the AssertAlign through the inner ptradd, it's too late for the
-  // ptradd reassociation to trigger.
-  if (!DCI.isBeforeLegalizeOps() && !HasIntermediateAssertAlign &&
-      DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
-      if (auto *LoadStore = dyn_cast<MemSDNode>(User);
-          LoadStore && LoadStore->getBasePtr().getNode() == N) {
-        unsigned AS = LoadStore->getAddressSpace();
-        // Currently, we only really need ptradds to fold offsets into flat
-        // memory instructions.
-        if (AS != AMDGPUAS::FLAT_ADDRESS)
-          return false;
-        TargetLoweringBase::AddrMode AM;
-        AM.HasBaseReg = true;
-        EVT VT = LoadStore->getMemoryVT();
-        Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
-        return isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS);
-      }
-      return false;
-    });
-
-    if (!TransformCanBreakAddrMode)
-      return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint);
-  }
-
   if (N1.getOpcode() != ISD::ADD || !N1.hasOneUse())
     return SDValue();
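The generic form of the guard above asks TLI.isLegalAddressingMode whether base register plus the constant offset (AddrMode::BaseOffs) is legal for each memory-op user. Its decision structure, modeled outside the SelectionDAG with illustrative stand-in types (the real code walks N->users() and inspects MemSDNode base pointers):

#include <algorithm>
#include <vector>

// Toy stand-in for a ptradd node's users: each record says whether the
// user is a load/store with the ptradd as its base pointer, and whether
// the target would accept base-reg + this offset as a legal addressing
// mode for that access (what TLI.isLegalAddressingMode reports).
struct UserInfo {
  bool UsesPtrAddAsBasePtr;
  bool OffsetLegalInAddrMode;
};

bool canFoldInAddrMode(const UserInfo &U) {
  return U.UsesPtrAddAsBasePtr && U.OffsetLegalInAddrMode;
}

// Mirrors the combine's guard: if any user could have folded the offset
// into its address, rewriting the ptradd to a disjoint OR would lose that
// folding opportunity, so the transform is skipped.
bool shouldRewriteToDisjointOr(const std::vector<UserInfo> &Users) {
  bool TransformCanBreakAddrMode =
      std::any_of(Users.begin(), Users.end(), canFoldInAddrMode);
  return !TransformCanBreakAddrMode;
}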
>From a1c816a4280b170985081052023cc98ee7e9af39 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Fri, 1 Aug 2025 10:12:23 -0400
Subject: [PATCH 4/5] Replace custom code with canFoldInAddressingMode

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 +++----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 1e00df8794e94..573346dc211af 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2779,23 +2779,9 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
   // This should be done late, after legalization, so that it doesn't block
   // other ptradd combines that could enable more offset folding.
   if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = false;
-    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
-      TargetLoweringBase::AddrMode AM;
-      AM.HasBaseReg = true;
-      AM.BaseOffs = C->getSExtValue();
-      TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
-        if (auto *LoadStore = dyn_cast<MemSDNode>(User);
-            LoadStore && LoadStore->getBasePtr().getNode() == N) {
-          unsigned AS = LoadStore->getAddressSpace();
-          EVT AccessVT = LoadStore->getMemoryVT();
-          Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext());
-          return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy,
-                                           AS);
-        }
-        return false;
-      });
-    }
+    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+      return canFoldInAddressingMode(N, User, DAG, TLI);
+    });
 
     if (!TransformCanBreakAddrMode)
       return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);

>From 0587908ebb196aff7db59390ae9a47bda45377c7 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Mon, 4 Aug 2025 02:48:44 -0400
Subject: [PATCH 5/5] !any_of -> none_of

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 573346dc211af..0c773e7dcb5de 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2779,11 +2779,11 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
   // This should be done late, after legalization, so that it doesn't block
   // other ptradd combines that could enable more offset folding.
   if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+    bool TransformCannotBreakAddrMode = none_of(N->users(), [&](SDNode *User) {
       return canFoldInAddressingMode(N, User, DAG, TLI);
     });
 
-    if (!TransformCanBreakAddrMode)
+    if (TransformCannotBreakAddrMode)
       return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);
   }
 

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits