https://github.com/ritter-x2a updated 
https://github.com/llvm/llvm-project/pull/146075

From ff41f47a5fae2b51add3304eca5d00cc6c1a85a0 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Fri, 27 Jun 2025 04:23:50 -0400
Subject: [PATCH 1/5] [AMDGPU][SDAG] DAGCombine PTRADD -> disjoint OR

If we can't fold a PTRADD's offset into its users, lowering it to a
disjoint OR is preferable: often, a single 32-bit OR instruction
suffices where we would otherwise need a pair of 32-bit additions with
carry.

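(Editor's sketch, not part of the patch: a minimal standalone C++ example of
the arithmetic fact this relies on. If base and offset share no set bits, the
addition cannot carry, so it equals a bitwise OR; if the offset also fits in
the known-zero low bits of a 64-bit pointer, only the low 32-bit half changes.)

  // Sketch only: if (Base & Off) == 0, then Base + Off == (Base | Off), and
  // if Off fits in the known-zero low bits, the high 32-bit half of the
  // 64-bit result is unchanged, so a single 32-bit OR can replace an
  // add-with-carry pair.
  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Base = 0xffffffff00001000ULL & ~UINT64_C(0xf); // low 4 bits zero
    uint64_t Off = 4;                                       // fits in those bits
    assert((Base & Off) == 0);                // "no common bits set"
    assert(Base + Off == (Base | Off));       // ptradd == disjoint or
    assert((Base + Off) >> 32 == Base >> 32); // high half untouched, no carry
    return 0;
  }
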
This needs to be a DAGCombine (and not a selection rule) because its
main purpose is to enable subsequent DAGCombines for bitwise
operations. We don't want to just turn PTRADDs into disjoint ORs
whenever that's sound, because this transform loses the information
that the operation implements pointer arithmetic, which we will soon
need in order to fold offsets into FLAT instructions. Currently,
disjoint ORs can still be used for offset folding, so that part of the
logic can't be tested yet.

The PR contains a hacky workaround for a situation where an AssertAlign
operand of a PTRADD is not DAGCombined before the PTRADD, causing the
PTRADD to be turned into a disjoint OR although reassociating it with
the operand of the AssertAlign would be better. This wouldn't be a
problem if the DAGCombiner ensured that a node is only processed after
all its operands have been processed.

For SWDEV-516125.
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 35 ++++++++++++
 .../AMDGPU/ptradd-sdag-optimizations.ll       | 56 ++++++++++++++++++-
 2 files changed, 90 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4c7c42d1f9eaa..25f507c13bb8d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -15370,6 +15370,41 @@ SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
       return Folded;
   }
 
+  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
+  // that transformation can't block an offset folding at any use of the ptradd.
+  // This should be done late, after legalization, so that it doesn't block
+  // other ptradd combines that could enable more offset folding.
+  bool HasIntermediateAssertAlign =
+      N0->getOpcode() == ISD::AssertAlign && N0->getOperand(0)->isAnyAdd();
+  // This is a hack to work around an ordering problem for DAGs like this:
+  //   (ptradd (AssertAlign (ptradd p, c1), k), c2)
+  // If the outer ptradd is handled first by the DAGCombiner, it can be
+  // transformed into a disjoint or. Then, when the generic AssertAlign combine
+  // pushes the AssertAlign through the inner ptradd, it's too late for the
+  // ptradd reassociation to trigger.
+  if (!DCI.isBeforeLegalizeOps() && !HasIntermediateAssertAlign &&
+      DAG.haveNoCommonBitsSet(N0, N1)) {
+    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+      if (auto *LoadStore = dyn_cast<MemSDNode>(User);
+          LoadStore && LoadStore->getBasePtr().getNode() == N) {
+        unsigned AS = LoadStore->getAddressSpace();
+        // Currently, we only really need ptradds to fold offsets into flat
+        // memory instructions.
+        if (AS != AMDGPUAS::FLAT_ADDRESS)
+          return false;
+        TargetLoweringBase::AddrMode AM;
+        AM.HasBaseReg = true;
+        EVT VT = LoadStore->getMemoryVT();
+        Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
+        return isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS);
+      }
+      return false;
+    });
+
+    if (!TransformCanBreakAddrMode)
+      return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint);
+  }
+
   if (N1.getOpcode() != ISD::ADD || !N1.hasOneUse())
     return SDValue();
 
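Editor's illustration, not part of the patch: the guard in the hunk above can be
modeled with plain C++. The names MemUser, couldFoldOffset, and
shouldLowerToDisjointOr below are hypothetical stand-ins for N->users(),
MemSDNode, and the isLegalAddressingMode() query, under the assumption stated in
the comment that only FLAT accesses matter for offset folding here.

  // Conceptual model only; these types and helpers are not LLVM APIs.
  #include <vector>

  struct MemUser {
    bool UsesPtrAddAsBase; // load/store addresses memory through the ptradd
    bool IsFlatAddrSpace;  // per the comment, only FLAT offsets matter here
  };

  // Stand-in for the addressing-mode legality query on a base-reg + offset mode.
  static bool couldFoldOffset(const MemUser &U) {
    return U.UsesPtrAddAsBase && U.IsFlatAddrSpace;
  }

  // Emit the disjoint OR only if no user could have folded the offset instead.
  static bool shouldLowerToDisjointOr(bool NoCommonBits,
                                      const std::vector<MemUser> &Users) {
    if (!NoCommonBits)
      return false; // the addition might carry; an OR would not be equivalent
    for (const MemUser &U : Users)
      if (couldFoldOffset(U))
        return false; // keep the ptradd so offset folding stays possible
    return true;
  }
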
diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index b7bd56e5c33c4..e5f31bdb2df66 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -100,7 +100,7 @@ define void @baseptr_null(i64 %offset, i8 %v) {
 
 ; Taken from implicit-kernarg-backend-usage.ll, tests the PTRADD handling in the
 ; assertalign DAG combine.
-define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr)  #0 {
+define amdgpu_kernel void @llvm_amdgcn_queue_ptr(ptr addrspace(1) %ptr) {
 ; GFX942-LABEL: llvm_amdgcn_queue_ptr:
 ; GFX942:       ; %bb.0:
 ; GFX942-NEXT:    v_mov_b32_e32 v2, 0
@@ -416,6 +416,60 @@ entry:
   ret void
 }
 
+; Check that ptradds can be lowered to disjoint ORs.
+define ptr @gep_disjoint_or(ptr %base) {
+; GFX942-LABEL: gep_disjoint_or:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    v_and_or_b32 v0, v0, -16, 4
+; GFX942-NEXT:    s_setpc_b64 s[30:31]
+  %p = call ptr @llvm.ptrmask(ptr %base, i64 s0xf0)
+  %gep = getelementptr nuw inbounds i8, ptr %p, i64 4
+  ret ptr %gep
+}
+
+; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
+; taken from preload-implicit-kernargs.ll
+define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) #0 {
+; GFX942_PTRADD-LABEL: random_incorrect_offset:
+; GFX942_PTRADD:       ; %bb.1:
+; GFX942_PTRADD-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    s_branch .LBB21_0
+; GFX942_PTRADD-NEXT:    .p2align 8
+; GFX942_PTRADD-NEXT:  ; %bb.2:
+; GFX942_PTRADD-NEXT:  .LBB21_0:
+; GFX942_PTRADD-NEXT:    s_load_dword s0, s[0:1], 0xa
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_PTRADD-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_PTRADD-NEXT:    s_endpgm
+;
+; GFX942_LEGACY-LABEL: random_incorrect_offset:
+; GFX942_LEGACY:       ; %bb.1:
+; GFX942_LEGACY-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    s_branch .LBB21_0
+; GFX942_LEGACY-NEXT:    .p2align 8
+; GFX942_LEGACY-NEXT:  ; %bb.2:
+; GFX942_LEGACY-NEXT:  .LBB21_0:
+; GFX942_LEGACY-NEXT:    s_mov_b32 s4, 8
+; GFX942_LEGACY-NEXT:    s_load_dword s0, s[0:1], s4 offset:0x2
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v0, 0
+; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v1, s0
+; GFX942_LEGACY-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_LEGACY-NEXT:    s_endpgm
+  %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+  %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
+  %load = load i32, ptr addrspace(4) %gep
+  store i32 %load, ptr addrspace(1) %out
+  ret void
+}
+
 declare void @llvm.memcpy.p0.p4.i64(ptr noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg)
 
 !0 = !{}
+
+attributes #0 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }

From bc7b3d64cf00ee86dc39588e81a66de704146e28 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Mon, 30 Jun 2025 02:39:22 -0400
Subject: [PATCH 2/5] Remove unnecessary attributes in test

---
 .../AMDGPU/ptradd-sdag-optimizations.ll        | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
index e5f31bdb2df66..8bd67ff17a1af 100644
--- a/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
+++ b/llvm/test/CodeGen/AMDGPU/ptradd-sdag-optimizations.ll
@@ -430,36 +430,36 @@ define ptr @gep_disjoint_or(ptr %base) {
 
 ; Check that AssertAlign nodes between ptradd nodes don't block offset folding,
 ; taken from preload-implicit-kernargs.ll
-define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) #0 {
+define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out) {
 ; GFX942_PTRADD-LABEL: random_incorrect_offset:
 ; GFX942_PTRADD:       ; %bb.1:
-; GFX942_PTRADD-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_PTRADD-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
 ; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942_PTRADD-NEXT:    s_branch .LBB21_0
 ; GFX942_PTRADD-NEXT:    .p2align 8
 ; GFX942_PTRADD-NEXT:  ; %bb.2:
 ; GFX942_PTRADD-NEXT:  .LBB21_0:
-; GFX942_PTRADD-NEXT:    s_load_dword s0, s[0:1], 0xa
+; GFX942_PTRADD-NEXT:    s_load_dword s0, s[4:5], 0xa
 ; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942_PTRADD-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942_PTRADD-NEXT:    v_mov_b32_e32 v1, s0
-; GFX942_PTRADD-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_PTRADD-NEXT:    global_store_dword v0, v1, s[8:9]
 ; GFX942_PTRADD-NEXT:    s_endpgm
 ;
 ; GFX942_LEGACY-LABEL: random_incorrect_offset:
 ; GFX942_LEGACY:       ; %bb.1:
-; GFX942_LEGACY-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX942_LEGACY-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x0
 ; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942_LEGACY-NEXT:    s_branch .LBB21_0
 ; GFX942_LEGACY-NEXT:    .p2align 8
 ; GFX942_LEGACY-NEXT:  ; %bb.2:
 ; GFX942_LEGACY-NEXT:  .LBB21_0:
-; GFX942_LEGACY-NEXT:    s_mov_b32 s4, 8
-; GFX942_LEGACY-NEXT:    s_load_dword s0, s[0:1], s4 offset:0x2
+; GFX942_LEGACY-NEXT:    s_mov_b32 s0, 8
+; GFX942_LEGACY-NEXT:    s_load_dword s0, s[4:5], s0 offset:0x2
 ; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX942_LEGACY-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX942_LEGACY-NEXT:    v_mov_b32_e32 v1, s0
-; GFX942_LEGACY-NEXT:    global_store_dword v0, v1, s[2:3]
+; GFX942_LEGACY-NEXT:    global_store_dword v0, v1, s[8:9]
 ; GFX942_LEGACY-NEXT:    s_endpgm
   %imp_arg_ptr = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
   %gep = getelementptr i8, ptr addrspace(4) %imp_arg_ptr, i32 2
@@ -471,5 +471,3 @@ define amdgpu_kernel void @random_incorrect_offset(ptr addrspace(1) inreg %out)
 declare void @llvm.memcpy.p0.p4.i64(ptr noalias nocapture writeonly, ptr addrspace(4) noalias nocapture readonly, i64, i1 immarg)
 
 !0 = !{}
-
-attributes #0 = { "amdgpu-agpr-alloc"="0" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }

From 89d54cf3b32b85346917d1ab592002e225333ce2 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Mon, 30 Jun 2025 09:28:34 -0400
Subject: [PATCH 3/5] Move ptradd -> disjoint OR combine to generic combines

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 27 ++++++++++++++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 35 -------------------
 2 files changed, 27 insertions(+), 35 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a401ace81b8b9..36b6ce2032f0a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2767,6 +2767,33 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
     }
   }
 
+  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
+  // that transformation can't block an offset folding at any use of the ptradd.
+  // This should be done late, after legalization, so that it doesn't block
+  // other ptradd combines that could enable more offset folding.
+  if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
+    bool TransformCanBreakAddrMode = false;
+    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
+      TargetLoweringBase::AddrMode AM;
+      AM.HasBaseReg = true;
+      AM.BaseOffs = C->getSExtValue();
+      TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+        if (auto *LoadStore = dyn_cast<MemSDNode>(User);
+            LoadStore && LoadStore->getBasePtr().getNode() == N) {
+          unsigned AS = LoadStore->getAddressSpace();
+          EVT AccessVT = LoadStore->getMemoryVT();
+          Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext());
+          return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy,
+                                           AS);
+        }
+        return false;
+      });
+    }
+
+    if (!TransformCanBreakAddrMode)
+      return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);
+  }
+
   return SDValue();
 }
 
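Again an editor's sketch rather than part of the patch: a toy model of the
generic check above. ToyAddrMode mirrors the HasBaseReg/BaseOffs fields the
combine fills in, and toyIsLegalAddressingMode is a hypothetical stand-in for
TLI.isLegalAddressingMode(); the signed 13-bit offset range is an arbitrary
assumption for illustration.

  #include <cstdint>

  // Mirrors the two addressing-mode fields the combine fills in.
  struct ToyAddrMode {
    bool HasBaseReg = false;
    int64_t BaseOffs = 0;
  };

  // Hypothetical target hook: assume base register + signed 13-bit immediate.
  static bool toyIsLegalAddressingMode(const ToyAddrMode &AM) {
    return AM.HasBaseReg && AM.BaseOffs >= -4096 && AM.BaseOffs < 4096;
  }

  // If a memory user addresses through the ptradd and could encode ConstOff
  // directly, rewriting the ptradd into an OR would block that folding.
  static bool transformCanBreakAddrMode(bool HasMemUserWithThisBase,
                                        int64_t ConstOff) {
    ToyAddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = ConstOff;
    return HasMemUserWithThisBase && toyIsLegalAddressingMode(AM);
  }
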
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 25f507c13bb8d..4c7c42d1f9eaa 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -15370,41 +15370,6 @@ SDValue SITargetLowering::performPtrAddCombine(SDNode *N,
       return Folded;
   }
 
-  // Transform (ptradd a, b) -> (or disjoint a, b) if it is equivalent and if
-  // that transformation can't block an offset folding at any use of the ptradd.
-  // This should be done late, after legalization, so that it doesn't block
-  // other ptradd combines that could enable more offset folding.
-  bool HasIntermediateAssertAlign =
-      N0->getOpcode() == ISD::AssertAlign && N0->getOperand(0)->isAnyAdd();
-  // This is a hack to work around an ordering problem for DAGs like this:
-  //   (ptradd (AssertAlign (ptradd p, c1), k), c2)
-  // If the outer ptradd is handled first by the DAGCombiner, it can be
-  // transformed into a disjoint or. Then, when the generic AssertAlign combine
-  // pushes the AssertAlign through the inner ptradd, it's too late for the
-  // ptradd reassociation to trigger.
-  if (!DCI.isBeforeLegalizeOps() && !HasIntermediateAssertAlign &&
-      DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
-      if (auto *LoadStore = dyn_cast<MemSDNode>(User);
-          LoadStore && LoadStore->getBasePtr().getNode() == N) {
-        unsigned AS = LoadStore->getAddressSpace();
-        // Currently, we only really need ptradds to fold offsets into flat
-        // memory instructions.
-        if (AS != AMDGPUAS::FLAT_ADDRESS)
-          return false;
-        TargetLoweringBase::AddrMode AM;
-        AM.HasBaseReg = true;
-        EVT VT = LoadStore->getMemoryVT();
-        Type *AccessTy = VT.getTypeForEVT(*DAG.getContext());
-        return isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy, AS);
-      }
-      return false;
-    });
-
-    if (!TransformCanBreakAddrMode)
-      return DAG.getNode(ISD::OR, DL, VT, N0, N1, SDNodeFlags::Disjoint);
-  }
-
   if (N1.getOpcode() != ISD::ADD || !N1.hasOneUse())
     return SDValue();
 

From a21320ea3ccce31643a3eb86cf9621aa0143021b Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Fri, 1 Aug 2025 10:12:23 -0400
Subject: [PATCH 4/5] Replace custom code with canFoldInAddressingMode

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 +++----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 36b6ce2032f0a..d3e805a132900 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2772,23 +2772,9 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
   // This should be done late, after legalization, so that it doesn't block
   // other ptradd combines that could enable more offset folding.
   if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = false;
-    if (auto *C = dyn_cast<ConstantSDNode>(N1)) {
-      TargetLoweringBase::AddrMode AM;
-      AM.HasBaseReg = true;
-      AM.BaseOffs = C->getSExtValue();
-      TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
-        if (auto *LoadStore = dyn_cast<MemSDNode>(User);
-            LoadStore && LoadStore->getBasePtr().getNode() == N) {
-          unsigned AS = LoadStore->getAddressSpace();
-          EVT AccessVT = LoadStore->getMemoryVT();
-          Type *AccessTy = AccessVT.getTypeForEVT(*DAG.getContext());
-          return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, AccessTy,
-                                           AS);
-        }
-        return false;
-      });
-    }
+    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+      return canFoldInAddressingMode(N, User, DAG, TLI);
+    });
 
     if (!TransformCanBreakAddrMode)
       return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);

From 72b3fb8d502e6f080fc7297f6982257020539122 Mon Sep 17 00:00:00 2001
From: Fabian Ritter <fabian.rit...@amd.com>
Date: Mon, 4 Aug 2025 02:48:44 -0400
Subject: [PATCH 5/5] !any_of -> none_of

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index d3e805a132900..2de6447ecbea3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2772,11 +2772,11 @@ SDValue DAGCombiner::visitPTRADD(SDNode *N) {
   // This should be done late, after legalization, so that it doesn't block
   // other ptradd combines that could enable more offset folding.
   if (LegalOperations && DAG.haveNoCommonBitsSet(N0, N1)) {
-    bool TransformCanBreakAddrMode = any_of(N->users(), [&](SDNode *User) {
+    bool TransformCannotBreakAddrMode = none_of(N->users(), [&](SDNode *User) {
       return canFoldInAddressingMode(N, User, DAG, TLI);
     });
 
-    if (!TransformCanBreakAddrMode)
+    if (TransformCannotBreakAddrMode)
       return DAG.getNode(ISD::OR, DL, PtrVT, N0, N1, SDNodeFlags::Disjoint);
   }
 
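An editor's side note on the rewrite above, not part of the patch: for any range
R and predicate P, !any_of(R, P) is equivalent to none_of(R, P), so the flag can
be named positively. A minimal illustration with the analogous standard
algorithms:

  #include <algorithm>
  #include <cassert>
  #include <vector>

  int main() {
    std::vector<int> Users = {2, 4, 6};
    auto BreaksFold = [](int U) { return U % 2 != 0; }; // arbitrary predicate
    bool CanBreak = std::any_of(Users.begin(), Users.end(), BreaksFold);
    bool CannotBreak = std::none_of(Users.begin(), Users.end(), BreaksFold);
    assert(CanBreak == !CannotBreak); // the two spellings always agree
    return 0;
  }
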

_______________________________________________
llvm-branch-commits mailing list
llvm-branch-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits
