================
@@ -2627,6 +2629,93 @@ SDValue DAGCombiner::foldSubToAvg(SDNode *N, const SDLoc &DL) {
   return SDValue();
 }
 
+/// Try to fold a pointer arithmetic node.
+/// This needs to be done separately from normal addition, because pointer
+/// addition is not commutative.
+SDValue DAGCombiner::visitPTRADD(SDNode *N) {
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  EVT PtrVT = N0.getValueType();
+  EVT IntVT = N1.getValueType();
+  SDLoc DL(N);
+
+  // This is already ensured by an assert in SelectionDAG::getNode(). Several
+  // combines here depend on this assumption.
+  assert(PtrVT == IntVT &&
+         "PTRADD with different operand types is not supported");
+
+  // fold (ptradd undef, y) -> undef
+  if (N0.isUndef())
+    return N0;
+
+  // fold (ptradd x, undef) -> undef
+  if (N1.isUndef())
+    return DAG.getUNDEF(PtrVT);
+
+  // fold (ptradd x, 0) -> x
+  if (isNullConstant(N1))
+    return N0;
+
+  // fold (ptradd 0, x) -> x
+  if (isNullConstant(N0))
+    return N1;
+
+  if (N0.getOpcode() == ISD::PTRADD &&
+      !reassociationCanBreakAddressingModePattern(ISD::PTRADD, DL, N, N0, N1)) {
+    SDValue X = N0.getOperand(0);
+    SDValue Y = N0.getOperand(1);
+    SDValue Z = N1;
+    bool N0OneUse = N0.hasOneUse();
+    bool YIsConstant = DAG.isConstantIntBuildVectorOrConstantInt(Y);
+    bool ZIsConstant = DAG.isConstantIntBuildVectorOrConstantInt(Z);
+
+    // (ptradd (ptradd x, y), z) -> (ptradd x, (add y, z)) if:
+    //   * y is a constant and (ptradd x, y) has one use; or
+    //   * y and z are both constants.

---------------- ritter-x2a wrote:
So that `y + z` can be folded into a single constant, which might then be folded into a memory instruction as an immediate offset. `SeparateConstOffsetFromGEP` should already do that for AMDGPU in many cases where it's beneficial, but:

- I don't think every backend uses `SeparateConstOffsetFromGEP`, so it can be worthwhile to have anyway,
- there are cases where these patterns are introduced after `SeparateConstOffsetFromGEP` runs, e.g. when a wide vector load/store with an offset is legalized into several loads/stores with nested offsets, as in `store_v16i32` in `ptradd-sdag-optimizations.ll`; with this reassociation we get the same code as with the old non-PTRADD code path, and
- while it's possible that this could lead to worse code, the `reassociationCanBreakAddressingModePattern` check above _should_ avoid that (I'm not 100% convinced the logic in there is sound, but that seems like a different problem).

https://github.com/llvm/llvm-project/pull/142739
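For concreteness, here is a rough sketch (an illustration, not necessarily the exact code in the PR) of how the reassociation guarded by that comment could be completed. It assumes the DAGCombiner context from the quoted hunk, i.e. the locals `X`, `Y`, `Z` and the flags `N0OneUse`, `YIsConstant`, `ZIsConstant` declared there:

```cpp
    // Sketch only: reassociate (ptradd (ptradd x, y), z) -> (ptradd x, (add y, z))
    // when the conditions from the comment above hold:
    //   * y is a constant and (ptradd x, y) has one use; or
    //   * y and z are both constants.
    if (YIsConstant && (N0OneUse || ZIsConstant)) {
      SDValue Add = DAG.getNode(ISD::ADD, DL, IntVT, Y, Z);
      AddToWorklist(Add.getNode());
      return DAG.getNode(ISD::PTRADD, DL, PtrVT, X, Add);
    }
```

When both `Y` and `Z` are constants, the inner `ISD::ADD` should be constant-folded by `getNode` into a single constant operand of the outer `PTRADD`, which the target can then try to match as an immediate addressing-mode offset.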