[libcxx] [clang] [llvm] [mlir] [compiler-rt] [flang] [clang-tools-extra] [mlir] Fix a zero stride canonicalizer crash (PR #74200)

2023-12-03 Thread Rik Huijzer via cfe-commits

rikhuijzer wrote:

> Is it possible to add the test case (or minimal similar example) with 
> `--inline` option so that we can confirm the original issue is resolved.
> 
> #73383

Thanks for the review and for fixing the typos I made! I've added a test in a new 
`MemRef/inlining.mlir` file, which is in line with how the other dialects are tested 
(a sketch of its shape follows the listing):
```
$ rg -l '\-inline' mlir/test
mlir/test/Dialect/MemRef/inlining.mlir
mlir/test/Dialect/Vector/inlining.mlir
mlir/test/Dialect/UB/inlining.mlir
mlir/test/Dialect/Async/async-parallel-for-compute-fn.mlir
mlir/test/Dialect/Async/async-parallel-for-canonicalize.mlir
mlir/test/Dialect/Affine/inlining.mlir
mlir/test/Dialect/LLVMIR/inline-byval-huge.mlir
mlir/test/Dialect/Bufferization/inlining.mlir
mlir/test/Dialect/LLVMIR/inlining-alias-scopes.mlir
mlir/test/Dialect/LLVMIR/inlining.mlir
mlir/test/Dialect/Linalg/inline-scalar-operands.mlir
mlir/test/Dialect/Tosa/inlining.mlir
mlir/test/Dialect/Linalg/inlining.mlir
mlir/test/lib/Transforms/TestInlining.cpp
mlir/test/Transforms/inlining.mlir
mlir/test/Transforms/inlining-repeated-use.mlir
mlir/test/Transforms/inlining-recursive.mlir
mlir/test/Transforms/inlining-dce.mlir
mlir/test/Transforms/test-inlining.mlir
```
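For readers without the tree at hand, here is a minimal sketch of the shape such a 
dialect inlining test takes. The contents are hypothetical (the function names and 
the loaded op are made up for illustration); the real test is the new 
`mlir/test/Dialect/MemRef/inlining.mlir`:
```
// RUN: mlir-opt %s -inline | FileCheck %s

// Check that a function body containing a memref op can be inlined into
// its caller: after the inliner runs, the call is gone and the memref op
// appears directly in @outer.
func.func @inner(%m: memref<8xf32>) -> f32 {
  %c0 = arith.constant 0 : index
  %v = memref.load %m[%c0] : memref<8xf32>
  return %v : f32
}

// CHECK-LABEL: func @outer
// CHECK-NOT: call
// CHECK: memref.load
func.func @outer(%m: memref<8xf32>) -> f32 {
  %0 = call @inner(%m) : (memref<8xf32>) -> f32
  return %0 : f32
}
```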
(Also, test additions can always be reverted, of course, so it shouldn't be too 
bad.) I also double-checked that 
https://github.com/llvm/llvm-project/issues/73383 no longer crashes with this PR applied.
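For reference, that double-check amounts to running the inliner over the reduced IR 
from the issue (the file name here is hypothetical):
```
$ mlir-opt --inline repro-73383.mlir
```
Without this patch that invocation hits the canonicalizer crash reported in the 
issue; with it, mlir-opt should simply print the inlined module.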

https://github.com/llvm/llvm-project/pull/74200


[libcxx] [clang] [llvm] [mlir] [compiler-rt] [flang] [clang-tools-extra] [mlir] Fix a zero stride canonicalizer crash (PR #74200)

2023-12-03 Thread Rik Huijzer via cfe-commits

https://github.com/rikhuijzer updated https://github.com/llvm/llvm-project/pull/74200

From 22928e7e5da508d8d9dc8d4b7e54f84cccadef06 Mon Sep 17 00:00:00 2001
From: Rik Huijzer 
Date: Mon, 20 Nov 2023 09:02:41 +0100
Subject: [PATCH 1/7] [mlir][tensor] Fix canon via `hasNegativeDimension`

---
 mlir/include/mlir/Dialect/Tensor/IR/Tensor.h |  6 ++++++
 mlir/lib/Dialect/Tensor/IR/TensorOps.cpp     | 15 +++++++++++++++
 mlir/test/Dialect/Tensor/canonicalize.mlir   | 10 ++++++++++
 3 files changed, 31 insertions(+)

diff --git a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
index 06642adda42b3..0d027057b3a95 100644
--- a/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
+++ b/mlir/include/mlir/Dialect/Tensor/IR/Tensor.h
@@ -150,6 +150,12 @@ LogicalResult getOrCreateDestinations(OpBuilder &b, Location loc, Operation *op,
 /// Tests if types are the same when ignoring encoding on ranked tensors.
 bool isSameTypeWithoutEncoding(Type tp1, Type tp2);
 
+/// Helper function to check whether the dimensions are non-negative. This
+/// check also occurs in the verifier, but we need it at later stages too
+/// because the verifier ignores dynamic dimensions, but later stages might
+/// have constant folded those to (negative) constants.
+bool hasNegativeDimension(SmallVector<int64_t> shape);
+
 /// Function to control the folding of constant and extract slice.
 using ControlConstantExtractSliceFusionFn =
     std::function<bool(ExtractSliceOp op)>;
 
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp 
b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index e469815496e18..3297ef673ca2e 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -125,6 +125,12 @@ bool tensor::isSameTypeWithoutEncoding(Type tp1, Type tp2) {
   return tp1 == tp2; // default implementation
 }
 
+bool tensor::hasNegativeDimension(SmallVector<int64_t> shape) {
+  return llvm::any_of(shape, [](int64_t dim) {
+    return !ShapedType::isDynamic(dim) && dim < 0;
+  });
+}
+
 /// Compute the dropped dimensions of a rank-reducing tensor.extract_slice op or
 /// rank-extending tensor.insert_slice op.
 static llvm::SmallBitVector getDroppedDims(ArrayRef<int64_t> reducedShape,
@@ -1801,6 +1807,10 @@ RankedTensorType ExtractSliceOp::inferCanonicalRankReducedResultType(
   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets);
   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes);
   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides);
+  if (hasNegativeDimension(staticOffsets))
+    return {};
+  if (hasNegativeDimension(staticSizes))
+    return {};
   return ExtractSliceOp::inferCanonicalRankReducedResultType(
       desiredResultRank, sourceRankedTensorType, staticOffsets, staticSizes,
       staticStrides);
@@ -2370,6 +2380,8 @@ class InsertSliceOpConstantArgumentFolder final
     auto sourceType = ExtractSliceOp::inferCanonicalRankReducedResultType(
         insertSliceOp.getSourceType().getRank(), insertSliceOp.getDestType(),
         mixedOffsets, mixedSizes, mixedStrides);
+    if (!sourceType)
+      return failure();
     Value toInsert = insertSliceOp.getSource();
     if (sourceType != insertSliceOp.getSourceType()) {
       OpBuilder::InsertionGuard g(rewriter);
@@ -2500,6 +2512,8 @@ struct InsertSliceOpSourceCastInserter final
           getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
         newSrcShape[i] = *constInt;
     }
+    // if (hasNegativeDimension(newSrcShape))
+    //   return failure();
 
     RankedTensorType newSrcType =
         RankedTensorType::get(newSrcShape, srcType.getElementType());
@@ -2521,6 +2535,7 @@ struct InsertSliceOpSourceCastInserter final
       rewriter.setInsertionPoint(insertSliceOp->getParentOp());
     Value cast = rewriter.create<tensor::CastOp>(
         insertSliceOp.getLoc(), newSrcType, insertSliceOp.getSource());
+
     rewriter.replaceOpWithNewOp<InsertOpTy>(
         insertSliceOp, cast, insertSliceOp.getDest(),
         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index ea8c17640d7c1..88f27d3d36b04 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -1102,6 +1102,16 @@ func.func @no_fold_collapse_of_expand_empty_expr(%arg0: tensor<3x2x2xf32>)
 
 // -----
 
+func.func @no_fold_extract_slice_negative_offset(%arg0: tensor<8xf32>) -> tensor<?xf32> {
+  %c-1 = arith.constant -1 : index
+  %e = tensor.extract_slice %arg0[1] [%c-1] [1] : tensor<8xf32> to tensor<?xf32>
+  return %e : tensor<?xf32>
+}
+// CHECK-LABEL: func @no_fold_extract_slice_negative_offset
+// CHECK: tensor.extract_slice
+
+// -----
+
 func.func @reshape_splat_constant_int32() -> tensor<2x4x2xi32> {
   %c0 = arith.constant dense<42> : tensor<2x8xi32>
   %0 = tensor.expand_shape %c0 [[0], [1, 2]]
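Stepping outside the patch for a moment: the rationale in `hasNegativeDimension`'s 
doc comment is easiest to see on concrete IR. Here is a hedged MLIR sketch of the 
failure mode, inferred from that comment and the test above rather than copied from 
issue #73383 (the function name is made up):
```
func.func @folds_to_negative(%t: tensor<8xf32>) -> tensor<?xf32> {
  // Accepted by the verifier: the slice size is a dynamic value at
  // verification time, so no static check fires.
  %sz = arith.constant -1 : index
  // After constant folding, the static size becomes -1; without the new
  // hasNegativeDimension() bail-out, inferCanonicalRankReducedResultType
  // would be asked to build the invalid type tensor<-1xf32>.
  %s = tensor.extract_slice %t[0] [%sz] [1] : tensor<8xf32> to tensor<?xf32>
  return %s : tensor<?xf32>
}
```
Returning `{}` from the inference helper lets the canonicalization patterns fail 
gracefully on such IR instead of crashing.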

From ecef5428c160cb72103e06a160c450440ce1f416 Mon Sep 17 00:00:00 2001
From: Rik Huijzer 
Date: Mon, 20 Nov 2023 16:27:53 +0100