springerm updated this revision to Diff 492765.
springerm added a comment.
Herald added a subscriber: thopre.

rebase
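
With this change, an op that does not implement BufferizableOpInterface is no
longer skipped by the in-place analysis: its tensor operands are analyzed like
any other, and all of its tensor results are conservatively assumed to alias
all of its tensor operands (via the new detail::unknownGetAliasingOpOperand /
unknownGetAliasingOpResult helpers). A minimal sketch of the effect, adapted
from the new one-shot-bufferize-analysis.mlir test in this diff
("dummy.dummy_op" stands in for any unregistered op):

  func.func @unknown_op_aliasing(%f: f32, %f2: f32, %pos: index) -> f32 {
    %0 = tensor.empty() : tensor<10xf32>
    %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
    // The unregistered op may return an alias of %1, so its operand must
    // bufferize out-of-place (the analysis marks it "false") and gets a copy.
    %alias = "dummy.dummy_op"(%1) : (tensor<10xf32>) -> (tensor<10xf32>)
    // This fill overwrites the buffer of %1 in place; reading %alias below is
    // only correct because the unregistered op received that copy.
    %2 = linalg.fill ins(%f2 : f32) outs(%1 : tensor<10xf32>) -> tensor<10xf32>
    %3 = tensor.extract %alias[%pos] : tensor<10xf32>
    return %3 : f32
  }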


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D142006/new/

https://reviews.llvm.org/D142006

Files:
  mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
  mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
  mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
  mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
  mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
  mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
  mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
  mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir

Index: mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
===================================================================
--- mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -136,7 +136,7 @@
 
 // CHECK-LABEL: func @select_different_tensors(
 //  CHECK-SAME:     %[[t:.*]]: tensor<?xf32>
-func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> tensor<?xf32> {
+func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %pos: index, %c: i1) -> f32 {
   // CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32, strided{{.*}}>
   // CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
   %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
@@ -145,7 +145,8 @@
   // CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<?xf32> to memref<?xf32, strided{{.*}}>
   // CHECK: arith.select %{{.*}}, %[[casted]], %[[m]]
   %1 = arith.select %c, %0, %t : tensor<?xf32>
-  return %1 : tensor<?xf32>
+  %2 = tensor.extract %1[%pos] : tensor<?xf32>
+  return %2 : f32
 }
 
 // -----
Index: mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
===================================================================
--- mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
+++ mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
@@ -5,7 +5,7 @@
 // CHECK:  (S) 1 num-buffer-alloc
 // CHECK:  (S) 1 num-buffer-dealloc
 // CHECK:  (S) 1 num-tensor-in-place
-// CHECK:  (S) 1 num-tensor-out-of-place
+// CHECK:  (S) 2 num-tensor-out-of-place
 func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index)
     -> (f32, f32) {
   %t = "test.dummy_op"() : () -> (tensor<10xf32>)
Index: mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
===================================================================
--- mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
+++ mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
@@ -100,11 +100,14 @@
     %t1: tensor<?xf32>, %o: index, %s: index) -> (tensor<?xf32>, tensor<?xf32>) {
   // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
   // CHECK: %[[subview:.*]] = memref.subview %[[m1]]
+  // The op must alloc because "test.dummy_op" may bufferize to a memory write.
+  // CHECK: %[[alloc:.*]] = memref.alloc
+  // CHECK: memref.copy %[[subview]], %[[alloc]]
   %0 = tensor.extract_slice %t1[%o][%s][1] : tensor<?xf32> to tensor<?xf32>
-  // CHECK: %[[subview_tensor:.*]] = bufferization.to_tensor %[[subview]]
-  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[subview_tensor]])
+  // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+  // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[alloc_tensor]])
   %1 = "test.dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32>
-  // CHECK: return %[[subview_tensor]], %[[dummy]]
+  // CHECK: return %[[alloc_tensor]], %[[dummy]]
   return %0, %1 : tensor<?xf32>, tensor<?xf32>
 }
 
Index: mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
===================================================================
--- /dev/null
+++ mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only" -allow-unregistered-dialect -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func @unknown_op_aliasing(
+func.func @unknown_op_aliasing(%f: f32, %f2: f32, %pos: index) -> f32 {
+  %0 = tensor.empty() : tensor<10xf32>
+  // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
+
+  // Something must bufferize out-of-place because the op may return an alias
+  // of %1.
+  // CHECK: "dummy.dummy_op"(%{{.*}}) {__inplace_operands_attr__ = ["false"]}
+  %alias = "dummy.dummy_op"(%1) : (tensor<10xf32>) -> (tensor<10xf32>)
+
+  // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+  %2 = linalg.fill ins(%f2 : f32) outs(%1 : tensor<10xf32>) -> tensor<10xf32>
+  %3 = tensor.extract %alias[%pos] : tensor<10xf32>
+  return %3 : f32
+}
+
+// -----
+
+// CHECK-LABEL: func @unknown_op_writing(
+func.func @unknown_op_writing(%f: f32, %f2: f32, %pos: index) -> f32 {
+  %0 = tensor.empty() : tensor<10xf32>
+  // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
+
+  // The op may bufferize to a memory write, so it must bufferize out-of-place.
+  // CHECK: "dummy.dummy_op"(%{{.*}}) {__inplace_operands_attr__ = ["false"]}
+  "dummy.dummy_op"(%1) : (tensor<10xf32>) -> ()
+
+  %3 = tensor.extract %1[%pos] : tensor<10xf32>
+  return %3 : f32
+}
Index: mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
===================================================================
--- mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -920,10 +920,9 @@
   auto analyzeOp = [&](Operation *op) {
     for (OpOperand &opOperand : op->getOpOperands())
       if (opOperand.get().getType().isa<TensorType>())
-        if (auto bufferizableOp = state.getOptions().dynCastBufferizableOp(op))
-          if (failed(bufferizableInPlaceAnalysisImpl(opOperand, aliasInfo,
-                                                     state, domInfo)))
-            return failure();
+        if (failed(bufferizableInPlaceAnalysisImpl(opOperand, aliasInfo, state,
+                                                   domInfo)))
+          return failure();
     return success();
   };
 
@@ -1049,11 +1048,10 @@
                                     const BufferizationAliasInfo &aliasInfo,
                                     const BufferizationOptions &options) {
   // Add __inplace_operands_attr__.
-  op->walk([&](BufferizableOpInterface bufferizableOp) {
-    if (options.isOpAllowed(bufferizableOp.getOperation()))
-      for (OpOperand &opOperand : bufferizableOp->getOpOperands())
-        if (opOperand.get().getType().isa<TensorType>())
-          setInPlaceOpOperand(opOperand, aliasInfo.isInPlace(opOperand));
+  op->walk([&](Operation *op) {
+    for (OpOperand &opOperand : op->getOpOperands())
+      if (opOperand.get().getType().isa<TensorType>())
+        setInPlaceOpOperand(opOperand, aliasInfo.isInPlace(opOperand));
   });
 }
 
Index: mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
===================================================================
--- mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -181,15 +181,9 @@
     func::CallOp callOp = cast<func::CallOp>(op);
     FuncOp funcOp = getCalledFunction(callOp);
     assert(funcOp && "expected CallOp to a FuncOp");
-    if (getFuncOpAnalysisState(state, funcOp) !=
-        FuncOpAnalysisState::Analyzed) {
+    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
       // FuncOp not analyzed yet. Any OpResult may be aliasing.
-      SmallVector<OpResult> result;
-      for (OpResult opResult : op->getOpResults())
-        if (opResult.getType().isa<TensorType>())
-          result.push_back(opResult);
-      return result;
-    }
+      return detail::unknownGetAliasingOpResult(opOperand);
 
     // Get aliasing results from state.
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
@@ -208,15 +202,9 @@
     func::CallOp callOp = cast<func::CallOp>(op);
     FuncOp funcOp = getCalledFunction(callOp);
     assert(funcOp && "expected CallOp to a FuncOp");
-    if (getFuncOpAnalysisState(state, funcOp) !=
-        FuncOpAnalysisState::Analyzed) {
+    if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
       // FuncOp not analyzed yet. Any OpOperand may be aliasing.
-      SmallVector<OpOperand *> result;
-      for (OpOperand &opOperand : op->getOpOperands())
-        if (opOperand.get().getType().isa<TensorType>())
-          result.push_back(&opOperand);
-      return result;
-    }
+      return detail::unknownGetAliasingOpOperand(opResult);
 
     // Get aliasing bbArgs from state.
     const FuncAnalysisState &funcState = getFuncAnalysisState(state);
Index: mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
===================================================================
--- mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -349,24 +349,29 @@
   }
 }
 
-/// Determine which OpOperand* will alias with `result` if the op is bufferized
-/// in place. Return an empty vector if the op is not bufferizable.
+/// Determine which OpOperand* will alias with `opResult` if the op is
+/// bufferized in place. Return all tensor OpOperand* if the op is not
+/// bufferizable.
 SmallVector<OpOperand *>
-AnalysisState::getAliasingOpOperand(OpResult result) const {
-  if (Operation *op = result.getDefiningOp())
+AnalysisState::getAliasingOpOperand(OpResult opResult) const {
+  if (Operation *op = opResult.getDefiningOp())
     if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
-      return bufferizableOp.getAliasingOpOperand(result, *this);
-  return {};
+      return bufferizableOp.getAliasingOpOperand(opResult, *this);
+
+  // The op is not bufferizable.
+  return detail::unknownGetAliasingOpOperand(opResult);
 }
 
 /// Determine which OpResult will alias with `opOperand` if the op is bufferized
-/// in place. Return an empty vector if the op is not bufferizable.
+/// in place. Return all tensor OpResults if the op is not bufferizable.
 SmallVector<OpResult>
 AnalysisState::getAliasingOpResult(OpOperand &opOperand) const {
   if (auto bufferizableOp =
           getOptions().dynCastBufferizableOp(opOperand.getOwner()))
     return bufferizableOp.getAliasingOpResult(opOperand, *this);
-  return {};
+
+  // The op is not bufferizable.
+  return detail::unknownGetAliasingOpResult(opOperand);
 }
 
 /// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
@@ -618,39 +623,6 @@
       .getResult();
 }
 
-FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
-    Value value, const BufferizationOptions &options,
-    const DenseMap<Value, BaseMemRefType> &fixedTypes) {
-  assert(value.getType().isa<TensorType>() && "expected tensor type");
-
-  // No further analysis is possible for a block argument.
-  if (value.isa<BlockArgument>())
-    return bufferization::getMemRefType(value, options);
-
-  // Value is an OpResult.
-  Operation *op = getOwnerOfValue(value);
-  auto opResult = value.cast<OpResult>();
-  auto bufferizableOp = cast<BufferizableOpInterface>(op);
-  AnalysisState state(options);
-  auto aliasingOperands = bufferizableOp.getAliasingOpOperand(opResult, state);
-  if (!aliasingOperands.empty() &&
-      bufferizableOp.bufferRelation(opResult, state) ==
-          BufferRelation::Equivalent) {
-    // If the OpResult has an equivalent OpOperand, both OpResult and
-    // OpOperand bufferize to the exact same buffer type.
-    Value equivalentOperand = aliasingOperands.front()->get();
-    return getBufferType(equivalentOperand, options, fixedTypes);
-  }
-
-  // If we do not know the memory space and there is no default memory space,
-  // report a failure.
-  if (!options.defaultMemorySpace.has_value())
-    return op->emitError("could not infer memory space");
-
-  return getMemRefType(value, options, /*layout=*/{},
-                       *options.defaultMemorySpace);
-}
-
 /// Return the buffer type for a given Value (tensor) after bufferization.
 FailureOr<BaseMemRefType>
 bufferization::getBufferType(Value value, const BufferizationOptions &options) {
@@ -830,6 +802,43 @@
                          memorySpace);
 }
 
+//===----------------------------------------------------------------------===//
+// Default implementations of interface methods
+//===----------------------------------------------------------------------===//
+
+FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
+    Value value, const BufferizationOptions &options,
+    const DenseMap<Value, BaseMemRefType> &fixedTypes) {
+  assert(value.getType().isa<TensorType>() && "expected tensor type");
+
+  // No further analysis is possible for a block argument.
+  if (value.isa<BlockArgument>())
+    return bufferization::getMemRefType(value, options);
+
+  // Value is an OpResult.
+  Operation *op = getOwnerOfValue(value);
+  auto opResult = value.cast<OpResult>();
+  auto bufferizableOp = cast<BufferizableOpInterface>(op);
+  AnalysisState state(options);
+  auto aliasingOperands = bufferizableOp.getAliasingOpOperand(opResult, state);
+  if (!aliasingOperands.empty() &&
+      bufferizableOp.bufferRelation(opResult, state) ==
+          BufferRelation::Equivalent) {
+    // If the OpResult has an equivalent OpOperand, both OpResult and
+    // OpOperand bufferize to the exact same buffer type.
+    Value equivalentOperand = aliasingOperands.front()->get();
+    return getBufferType(equivalentOperand, options, fixedTypes);
+  }
+
+  // If we do not know the memory space and there is no default memory space,
+  // report a failure.
+  if (!options.defaultMemorySpace.has_value())
+    return op->emitError("could not infer memory space");
+
+  return getMemRefType(value, options, /*layout=*/{},
+                       *options.defaultMemorySpace);
+}
+
 bool bufferization::detail::defaultIsRepetitiveRegion(
     BufferizableOpInterface bufferizableOp, unsigned index) {
   assert(index < bufferizableOp->getNumRegions() && "invalid region index");
@@ -839,3 +848,23 @@
     return false;
   return regionInterface.isRepetitiveRegion(index);
 }
+
+SmallVector<OpOperand *>
+bufferization::detail::unknownGetAliasingOpOperand(OpResult opResult) {
+  // Conservatively assume that everything is aliasing.
+  SmallVector<OpOperand *> r;
+  for (OpOperand &operand : opResult.getDefiningOp()->getOpOperands())
+    if (operand.get().getType().isa<TensorType>())
+      r.push_back(&operand);
+  return r;
+}
+
+SmallVector<OpResult>
+bufferization::detail::unknownGetAliasingOpResult(OpOperand &opOperand) {
+  // Conservatively assume that everything is aliasing.
+  SmallVector<OpResult> r;
+  for (OpResult result : opOperand.getOwner()->getOpResults())
+    if (result.getType().isa<TensorType>())
+      r.push_back(result);
+  return r;
+}
Index: mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
===================================================================
--- mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -315,12 +315,14 @@
 /// tensor values.
 class AnalysisState {
 public:
-  /// Determine which OpOperand* will alias with `result` if the op is
-  /// bufferized in place. Return an empty vector if the op is not bufferizable.
-  SmallVector<OpOperand *> getAliasingOpOperand(OpResult result) const;
+  /// Determine which OpOperand* will alias with `opResult` if the op is
+  /// bufferized in place. Return all tensor OpOperand* if the op is not
+  /// bufferizable.
+  SmallVector<OpOperand *> getAliasingOpOperand(OpResult opResult) const;
 
   /// Determine which OpResult will alias with `opOperand` if the op is
-  /// bufferized in place. Return an empty vector if the op is not bufferizable.
+  /// bufferized in place. Return all tensor OpResults if the op is not
+  /// bufferizable.
   SmallVector<OpResult> getAliasingOpResult(OpOperand &opOperand) const;
 
   /// Return true if `opOperand` bufferizes to a memory read. Return `true` if
@@ -549,6 +551,14 @@
 /// places.
 bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp,
                                unsigned index);
+
+/// This is the default implementation of getAliasingOpOperand in case the
+/// defining op does not implement the BufferizableOpInterface.
+SmallVector<OpOperand *> unknownGetAliasingOpOperand(OpResult opResult);
+
+/// This is the default implementation of getAliasingOpResult in case the
+/// owner of `opOperand` does not implement the BufferizableOpInterface.
+SmallVector<OpResult> unknownGetAliasingOpResult(OpOperand &opOperand);
 } // namespace detail
 
 } // namespace bufferization