electriclilies commented on a change in pull request #8110:
URL: https://github.com/apache/tvm/pull/8110#discussion_r646953614



##########
File path: src/driver/driver_api.cc
##########
@@ -128,63 +128,192 @@ transform::Pass Filter(FCond fcond) {
   return tir::transform::CreatePrimFuncPass(fpass, 0, "Filter", {});
 }
 
-IRModule lower(te::Schedule sch, const Array<te::Tensor>& args, const std::string& name,
-               const std::unordered_map<te::Tensor, tir::Buffer>& binds) {
-  Array<ObjectRef> out_arg_list;
+Array<tvm::transform::Pass> CreatePassList(bool simple_mode, bool legacy_te_pass) {
   auto pass_ctx = transform::PassContext::Current();
 
-  sch = sch.normalize();
-
-  // Before TIR transformation.
-  auto bounds = te::InferBound(sch);
-  auto stmt = te::ScheduleOps(sch, bounds, false);
-  bool compact = te::VerifyCompactBuffer(stmt);
-
-  Map<te::Tensor, tir::Buffer> out_binds;
-  GetBinds(args, compact, binds, &out_binds, &out_arg_list);
-
-  // build the function
-  tir::PrimFunc f = te::SchedulePostProcToPrimFunc(out_arg_list, std::move(stmt), out_binds);
-  f = WithAttr(std::move(f), "global_symbol", runtime::String(name));
-
-  bool noalias = pass_ctx->GetConfig<Bool>("tir.noalias", Bool(true)).value();
   bool disable_vectorize = pass_ctx->GetConfig<Bool>("tir.disable_vectorize", Bool(false)).value();
   bool instrument_bound_checkers =
       pass_ctx->GetConfig<Bool>("tir.instrument_bound_checkers", 
Bool(false)).value();
 
-  if (noalias) {
-    f = WithAttr(std::move(f), "tir.noalias", Bool(true));
+  // Get any user-added passes
+  auto add_lower_pass =
+      pass_ctx->GetConfig<Array<Array<ObjectRef>>>("tir.add_lower_pass", Array<Array<ObjectRef>>())
+          .value();
+
+  auto user_lower_phase0 = Array<tvm::transform::Pass>();

Review comment:
       I'm happy to add documentation; I just don't actually know the answer to 
this question yet.
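
       For future readers, here is a minimal sketch of how a user-supplied pass
       reaches this config. It uses the existing `tir.add_lower_pass` PassContext
       option that CreatePassList reads; the no-op pass and all names below are
       purely illustrative, not part of this PR:

           import tvm
           from tvm import te

           @tvm.tir.transform.prim_func_pass(opt_level=0)
           def _noop_pass(func, mod, ctx):
               # A do-nothing TIR pass; it only illustrates the (phase, pass)
               # entries that CreatePassList reads out of the PassContext config.
               return func

           n = te.var("n")
           A = te.placeholder((n,), name="A")
           B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
           s = te.create_schedule(B.op)

           # Each config entry is (phase_index, pass); the phase index controls
           # where in the lowering pipeline the user pass is inserted.
           with tvm.transform.PassContext(config={"tir.add_lower_pass": [(0, _noop_pass)]}):
               mod = tvm.lower(s, [A, B], name="add_one")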

##########
File path: src/driver/driver_api.cc
##########
@@ -109,6 +109,51 @@ void GetBinds(const Array<te::Tensor>& args, bool compact,
   }
 }
 
+void GetBinds(const Array<ObjectRef>& args, bool compact,
+              const std::unordered_map<te::Tensor, tir::Buffer>& binds,
+              Map<te::Tensor, tir::Buffer>* out_binds, Array<ObjectRef>* out_arg_list) {
+  *out_binds = binds;
+
+  for (const ObjectRef& x : args) {
+    if (const auto* tensor_node = x.as<te::TensorNode>()) {
+      auto x_ref = GetRef<te::Tensor>(tensor_node);
+      if (out_binds->find(x_ref) == out_binds->end()) {
+        auto buf =
+            BufferWithOffsetAlignment(x_ref->shape, x_ref->dtype, x_ref->op->name, -1, 0, compact);
+        out_binds->Set(x_ref, buf);
+        out_arg_list->push_back(buf);
+      } else {
+        out_arg_list->push_back((*out_binds)[x_ref]);
+      }
+    } else if (x.as<te::BufferNode>() || x.as<tir::VarNode>()) {
+      out_arg_list->push_back(x);
+    } else {
+      ICHECK(false)
+          << "Expected type of the elements of args to be te::Tensor, 
te::Buffer or tir::Var";
+    }
+  }
+}
+
+TVM_REGISTER_GLOBAL("driver.get_binds")
+    .set_body_typed([](const Array<ObjectRef>& args, bool compact,
+                       const Map<te::Tensor, tir::Buffer>& binds) {
+      std::unordered_map<te::Tensor, tir::Buffer> c_binds;
+      // Check to make sure binds is not null before doing the conversion;
+      if (binds.get() != NULL) {
+        for (auto kv : binds) {
+          c_binds.insert(std::pair<te::Tensor, tir::Buffer>(kv.first, kv.second));
+        }
+      }
+      Map<te::Tensor, tir::Buffer> out_binds;
+      Array<ObjectRef> out_arg_list;
+      GetBinds(args, compact, c_binds, &out_binds, &out_arg_list);
+
+      // TVM object system doesn't have a pair object, so we'll put both ret values in an array
+      // and return that.
+      Array<ObjectRef> out_arr = {out_binds, out_arg_list};

Review comment:
       I've used the Map as a workaround for this before as well. I do think 
having some way to unpack multiple values from an FFI function is important, 
since returning multiple values is so common in Python, so adding either a Pair 
or a Tuple object would be helpful.
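
       For comparison, the Map workaround I mentioned looks roughly like this on
       the Python side. Everything here (the global name, the keys) is hypothetical
       and only meant to show the pack-into-a-container / unpack-by-key pattern:

           import tvm

           # Hypothetical packed function that "returns two values" by packing
           # them into a Map keyed by string; a real Pair/Tuple object would make
           # both this and the positional Array used in the diff unnecessary.
           @tvm.register_func("example.map_return")
           def _map_return():
               return tvm.runtime.convert({"binds": "first value", "args": "second value"})

           out = tvm.get_global_func("example.map_return")()
           binds_like, args_like = out["binds"], out["args"]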

##########
File path: src/driver/driver_api.cc
##########
@@ -109,6 +109,51 @@ void GetBinds(const Array<te::Tensor>& args, bool compact,
   }
 }
 
+void GetBinds(const Array<ObjectRef>& args, bool compact,

Review comment:
       Originally I did this because I was making the signature as similar as 
possible to the other GetBinds (which was originally in C++), and that version 
takes an `Array<te::Tensor>` for the args instead of an `Array<ObjectRef>`.
   I'm actually not sure whether I should get rid of one of the versions of 
GetBinds, keep them as similar as possible, or just make two versions with 
different signatures. What do you think?

##########
File path: src/relay/backend/compile_engine.cc
##########
@@ -770,7 +770,8 @@ class CompileEngineImpl : public CompileEngineNode {
       With<PassContext> fresh_pass_ctx_scope(PassContext::Create());
 
       std::unordered_map<te::Tensor, tir::Buffer> binds;
-      cache_node->funcs = tvm::lower(cfunc->schedule, all_args, cache_node->func_name, binds);
+      cache_node->funcs =
+          tvm::LowerSchedule(cfunc->schedule, all_args, cache_node->func_name, binds);

Review comment:
       Yeah I don't think we do. I'll remove it (I didn't look too closely at 
what this call was doing, oops!)

##########
File path: python/tvm/driver/build_module.py
##########
@@ -37,92 +37,54 @@
 from tvm.tir.buffer import Buffer
 from tvm.tir.expr import Var
 
+from . import _ffi_api as ffi
+
 
 def get_binds(args, compact=False, binds=None):
     """Internal function to get binds and arg_list given arguments.
-
     Parameters
     ----------
     args : list of Buffer or Tensor or Var
         The argument lists to the function.
-
     compact : bool
         If the statement has already bound to a compact buffer.
-
     binds : dict of :any:`Tensor` to :any:`Buffer`, optional
         Dictionary that maps the Tensor to Buffer which specified the data layout
         requirement of the function. By default, a new compact buffer is created
         for each tensor in the argument.
-
     Returns
     -------
     binds: dict
         The bind specification
-
     arg_list: list
         The list of symbolic buffers of arguments.
     """
-    binds = {} if binds is None else binds.copy()
-    arg_list = []
-    for x in args:
-        if isinstance(x, tensor.Tensor):
-            any_dim = any(isinstance(i, tvm.tir.Var) for i in x.shape)
-            buffer_type = "auto_broadcast" if any_dim and not compact else ""
-            if x not in binds:
-                buf = tvm.tir.decl_buffer(
-                    x.shape, dtype=x.dtype, name=x.name, buffer_type=buffer_type
-                )
-                binds[x] = buf
-                arg_list.append(buf)
-            else:
-                arg_list.append(binds[x])
-        elif isinstance(x, schedule.Buffer):
-            arg_list.append(x)
-        elif isinstance(x, tvm.tir.Var):
-            arg_list.append(x)
-        else:
-            raise ValueError("args must be Tensor, Buffer or Var")
-    return binds, arg_list
-
-
-def form_irmodule(sch, args, name, binds):
-    """According to the given schedule, form a function.
+    out_arr = ffi.get_binds(args, compact, binds)
+    return out_arr[0], out_arr[1]
+

Review comment:
       Yup can do this
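
       For reference, a quick usage sketch of the refactored helper. This assumes
       the `driver.get_binds` global from this PR is registered; the tensors are
       just placeholders for illustration:

           import tvm
           from tvm import te
           from tvm.driver.build_module import get_binds

           n = te.var("n")
           A = te.placeholder((n,), name="A")
           B = te.compute((n,), lambda i: A[i] + 1.0, name="B")

           # The binds map and the flat argument list are now computed on the
           # C++ side and handed back through the FFI as a two-element Array.
           binds, arg_list = get_binds([A, B])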



