comaniac commented on a change in pull request #7892:
URL: https://github.com/apache/tvm/pull/7892#discussion_r656638428



##########
File path: cmake/config.cmake
##########
@@ -98,6 +98,8 @@ set(USE_STACKVM_RUNTIME OFF)
 
 # Whether enable tiny embedded graph executor.
 set(USE_GRAPH_EXECUTOR ON)
+# Whether enable subgraph runtime.

Review comment:
       Add an empty line.

##########
File path: python/tvm/contrib/graph_executor.py
##########
@@ -242,6 +243,16 @@ def get_input(self, index, out=None):
 
         return self._get_input(index)
 
+    def get_input_index(self, name):
+        """Set inputs to the module via kwargs

Review comment:
       Is this the correct description? And what's the return value?

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Pipeline executor that executes pipeline containing TVM PackedFunc."""
+import json
+import tvm._ffi
+from tvm import relay
+from tvm.contrib import graph_executor
+
+
+def pipeline_executor_enabled():
+    """check if pipeline executor enabled."""
+    pipeline_enabled = False
+    try:
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        assert pipelinecreate
+        pipeline_enabled = True
+    except ValueError:
+        print("pipeline executor not enabled!")
+
+    return pipeline_enabled
+
+
+def build_pipeline(config):
+    """build module list that can use for pipeline execution.
+
+    Parameters
+    ----------
+
+    config: Dict[IRModule, Dict[str, Any]]
+        build configuration informaton, structure like following.
+        {IRModule: {"target":target,
+                    "target_host":target_host,
+                    "params":params,
+                    "mod_name"mod_name,
+                    "build":build}}
+
+    Returns
+    -------
+    ret: List[IRModule]
+        list of IRModule
+    string_config: Dict[int, Dict[str, any]]
+        pipeline configuration
+    """
+    mods = {}
+    config_len = len(config)
+    string_config = [{} for _ in range(config_len)]
+    for ir_mod in config:
+        # Get module configuration
+        mod_config = config[ir_mod]
+        assert "pipeline" in mod_config and "mod_indx" in 
mod_config["pipeline"]
+        # Get module index in pipeline configuration
+        mod_indx = mod_config["pipeline"]["mod_indx"] - 1
+        assert mod_indx < config_len
+        # Create pipeline configuration
+        string_config[mod_indx] = mod_config["pipeline"]
+        build_func = relay.build
+        # if there is a self defined build function then use it.
+        if "build" in mod_config and mod_config["build"]:
+            build_func = mod_config.build
+
+        # build IRModule
+        mod = build_func(
+            ir_mod,
+            mod_config["target"],
+            params=mod_config["params"],
+            target_host=mod_config["target_host"],
+            mod_name=mod_config["mod_name"],
+        )
+
+        mods[mod] = {"dev": mod_config["dev"]}
+
+    # return IRModule list and pipeline configuration
+    return mods, string_config
+
+
+def create(pipeline_mods, mod_config):
+    """Create a pipeline runtime executor.
+
+    Parameters
+    ----------
+    pipeline_mods : List[IRModule]
+        list of IRModule
+
+    mod_config : Dict[int, Dict[str, Any]]
+        modules and modules dependency configuration informaiton.
+
+    Returns
+    -------
+    submodule : PipelineModule
+        Runtime pipeline module.
+    """
+
+    mods = []
+    for pipeline_mod in pipeline_mods:
+        mod = graph_executor.GraphModule(
+            pipeline_mod["default"](pipeline_mods[pipeline_mod]["dev"])
+        )
+
+        mods.append(mod)
+
+    submodule = PipelineModule(mods, json.dumps(mod_config))
+    return submodule
+
+
+class PipelineModule(object):
+    """Wrapper runtime module. This is a thin wrapper of the underlying TVM 
module.
+    you can also directly call set_input, run, and get_output of underlying 
module functions.
+
+    Parameters
+    ----------
+    graph_module : List[GraphModule]
+        The internal tvm module that holds the actual graph functions.
+
+    pipeline_config : Dict[IRModule, Dict[str, Any]]
+        modules and modules dependency configuration informaiton.
+
+    """
+
+    def __init__(self, graph_modules, pipeline_config):
+        mods = []
+        for module in graph_modules:
+            mods.append(module.module)
+
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        assert pipelinecreate
+        module = pipelinecreate(mods, pipeline_config)
+
+        self.graph_modules_ = graph_modules
+
+        self._set_input = module["set_input"]
+        self._run = module["run"]
+        self._stop = module["stop"]
+        self._get_output = module["get_output"]
+        self._get_input = module["get_input"]
+        self._get_num_outputs = module["get_num_outputs"]
+        self._get_num_inputs = module["get_num_inputs"]
+
+    def set_input(self, key, value, modindx=1, params=None):
+        """Set inputs to the module via kwargs
+
+        Parameters
+        ----------
+        key : array_like
+           The input key
+
+        value : array_like.
+           The input key
+

Review comment:
     modidx? By the way, this is not good naming...

##########
File path: src/runtime/pipeline/pipeline_executor.h
##########
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \brief pipeline executor
+ * \file pipeline_executor.h
+ */
+#ifndef TVM_RUNTIME_PIPELINE_PIPELINE_EXECUTOR_H_
+#define TVM_RUNTIME_PIPELINE_PIPELINE_EXECUTOR_H_
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "../file_utils.h"
+#include "pipeline_function.h"
+
+using namespace std;
+namespace tvm {
+namespace runtime {
+
+/*!
+ * \brief pipeline runtime.
+ *
+ *  This runtime can be acccesibly in various language via
+ *  TVM runtime PackedFunc API.
+ */
+class TVM_DLL SubGraphRuntime : public ModuleNode {
+ public:
+  SubGraphRuntime() { input_int_map = make_shared<MOD_DLDATA_MAP>(); }
+  ~SubGraphRuntime() {
+    /* stop pipeline threads and release data in deconstructor.
+     */
+    Stop();
+  }
+  /*!
+   * \brief Get member function to front-end
+   * \param name The name of the function.
+   * \param sptr_to_self The pointer to the module node.
+   * \return The corresponding member function.
+   */
+  virtual PackedFunc GetFunction(const std::string& name, const 
ObjectPtr<Object>& sptr_to_self);
+
+  /*!
+   * \return The type key of the executor.
+   */
+  const char* type_key() const final { return "SubGraphRuntime"; }
+  void Run();
+  void Stop();
+
+  /*!
+   * \brief Initialize the graph executor with graph and context.
+   * \param graph_json The execution graph.
+   * \param module The module containing the compiled functions for the host
+   *  processor.
+   * \param ctxs The context of the host and devices where graph nodes will be
+   *  executed on.
+   * \param lookup_linked_param_func If given, a PackedFunc invoked to lookup 
linked parameters
+   *  by storage_id. If not given, linked parameters are looked-up using an 
internal implementation,
+   *  which is not compatible with RPCModules.
+   */
+  void Init(const Array<tvm::runtime::Module>& modules, const std::string& 
pipeline_json);
+
+  /*!
+   * \brief set index-th input to the graph.
+   * \param index The input index.
+   * \param data_in The input data.
+   */
+  void SetInput(int index, DLTensor* data_in, int modIndx);
+
+  /*!
+   * \brief get index-th input.
+   * \param index The input index.
+   * \return The input data.
+   */
+  NDArray GetInput(int index, int mIndx) const;
+
+  /*!
+   * \brief get input index-th by name.
+   * \param input name.
+   * \return The input index.
+   */
+  int GetInputIndex(const string& name, int mIndx) const;
+  /*!
+   * \brief Get the number of outputs
+   *
+   * \return The number of outputs from graph.
+   */
+  int NumOutputs() const;
+  /*!
+   * \brief Get the number of inputs
+   *
+   * \return The number of inputs to the graph.
+   */
+  int NumInputs() const;
+  /*!
+   * \brief Return NDArray Array for all output.
+   *
+   * \param syncPoll Syncholization poll mode or ASyncholization.
+   * \return NDArray Array for all output.
+   */
+  Array<NDArray> GetOutput(bool syncPoll = true);
+
+  void Load(dmlc::JSONReader* reader) {
+    reader->BeginArray();
+    while (reader->NextArrayItem()) {
+      std::string key;
+      reader->BeginObject();
+      int mod_indx = 0;
+      unordered_map<int, unordered_map<int, string>> output;
+      while (reader->NextObjectItem(&key)) {
+        if (key == "mod_indx") {
+          reader->Read(&mod_indx);
+        }
+        if (key == "output") {
+          reader->BeginArray();
+          while (reader->NextArrayItem()) {
+            int output_indx = -1;
+            unordered_map<int, string> depend;
+            reader->BeginObject();
+            while (reader->NextObjectItem(&key)) {
+              if (key == "output_indx") {
+                reader->Read(&output_indx);
+              }
+              if (key == "dependent") {
+                reader->BeginArray();
+                int dep_mod_indx = -1;
+                string inputName;
+                while (reader->NextArrayItem()) {
+                  reader->BeginObject();
+                  while (reader->NextObjectItem(&key)) {
+                    if (key == "mod_indx") {
+                      reader->Read(&dep_mod_indx);
+                    }
+                    if (key == "input_name") {
+                      reader->Read(&inputName);
+                    }
+                  }
+                  if (dep_mod_indx >= 0) {
+                    depend[dep_mod_indx] = inputName;
+                  }
+                }
+              }
+            }
+
+            if (output_indx >= 0) {
+              output[output_indx] = depend;
+            }
+          }
+        }
+      }
+      if (mod_indx >= 0) {
+        pipeline_conf[mod_indx] = output;
+      }
+    }

Review comment:
     This logic needs to be improved. It will be impossible to maintain in the
future.

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Pipeline executor that executes pipeline containing TVM PackedFunc."""
+import json
+import tvm._ffi
+from tvm import relay
+from tvm.contrib import graph_executor
+
+
+def pipeline_executor_enabled():
+    """check if pipeline executor enabled."""
+    pipeline_enabled = False
+    try:
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        assert pipelinecreate
+        pipeline_enabled = True
+    except ValueError:
+        print("pipeline executor not enabled!")
+
+    return pipeline_enabled
+
+
+def build_pipeline(config):

Review comment:
     A better name: `mod_n_configs`.

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Pipeline executor that executes pipeline containing TVM PackedFunc."""
+import json
+import tvm._ffi
+from tvm import relay
+from tvm.contrib import graph_executor
+
+
+def pipeline_executor_enabled():
+    """check if pipeline executor enabled."""
+    pipeline_enabled = False
+    try:
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        assert pipelinecreate
+        pipeline_enabled = True
+    except ValueError:
+        print("pipeline executor not enabled!")
+
+    return pipeline_enabled
+
+
+def build_pipeline(config):
+    """build module list that can use for pipeline execution.
+
+    Parameters
+    ----------
+
+    config: Dict[IRModule, Dict[str, Any]]
+        build configuration informaton, structure like following.
+        {IRModule: {"target":target,
+                    "target_host":target_host,
+                    "params":params,
+                    "mod_name"mod_name,
+                    "build":build}}
+
+    Returns
+    -------
+    ret: List[IRModule]
+        list of IRModule
+    string_config: Dict[int, Dict[str, any]]
+        pipeline configuration
+    """
+    mods = {}
+    config_len = len(config)
+    string_config = [{} for _ in range(config_len)]
+    for ir_mod in config:
+        # Get module configuration
+        mod_config = config[ir_mod]

Review comment:
       ```suggestion
       for ir_mod, mod_config in mod_n_configs:
   ```

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */

Review comment:
       ```suggestion
   /*! \brief Stop pipeline run. */
   ```

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */
+void SubGraphRuntime::Stop() { pipeline_stop(runtimes); }
+/*!
+ * \brief Run all the operations one by one.
+ */

Review comment:
       ```suggestion
   
   /*! \brief Run all the operations one by one. */
   ```

##########
File path: src/runtime/pipeline/pipeline_executor.h
##########
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \brief pipeline executor
+ * \file pipeline_executor.h
+ */
+#ifndef TVM_RUNTIME_PIPELINE_PIPELINE_EXECUTOR_H_
+#define TVM_RUNTIME_PIPELINE_PIPELINE_EXECUTOR_H_
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "../file_utils.h"
+#include "pipeline_function.h"
+
+using namespace std;
+namespace tvm {
+namespace runtime {
+
+/*!
+ * \brief pipeline runtime.
+ *
+ *  This runtime can be acccesibly in various language via
+ *  TVM runtime PackedFunc API.
+ */
+class TVM_DLL SubGraphRuntime : public ModuleNode {
+ public:
+  SubGraphRuntime() { input_int_map = make_shared<MOD_DLDATA_MAP>(); }
+  ~SubGraphRuntime() {
+    /* stop pipeline threads and release data in deconstructor.
+     */
+    Stop();
+  }
+  /*!
+   * \brief Get member function to front-end
+   * \param name The name of the function.
+   * \param sptr_to_self The pointer to the module node.
+   * \return The corresponding member function.
+   */
+  virtual PackedFunc GetFunction(const std::string& name, const 
ObjectPtr<Object>& sptr_to_self);
+
+  /*!
+   * \return The type key of the executor.
+   */
+  const char* type_key() const final { return "SubGraphRuntime"; }
+  void Run();
+  void Stop();
+
+  /*!
+   * \brief Initialize the graph executor with graph and context.
+   * \param graph_json The execution graph.
+   * \param module The module containing the compiled functions for the host
+   *  processor.
+   * \param ctxs The context of the host and devices where graph nodes will be
+   *  executed on.
+   * \param lookup_linked_param_func If given, a PackedFunc invoked to lookup 
linked parameters
+   *  by storage_id. If not given, linked parameters are looked-up using an 
internal implementation,
+   *  which is not compatible with RPCModules.
+   */
+  void Init(const Array<tvm::runtime::Module>& modules, const std::string& 
pipeline_json);
+
+  /*!
+   * \brief set index-th input to the graph.
+   * \param index The input index.
+   * \param data_in The input data.
+   */
+  void SetInput(int index, DLTensor* data_in, int modIndx);
+
+  /*!
+   * \brief get index-th input.
+   * \param index The input index.
+   * \return The input data.
+   */
+  NDArray GetInput(int index, int mIndx) const;
+
+  /*!
+   * \brief get input index-th by name.
+   * \param input name.
+   * \return The input index.
+   */
+  int GetInputIndex(const string& name, int mIndx) const;
+  /*!
+   * \brief Get the number of outputs
+   *
+   * \return The number of outputs from graph.
+   */
+  int NumOutputs() const;
+  /*!
+   * \brief Get the number of inputs
+   *
+   * \return The number of inputs to the graph.
+   */
+  int NumInputs() const;
+  /*!
+   * \brief Return NDArray Array for all output.
+   *
+   * \param syncPoll Syncholization poll mode or ASyncholization.
+   * \return NDArray Array for all output.
+   */
+  Array<NDArray> GetOutput(bool syncPoll = true);
+
+  void Load(dmlc::JSONReader* reader) {
+    reader->BeginArray();
+    while (reader->NextArrayItem()) {
+      std::string key;
+      reader->BeginObject();
+      int mod_indx = 0;
+      unordered_map<int, unordered_map<int, string>> output;
+      while (reader->NextObjectItem(&key)) {
+        if (key == "mod_indx") {
+          reader->Read(&mod_indx);
+        }
+        if (key == "output") {
+          reader->BeginArray();
+          while (reader->NextArrayItem()) {
+            int output_indx = -1;
+            unordered_map<int, string> depend;
+            reader->BeginObject();
+            while (reader->NextObjectItem(&key)) {
+              if (key == "output_indx") {
+                reader->Read(&output_indx);
+              }
+              if (key == "dependent") {
+                reader->BeginArray();
+                int dep_mod_indx = -1;
+                string inputName;
+                while (reader->NextArrayItem()) {
+                  reader->BeginObject();
+                  while (reader->NextObjectItem(&key)) {
+                    if (key == "mod_indx") {
+                      reader->Read(&dep_mod_indx);
+                    }
+                    if (key == "input_name") {
+                      reader->Read(&inputName);
+                    }
+                  }
+                  if (dep_mod_indx >= 0) {
+                    depend[dep_mod_indx] = inputName;
+                  }
+                }
+              }
+            }
+
+            if (output_indx >= 0) {
+              output[output_indx] = depend;
+            }
+          }
+        }
+      }
+      if (mod_indx >= 0) {
+        pipeline_conf[mod_indx] = output;
+      }
+    }
+  }
+
+ protected:
+  vector<NDArray> output_entry_;
+  PIPELINE_CONF pipeline_conf;
+  vector<shared_ptr<RuntimeItem>> runtimes;
+  MOD_DLDATA_MAP_PTR input_int_map;
+  size_t outpuNumber = 0;

Review comment:
     The names of private class members should end with `_`.

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */
+void SubGraphRuntime::Stop() { pipeline_stop(runtimes); }
+/*!
+ * \brief Run all the operations one by one.
+ */
+void SubGraphRuntime::Run() {
+  pipeline_run(runtimes, input_int_map);
+  /* Clear the input map
+   */
+}
+
+void SubGraphRuntime::Init(const Array<tvm::runtime::Module>& modules,
+                           const std::string& pipeline_json) {
+  std::istringstream is(pipeline_json);
+  dmlc::JSONReader reader(&is);
+  this->Load(&reader);
+  outpuNumber = pipeline_init(modules, &runtimes, &pipeline_conf);
+  return;
+}
+
+/*!
+ * \brief set index-th input to the modIndx-th graph.
+ * \param index The input index.
+ * \param data_in The input data.
+ * \param modIndx The runtime index.
+ */
+void SubGraphRuntime::SetInput(int index, DLTensor* data_in, int modIndx) {
+  if (1 == modIndx) {
+    runtimes[0]->runtimePtr->SetInput(index, data_in);
+  } else {
+    pipeline_setinput(input_int_map, index, data_in, modIndx);
+  }
+}
+
+/*!
+ * \brief Get the number of outputs
+ *
+ * \return The number of outputs from last pipeline.
+ */
+int SubGraphRuntime::NumOutputs() const { return outpuNumber; }
+
+/*!
+ * \brief Get the number of inputs
+ *
+ * \return The number of inputs to the first pipeline.
+ */
+int SubGraphRuntime::NumInputs() const {
+  int inputsNum = 0;
+  for (auto runtime : runtimes) {
+    inputsNum += runtime->runtimePtr->NumInputs();
+  }
+  return inputsNum;
+}
+
+/*!
+ * \brief Return NDArray for given input index.
+ * \param index The input index.
+ *
+ * \return NDArray corresponding to given input node index.
+ */
+NDArray SubGraphRuntime::GetInput(int index, int mIndx) const {
+  auto gruntime = runtimes[mIndx];
+  return gruntime->runtimePtr->GetInput(index);
+}
+
+/*!
+ * \brief Return input index for given input name.
+ * \param name The input name.
+ *
+ * \return int corresponding to given input node name.
+ */
+int SubGraphRuntime::GetInputIndex(const string& name, int mIndx) const {
+  auto gruntime = runtimes[mIndx - 1];
+  return gruntime->runtimePtr->GetInputIndex(name);
+}
+
+/*!
+ * \brief Return NDArray Array for all output.
+ *
+ * \return NDArray Array for all output.
+ */
+Array<NDArray> SubGraphRuntime::GetOutput(bool syncPoll) {
+  Array<NDArray> nd;
+  if (pipeline_poll(&output_entry_, runtimes, syncPoll)) {
+    for (auto output : output_entry_) {
+      nd.push_back(output);
+    }
+  }
+  return nd;
+}
+
+PackedFunc SubGraphRuntime::GetFunction(const std::string& name,
+                                        const ObjectPtr<Object>& sptr_to_self) 
{
+  /* Return member functions during query.
+   */
+  if (name == "set_input") {
+    return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
+      /* Default use first runtime index value.
+       */

Review comment:
       ```suggestion
         // Default use first runtime index value.
   ```

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Pipeline executor that executes pipeline containing TVM PackedFunc."""
+import json
+import tvm._ffi
+from tvm import relay
+from tvm.contrib import graph_executor
+
+
+def pipeline_executor_enabled():
+    """check if pipeline executor enabled."""
+    pipeline_enabled = False
+    try:
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        assert pipelinecreate
+        pipeline_enabled = True
+    except ValueError:
+        print("pipeline executor not enabled!")
+
+    return pipeline_enabled
+
+
+def build_pipeline(config):
+    """build module list that can use for pipeline execution.
+
+    Parameters
+    ----------
+

Review comment:
       remove the empty line

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */
+void SubGraphRuntime::Stop() { pipeline_stop(runtimes); }
+/*!
+ * \brief Run all the operations one by one.
+ */
+void SubGraphRuntime::Run() {
+  pipeline_run(runtimes, input_int_map);
+  /* Clear the input map
+   */
+}
+
+void SubGraphRuntime::Init(const Array<tvm::runtime::Module>& modules,
+                           const std::string& pipeline_json) {
+  std::istringstream is(pipeline_json);
+  dmlc::JSONReader reader(&is);
+  this->Load(&reader);
+  outpuNumber = pipeline_init(modules, &runtimes, &pipeline_conf);
+  return;
+}
+
+/*!
+ * \brief set index-th input to the modIndx-th graph.
+ * \param index The input index.
+ * \param data_in The input data.
+ * \param modIndx The runtime index.
+ */
+void SubGraphRuntime::SetInput(int index, DLTensor* data_in, int modIndx) {

Review comment:
       ```suggestion
   void SubGraphRuntime::SetInput(int index, DLTensor* data_in, int mod_idx) {
   ```
   Please also fix the other variables. We should use a consistent naming convention.

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -0,0 +1,223 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+"""Minimum pipeline executor that executes pipeline containing TVM 
PackedFunc."""
+import json
+import tvm._ffi
+from tvm import relay
+from tvm.contrib import graph_executor
+
+
+def build_pipeline(config):
+    """build module list that can use for pipeline execution.
+    Parameters:
+    ir_mods:
+        list of IRModule
+
+    config:
+        build configuration informaiton, structure like following.
+        {IRModule: {"target":target,
+                    "target_host":target_host,
+                    "params":params,
+                    "mod_name"mod_name,
+                    "build":build}}
+
+    Return:
+        list of IRModule
+    """
+    mods = {}
+    string_config = [{}] * len(config)
+    for ir_mod in config:
+        mod_config = config[ir_mod]
+        string_config[mod_config["pipeline"]["mod_indx"] - 1] = 
mod_config["pipeline"]
+        build_func = relay.build
+        # if there is a self defined build function then use it.
+        if mod_config["build"]:
+            build_func = mod_config.build
+
+        mod = build_func(
+            ir_mod,
+            mod_config["target"],
+            params=mod_config["params"],
+            target_host=mod_config["target_host"],
+            mod_name=mod_config["mod_name"],
+        )
+
+        mods[mod] = {"dev": mod_config["dev"]}
+
+    return mods, string_config
+
+
+def create(mods, mod_config):
+    """Create a pipeline runtime executor.
+
+    Parameters
+    ----------
+    sub_mods :
+        {"lib": <module>,
+         "dev": <device>}
+
+    Returns
+    -------
+    submodule : PipelineModule
+        Runtime pipeline module.
+    """
+    pipeline_mods, string_config = build_pipeline(mod_config)
+
+    mods = []
+    for pipeline_mod in pipeline_mods:
+        mod = graph_executor.GraphModule(
+            pipeline_mod["default"](pipeline_mods[pipeline_mod]["dev"])
+        )
+
+        mods.append(mod)
+
+    submodule = PipelineModule(mods, json.dumps(string_config))
+    return submodule
+
+
+class PipelineModule(object):
+    """Wrapper runtime module.
+
+    This is a thin wrapper of the underlying TVM module.
+    you can also directly call set_input, run, and get_output
+    of underlying module functions
+
+    Parameters
+    ----------
+    module : tvm.runtime.Module
+        The internal tvm module that holds the actual graph functions.
+
+    Attributes
+    ----------
+    module : tvm.runtime.Module
+        The internal tvm module that holds the actual graph functions.
+
+    """
+
+    def __init__(self, graph_modules, pipeline_config):
+        mods = []
+        for module in graph_modules:
+            mods.append(module.module)
+
+        pipelinecreate = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create")
+        module = pipelinecreate(mods, pipeline_config)
+
+        self.graph_modules_ = graph_modules
+
+        self._set_input = module["set_input"]
+        self._run = module["run"]
+        self._stop = module["stop"]
+        self._get_output = module["get_output"]
+        self._get_input = module["get_input"]
+        self._get_num_outputs = module["get_num_outputs"]
+        self._get_num_inputs = module["get_num_inputs"]
+
+    def set_input(self, key=None, value=None, params=None):
+        """Set inputs to the module via kwargs
+
+        Parameters
+        ----------
+        key : int or str
+           The input key
+
+        value : the input value.
+           The input key
+
+        params : dict of str to NDArray
+           Additional arguments
+        """
+        if key is not None:

Review comment:
       I didn't see the fix — why do we need `if key is not None`?

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */
+void SubGraphRuntime::Stop() { pipeline_stop(runtimes); }
+/*!
+ * \brief Run all the operations one by one.
+ */
+void SubGraphRuntime::Run() {
+  pipeline_run(runtimes, input_int_map);
+  /* Clear the input map
+   */

Review comment:
       Where is the input map cleared?

##########
File path: src/runtime/pipeline/pipeline_executor.cc
##########
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file pipeline_executor.cc
+ */
+#include "pipeline_executor.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ *\bief Stop pipeline run.
+ */
+void SubGraphRuntime::Stop() { pipeline_stop(runtimes); }
+/*!
+ * \brief Run all the operations one by one.
+ */
+void SubGraphRuntime::Run() {
+  pipeline_run(runtimes, input_int_map);
+  /* Clear the input map
+   */
+}
+
+void SubGraphRuntime::Init(const Array<tvm::runtime::Module>& modules,
+                           const std::string& pipeline_json) {
+  std::istringstream is(pipeline_json);
+  dmlc::JSONReader reader(&is);
+  this->Load(&reader);
+  outpuNumber = pipeline_init(modules, &runtimes, &pipeline_conf);
+  return;
+}
+
+/*!
+ * \brief set index-th input to the modIndx-th graph.
+ * \param index The input index.
+ * \param data_in The input data.
+ * \param modIndx The runtime index.
+ */
+void SubGraphRuntime::SetInput(int index, DLTensor* data_in, int modIndx) {
+  if (1 == modIndx) {
+    runtimes[0]->runtimePtr->SetInput(index, data_in);
+  } else {
+    pipeline_setinput(input_int_map, index, data_in, modIndx);
+  }
+}
+
+/*!
+ * \brief Get the number of outputs
+ *
+ * \return The number of outputs from last pipeline.
+ */
+int SubGraphRuntime::NumOutputs() const { return outpuNumber; }
+
+/*!
+ * \brief Get the number of inputs
+ *
+ * \return The number of inputs to the first pipeline.
+ */
+int SubGraphRuntime::NumInputs() const {
+  int inputsNum = 0;
+  for (auto runtime : runtimes) {
+    inputsNum += runtime->runtimePtr->NumInputs();
+  }
+  return inputsNum;
+}
+
+/*!
+ * \brief Return NDArray for given input index.
+ * \param index The input index.
+ *
+ * \return NDArray corresponding to given input node index.
+ */
+NDArray SubGraphRuntime::GetInput(int index, int mIndx) const {
+  auto gruntime = runtimes[mIndx];
+  return gruntime->runtimePtr->GetInput(index);
+}
+
+/*!
+ * \brief Return input index for given input name.
+ * \param name The input name.
+ *
+ * \return int corresponding to given input node name.
+ */
+int SubGraphRuntime::GetInputIndex(const string& name, int mIndx) const {
+  auto gruntime = runtimes[mIndx - 1];
+  return gruntime->runtimePtr->GetInputIndex(name);
+}
+
+/*!
+ * \brief Return NDArray Array for all output.
+ *
+ * \return NDArray Array for all output.
+ */
+Array<NDArray> SubGraphRuntime::GetOutput(bool syncPoll) {
+  Array<NDArray> nd;
+  if (pipeline_poll(&output_entry_, runtimes, syncPoll)) {
+    for (auto output : output_entry_) {
+      nd.push_back(output);
+    }
+  }
+  return nd;
+}
+
+PackedFunc SubGraphRuntime::GetFunction(const std::string& name,
+                                        const ObjectPtr<Object>& sptr_to_self) 
{
+  /* Return member functions during query.
+   */

Review comment:
       ```suggestion
   ```




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to