comaniac commented on a change in pull request #9108:
URL: https://github.com/apache/tvm/pull/9108#discussion_r722584139



##########
File path: src/runtime/pipeline/pipeline_executor.h
##########
@@ -36,25 +43,80 @@ namespace runtime {
  *
  *  This executor can be accessed by various language via TVM runtime 
PackedFunc API.
  */
-class TVM_DLL PipelineRuntime : public ModuleNode {
+class TVM_DLL PipelineExecutor : public ModuleNode {
  public:
   /*!
    * \Return the type key of the executor.
    */
-  const char* type_key() const final { return "PipelineRuntime"; }
+  const char* type_key() const final { return "PipelineExecutor"; }
   /*!
    * \brief Initialize the pipeline executor with module array and json text.
    * \param modules The module list used for building pipeline.
    * \param pipeline_json The configuration of modules dependencies.
    */
-  void Init(const Array<tvm::runtime::Module>& modules, const std::string& 
pipeline_json);
+  void Init(const Array<Module>& modules, const std::string& pipeline_json);
   /*!
    * \brief Give frontends an access to packed functions.
    * \param name The name of the function.
    * \param sptr_to_self The pointer to the module node.
    * \return The corresponding packed function.
    */
   virtual PackedFunc GetFunction(const std::string& name, const 
ObjectPtr<Object>& sptr_to_self);
+
+  /*!
+   * \brief Get the number of outputs.
+   *
+   * \return The number of outputs.
+   */
+  int NumOutputs() const { return num_outputs_; }
+
+ private:
+  /*!\brief The class used to execute pipeline logic.*/
+  PipelineScheduler pipeline_function_;
+  /*!\brief The Dependency information of each graph runtime module of 
pipeline.*/
+  PipelineConfig pipeline_config_;
+  /*!\brief The Module information used to create graph runtime.*/
+  ModuleConfig mod_config_;
+  /*!\brief How many outputs are in this pipeline executor.*/
+  size_t num_outputs_ = 0;
+  /*!\brief Json loader.*/
+  void Load(dmlc::JSONReader* reader) {
+    reader->BeginArray();
+    while (reader->NextArrayItem()) {
+      std::string key;
+      reader->BeginObject();
+      int mod_idx = -1;
+      std::string lib_name;
+      std::string json_name;
+      std::string params_name;
+      std::string dev;
+      OutputMap output;
+      while (reader->NextObjectItem(&key)) {
+        if (key == "mod_idx") {
+          reader->Read(&mod_idx);
+        } else if (key == "lib_name") {
+          reader->Read(&lib_name);
+        } else if (key == "json_name") {
+          reader->Read(&json_name);
+        } else if (key == "params_name") {
+          reader->Read(&params_name);
+        } else if (key == "dev") {
+          reader->Read(&dev);
+        } else if (key == "output") {
+          reader->Read(&output);
+        }

Review comment:
       Add an `else` branch that throws an error when an unrecognized key is encountered.

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -501,17 +537,18 @@ class PipelineExecutorFactoryModule(object):
 
     """
 
-    def __init__(self, pipeline_mods, mods_config):
-        mods, config = self.graph_executor_create(pipeline_mods, mods_config)
-        assert (
-            pipeline_executor_enabled()
-        ), "Pipeline executor is not enabled. Please \
-              re-build TVM with USE_PIPELINE_EXECUTOR=ON"
-        pipeline_create = tvm._ffi.get_global_func(
+    def __init__(self, pipeline_libs=None, mods_config=None):
+        self.pipeline_libs = pipeline_libs
+        self.mods_config = mods_config
+        self.pipeline_create = tvm._ffi.get_global_func(
             "tvm.pipeline_executor.create", allow_missing=False
         )
-        assert pipeline_create
-        self.module = pipeline_create(mods, config)
+        self.module = None
+        # Only create pipeline executor when pipeline_libs, mods_config and
+        # self.pipeline_create are not None.
+        if pipeline_libs and mods_config and self.pipeline_create:

Review comment:
       Following my comment for PipelineModule, I don't think we should allow 
`pipeline_libs` and `mods_config` to be None:
   
   ```python
   class PipelineExecutorFactoryModule(object):
     def __init__(self, pipeline_libs, mods_config):
       self.pipeline_libs = pipeline_libs
       self.mods_config = mods_config
       self.pipeline_create = 
tvm._ffi.get_global_func("tvm.pipeline_executor.create", allow_missing=False)
       graph_executors, config = self.graph_executor_create(pipeline_libs, 
mods_config)
       self.module = self.pipeline_create(graph_executors, config)
   
     @staticmethod
     def load_library(config_file_name):
       return PipelineExecutorFactoryModule([], self.pipe_config)
   ```
   
   So that you can use in PipelineModule:
   
   ```python
   class PipelineModule(object):
     def __init__(self, module):
       self.module = module.module
       self._get_num_outputs = self.module["get_num_outputs"]
   
     @staticmethod
     def load_library(config_file_name):
       pipeline_factory = 
PipelineExecutorFactoryModule.load_library(config_file_name)
       return PipelineModule(pipeline_factory)
   ```

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -86,8 +88,43 @@ class PipelineModule(object):
         Common interface for pipeline executor factory modules.
     """
 
-    def __init__(self, module):
-        self.module = module.module
+    def __init__(self, module=None):
+        self.module = module.module if module else None
+        self._get_num_outputs = None
+        # Get the packed functions from the pipeline executor.
+        self.load_functions()
+
+    def import_from_library(self, config_file_name):
+        """Import files to create pipeline executor.
+
+        Parameters
+        ----------
+        config_file_name : str
+            The configuration file path, the configuration file contains the
+            disk path of the parameter file, library file and JSON file.
+        # Create a empty PipelineExecutorFactoryModule.
+        pipeline_factory = PipelineExecutorFactoryModule()
+        # Load the configuration file to initialize a 
PipelineExecutorFactoryModule.
+        pipeline_factory.import_from_library(config_file_name)
+        self.module = pipeline_factory.module
+        # Get packed functions from the pipeline executor.
+        self.load_functions()
+
+    def load_functions(self):
+        # Get functions from the pipeline executor.
+        self._get_num_outputs = self.module["get_num_outputs"] if self.module 
else None
+
+    def get_num_outputs(self):
+        """Get the number of outputs.
+        Returns
+        -------
+        count : int
+            The number of outputs.
+        """
+        if not self._get_num_outputs:
+            raise RuntimeError(f"The pipeline executor has not been 
initialized.")
+        return self._get_num_outputs()

Review comment:
       I think this is the key point that makes the implementation non-trivial. Can 
we disallow `module` to be None when creating a `PipelineModule`? You should make 
the initializer static, so that `load_functions` can be removed. For example:
   
   ```python
   class PipelineModule(object):
     def __init__(self, module):
       self.module = module.module
       self._get_num_outputs = self.module["get_num_outputs"]
   
     @staticmethod
     def load_library(config_file_name):
       pipeline_factory = PipelineExecutorFactoryModule()
       pipeline_factory.load_library(config_file_name)
       return PipelineModule(pipeline_factory)
   ```
   
   And the usage:
   
   ```python
   pipeline_module = PipelineModule(module) # Initialize using a module
   pipeline_module = PipelineModule.load_library(config_file) # Initialize 
using the configure file
   ```

##########
File path: src/runtime/pipeline/pipeline_executor.h
##########
@@ -36,25 +43,89 @@ namespace runtime {
  *
  *  This executor can be accessed by various language via TVM runtime 
PackedFunc API.
  */
-class TVM_DLL PipelineRuntime : public ModuleNode {
+class TVM_DLL PipelineExecutor : public ModuleNode {
  public:
   /*!
    * \Return the type key of the executor.
    */
-  const char* type_key() const final { return "PipelineRuntime"; }
+  const char* type_key() const final { return "PipelineExecutor"; }
   /*!
    * \brief Initialize the pipeline executor with module array and json text.
    * \param modules The module list used for building pipeline.
    * \param pipeline_json The configuration of modules dependencies.
    */
-  void Init(const Array<tvm::runtime::Module>& modules, const std::string& 
pipeline_json);
+  void Init(const Array<Module>& modules, const std::string& pipeline_json);
   /*!
    * \brief Give frontends an access to packed functions.
    * \param name The name of the function.
    * \param sptr_to_self The pointer to the module node.
    * \return The corresponding packed function.
    */
   virtual PackedFunc GetFunction(const std::string& name, const 
ObjectPtr<Object>& sptr_to_self);
+
+  /*!
+   * \brief Get the number of outputs.
+   *
+   * \return The number of outputs.
+   */
+  int NumOutputs() const { return num_outputs_; }
+
+ private:
+  /*!\brief The class used to execute pipeline logic*/
+  PipelineFunction pipeline_function_;
+  /*!\brief The Dependency information of each graph runtime module of 
pipeline.*/
+  PipelineConfig pipeline_config_;
+  /*!\brief The Module information that can get used to create graph runtime.*/
+  ModuleConfig mod_config_;
+  /*!\birief How many outputs are in this pipeline executor.*/
+  size_t num_outputs_ = 0;
+  /*!\brief Json loader.*/
+  void Load(dmlc::JSONReader* reader) {
+    reader->BeginArray();
+    while (reader->NextArrayItem()) {
+      std::string key;
+      reader->BeginObject();
+      int mod_idx = 0;
+      std::string lib_name;
+      std::string json_name;
+      std::string params_name;
+      std::string dev;
+      OutputMap output;
+      while (reader->NextObjectItem(&key)) {
+        if (key == "mod_idx") {
+          reader->Read(&mod_idx);
+        }
+        if (key == "lib_name") {
+          reader->Read(&lib_name);
+        }
+
+        if (key == "json_name") {
+          reader->Read(&json_name);
+        }
+
+        if (key == "params_name") {
+          reader->Read(&params_name);
+        }
+
+        if (key == "dev") {
+          reader->Read(&dev);
+        }
+
+        if (key == "output") {
+          reader->Read(&output);
+        }
+      }
+      // Check if mod_idx is read successfully.
+      ICHECK(mod_idx > 0);
+      // Check if the output is read successfully.
+      ICHECK(!output.Empty());
+      pipeline_config_.Insert(mod_idx, output);
+      // Check if there is lib, json and params information.
+      if (!lib_name.empty() && !json_name.empty() && !params_name.empty()) {

Review comment:
       Not a good design. No one will know about this behavior just by reading this 
function.
   If the graph module is already initialized, why do we need to call this 
function again?

##########
File path: python/tvm/contrib/pipeline_executor.py
##########
@@ -501,17 +538,18 @@ class PipelineExecutorFactoryModule(object):
 
     """
 
-    def __init__(self, pipeline_mods, mods_config):
-        mods, config = self.graph_executor_create(pipeline_mods, mods_config)
-        assert (
-            pipeline_executor_enabled()
-        ), "Pipeline executor is not enabled. Please \
-              re-build TVM with USE_PIPELINE_EXECUTOR=ON"
-        pipeline_create = tvm._ffi.get_global_func(
+    def __init__(self, pipeline_libs=None, mods_config=None):
+        self.pipeline_libs = pipeline_libs
+        self.mods_config = mods_config
+        self.pipeline_create = tvm._ffi.get_global_func(

Review comment:
       Please do not resolve a conversation before it has been checked by the commenter.
   You now allow this argument to be None, but only create the graph executor when it is 
not. As a result, if this function is None, users will silently get nothing. Please add 
a proper error message instead of silently doing nothing.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to