masahi commented on code in PR #11911:
URL: https://github.com/apache/tvm/pull/11911#discussion_r919794192


##########
python/tvm/contrib/torch/optimize_torch.py:
##########
@@ -0,0 +1,187 @@
+# pylint: disable=inconsistent-return-statements
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=missing-module-docstring
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+"""
+optimize_torch: a function similar to `torch.jit.trace`,
+which is used to optimize a `torch.nn.Module` via TVM MetaSchedule,
+and returns a custom TorchScript operator
+"""
+import base64
+import contextlib
+import tempfile
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+import torch.utils.dlpack
+
+import tvm
+from tvm import relay
+from tvm._ffi import get_global_func, register_func
+from tvm.ir.module import IRModule
+from tvm.ir.transform import PassContext
+from tvm.meta_schedule import TuneConfig, default_config
+from tvm.meta_schedule.apply_history_best import ApplyHistoryBest
+from tvm.meta_schedule.relay_integration import extract_task_from_relay
+from tvm.meta_schedule.tune import tune_extracted_tasks
+from tvm.meta_schedule.utils import autotvm_silencer
+from tvm.runtime import vm
+from tvm.runtime.module import Module
+from tvm.runtime.ndarray import NDArray
+from tvm.target.target import Target
+
+
+# The python wrapper for GraphExecutorFactory
+class GraphExecutorFactoryWrapper(torch.nn.Module):
+    def __init__(self, module: tvm.runtime.Module):
+        super().__init__()
+        self.inner_module = module
+
+    def forward(self, *torch_inputs: Tuple[torch.Tensor]):
+        ret = self.inner_module.forward(torch_inputs)
+        if len(ret) == 1:
+            return ret[0]
+        return ret
+
+
+def llvm_target():
+    return "llvm -num-cores"
+
+
+@register_func("script_torch.save_to_base64")
+def save_to_base64(obj) -> bytes:
+    with tempfile.NamedTemporaryFile(suffix=".so") as tmpfile:
+        obj.export_library(tmpfile.name)
+        with open(tmpfile.name, "rb") as tfile:
+            return base64.b64encode(tfile.read())
+
+
+def tune_relay_auto(
+    mod: IRModule,
+    target: Union[str, Target],
+    config: TuneConfig,
+    work_dir: str,
+    backend: str = "graph",
+    params: Optional[Dict[str, NDArray]] = None,
+) -> Union[Module, vm.Executable]:
+    """A wrapper of `tune_relay` but provide a default setting for the config.
+
+    Parameters
+    ----------
+    mod : IRModule
+        The module to tune.
+    target : Union[str, Target]
+        The target to tune for.
+    config : TuneConfig
+        The search strategy config.
+    params : Optional[Dict[str, tvm.runtime.NDArray]]
+        The associated parameters of the program
+    work_dir : Optional[str]
+        The working directory to save intermediate results.
+    backend : str = "graph"
+        The backend to use for relay compilation (graph / vm).
+
+    Returns
+    -------
+    lib : Union[Module, tvm.runtime.vm.Executable]
+        The built runtime module or vm Executable for the given relay workload.
+    """
+    target = default_config.target(target)
+    extracted_tasks = extract_task_from_relay(mod, target, params)
+    if config is None:
+        config = TuneConfig(
+            num_trials_per_iter=16,
+            max_trials_global=16 * len(extracted_tasks),
+        )
+    database = tune_extracted_tasks(extracted_tasks, config, work_dir)
+    relay_build = {"graph": relay.build, "vm": relay.vm.compile}[backend]
+    with target, autotvm_silencer(), ApplyHistoryBest(database):
+        with PassContext(
+            opt_level=3,
+            config={
+                "relay.backend.use_meta_schedule": True,
+                "relay.backend.use_meta_schedule_dispatch": target.kind.name 
!= "cuda",
+            },
+        ):
+            return relay_build(mod, target=target, params=params)
+
+
+def optimize_torch(
+    func,
+    example_inputs,
+    tuning_config=None,
+    target=None,
+    work_dir=None,
+):
+    """Load PyTorch model that could be traced by TorchScript, then optimize 
it via MetaSchedule.
+
+    Parameters
+    ----------
+    func : callable or torch.nn.Module
+        A Python function or nn.Module that can be run by TorchScript's trace.
+        (i.e., torch.jit.trace(model, input))
+
+    example_inputs : tuple or torch.Tensor
+        Inputs to `torch.jit.trace`.
+
+    tuning_config : tvm.meta_schedule.TuneConfig
+        The configuration for tuning by MetaSchedule.
+        If user doesn't set the config, the tuning will run with a default setting,
+        a number proportional to the tasks of the module.

Review Comment:
   ` a number proportional to the tasks of the module` -> `where the total 
number of trials is proportional to the number of tunable tasks in the input 
module`.
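   For illustration, roughly what passing an explicit config looks like from the user side (a sketch only: the trial counts are placeholders, and the `tvm.contrib.torch` import path for `optimize_torch` is assumed):
   
   ```python
   # Hypothetical usage sketch: pass an explicit TuneConfig instead of relying on
   # the default, so the total number of trials is under the user's control.
   import torch
   from tvm.meta_schedule import TuneConfig
   from tvm.contrib.torch import optimize_torch  # assumed export path
   
   model = torch.nn.Linear(128, 128).eval()
   example_input = torch.rand(8, 128)
   
   config = TuneConfig(
       strategy="evolutionary",
       num_trials_per_iter=64,
       max_trials_per_task=128,
       max_trials_global=2000,  # placeholder; scale with the number of tasks
   )
   
   tuned = optimize_torch(model, example_input, tuning_config=config)
   output = tuned(example_input)
   ```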



##########
python/tvm/contrib/torch/as_torch.py:
##########
@@ -0,0 +1,88 @@
+# pylint: disable=inconsistent-return-statements
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=missing-module-docstring
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+"""
+as_torch: a decorator used to wrap TVMScript code into a `torch.nn.Module`.
+"""
+from typing import Callable, List, Union
+
+import torch
+import torch.utils.dlpack
+
+import tvm
+
+
+# python wrapper for OperatorModule
+class OperatorModuleWrapper(torch.nn.Module):
+    def __init__(
+        self,
+        module: Union[
+            tvm.ir.module.IRModule,
+            tvm.tir.function.PrimFunc,
+        ],
+    ):
+        super().__init__()
+        self.rt_module = None  # runtime module
+        self.ir_module = module  # IR modules
+
+    def build(self, target=None):
+        runtime_module = tvm.build(self.ir_module, target=target)
+        func = tvm.get_global_func("tvmtorch.save_runtime_mod")
+        func(runtime_module)
+
+        self.rt_module = torch.classes.tvm_torch.OperatorModuleWrapper()
+
+    def forward(self, *torch_inputs: List[torch.Tensor]) -> List[torch.Tensor]:
+        if self.rt_module is None:
+            if torch_inputs[0].is_cuda:
+                self.build(target="cuda")
+            elif torch_inputs[0].device.type == "cpu":
+                self.build()
+            else:
+                raise Exception(f"the target {torch_inputs[0].device.type} is 
not supported yet")
+
+        return self.rt_module.forward(torch_inputs)
+
+
+def as_torch(func: Union[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]):

Review Comment:
   So `as_torch` doesn't provide tuning facilities? I noticed that all tuning tests in this PR are done via `optimize_torch`, which involves Relay. If a user wants to tune a TVMScript-written op and use the `@as_torch` decorator, how can tuning be done?
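   For context, the (untuned) flow with the decorator looks roughly like the sketch below, so the question is where a `tune_tir`-style step would plug in. The TVMScript kernel is made up and `as_torch` is assumed to be exported from `tvm.contrib.torch`:
   
   ```python
   # Hypothetical example: wrap a TVMScript PrimFunc as a torch.nn.Module.
   # There is no tuning step here, which is what this comment is asking about.
   import torch
   from tvm.script import tir as T
   from tvm.contrib.torch import as_torch  # assumed export path
   
   @as_torch
   @T.prim_func
   def vector_add(a: T.handle, b: T.handle, c: T.handle) -> None:
       A = T.match_buffer(a, (128,), "float32")
       B = T.match_buffer(b, (128,), "float32")
       C = T.match_buffer(c, (128,), "float32")
       for i in range(128):
           with T.block("add"):
               vi = T.axis.spatial(128, i)
               C[vi] = A[vi] + B[vi]
   
   x, y = torch.rand(128), torch.rand(128)
   out = torch.zeros(128)
   vector_add(x, y, out)  # builds with tvm.build on first call, writes into `out`
   ```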



##########
src/contrib/torch/pt_call_tvm/RuntimeModuleWrapper.cc:
##########
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <ATen/DLConvertor.h>
+#include <dlpack/dlpack.h>
+#include <dmlc/memory_io.h>
+#include <torch/custom_class.h>
+#include <torch/script.h>
+#include <tvm/runtime/module.h>
+#include <tvm/runtime/registry.h>
+#include <tvm/target/codegen.h>
+#include <tvm/target/target.h>
+
+#include <cstdio>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+
+namespace tvm {
+namespace contrib {
+
+/**
+ * We pass the TVM module by TVM's FFI because Torch's FFI cannot recognize such TVM objects
+ */
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+    thread_local ThreadLocalStore tls;
+    return &tls;
+  }
+};
+
+using SerializationType = std::string;  // base64 stream
+
+SerializationType serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+      runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+                      "`script_torch.save_to_tar` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+    fclose(p);
+    ICHECK(remove(file_name.c_str()) == 0)
+        << "remove temporary file (" << file_name << ") unsuccessfully";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(SerializationType state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector<u_char> bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr<FILE, Deleter> pFile(fopen(file_name.c_str(), "wb"), Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+                       << " resolved to (" << load_f_name << ") in the global 
registry."
+                       << "Ensure that you have loaded the correct runtime 
code, and"
+                       << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+/**
+ * @brief A Torch module which wraps TVM's OperatorModule class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class OperatorModuleWrapper : public torch::jit::CustomClassHolder {
+ public:
+  OperatorModuleWrapper() { runtime_module = ThreadLocalStore::ThreadLocal()->mod; }
+
+  void forward(const c10::List<at::Tensor>& inputs) {
+    int input_length = inputs.size();
+
+    std::vector<DLManagedTensor*> tensors;
+
+    for (int i = 0; i < input_length; ++i) tensors.push_back(toDLPack(inputs[i]));
+
+    tvm::runtime::PackedFunc run = runtime_module.GetFunction("__tvm_main__");
+
+    std::vector<TVMValue> tvm_values(input_length);
+    std::vector<int> tvm_type_codes(input_length);
+    tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+    for (int k = 0; k < input_length; ++k) {
+      setter(k, &tensors[k]->dl_tensor);
+    }
+
+    run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), tvm_type_codes.data(), input_length),
+                   nullptr);
+
+    for (int k = 0; k < input_length; ++k) {
+      tensors[k]->deleter(tensors[k]);
+    }
+  }
+
+  SerializationType Serialize() { return serialize(runtime_module); }
+
+  explicit OperatorModuleWrapper(SerializationType state) { runtime_module = deserialize(state); }
+
+ private:
+  tvm::runtime::Module runtime_module;
+};
+
+tvm::Device getDevice(const at::Tensor& tensor) {
+  tvm::Device dev;
+  dev.device_id = tensor.get_device();
+  switch (tensor.device().type()) {
+    case at::DeviceType::CPU:
+      dev.device_type = DLDeviceType::kDLCPU;
+      if (dev.device_id == -1) {
+        /*
+         * In PyTorch the device ID for CPU is -1, which sometimes causes errors during tuning.
+         * Thus we manually set the device ID to 0 to avoid a potential index-out-of-bounds
+         * error.
+         */
+        dev.device_id = 0;
+      }
+      break;
+    case at::DeviceType::CUDA:
+      dev.device_type = DLDeviceType::kDLCUDA;
+      break;
+    default:
+      TORCH_CHECK(false, "PyTorch TVM integration doesn't support device " + 
tensor.device().str());
+  }
+  return dev;
+}
+
+/**
+ * @brief A Torch module which wraps TVM's GraphExecutorFactory class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class GraphExecutorFactoryWrapper : public torch::jit::CustomClassHolder {

Review Comment:
   If `GraphExecutorFactoryWrapper` is strictly better than the existing one, I want to see the existing one removed or reimplemented in terms of `GraphExecutorFactoryWrapper`. But this can be done in a follow-up.



##########
python/tvm/contrib/torch/optimize_torch.py:
##########
@@ -0,0 +1,187 @@
+# pylint: disable=inconsistent-return-statements
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=missing-module-docstring
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+"""
+optimize_torch: a function similar to `torch.jit.trace`,
+which is used to optimize a `torch.nn.Module` via TVM MetaSchedule,
+and returns a custom TorchScript operator
+"""
+import base64
+import contextlib
+import tempfile
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+import torch.utils.dlpack
+
+import tvm
+from tvm import relay
+from tvm._ffi import get_global_func, register_func
+from tvm.ir.module import IRModule
+from tvm.ir.transform import PassContext
+from tvm.meta_schedule import TuneConfig, default_config
+from tvm.meta_schedule.apply_history_best import ApplyHistoryBest
+from tvm.meta_schedule.relay_integration import extract_task_from_relay
+from tvm.meta_schedule.tune import tune_extracted_tasks
+from tvm.meta_schedule.utils import autotvm_silencer
+from tvm.runtime import vm
+from tvm.runtime.module import Module
+from tvm.runtime.ndarray import NDArray
+from tvm.target.target import Target
+
+
+# The python wrapper for GraphExecutorFactory
+class GraphExecutorFactoryWrapper(torch.nn.Module):
+    def __init__(self, module: tvm.runtime.Module):
+        super().__init__()
+        self.inner_module = module
+
+    def forward(self, *torch_inputs: Tuple[torch.Tensor]):
+        ret = self.inner_module.forward(torch_inputs)
+        if len(ret) == 1:
+            return ret[0]
+        return ret
+
+
+def llvm_target():
+    return "llvm -num-cores"
+
+
+@register_func("script_torch.save_to_base64")
+def save_to_base64(obj) -> bytes:
+    with tempfile.NamedTemporaryFile(suffix=".so") as tmpfile:
+        obj.export_library(tmpfile.name)
+        with open(tmpfile.name, "rb") as tfile:
+            return base64.b64encode(tfile.read())
+
+
+def tune_relay_auto(
+    mod: IRModule,
+    target: Union[str, Target],
+    config: TuneConfig,
+    work_dir: str,
+    backend: str = "graph",
+    params: Optional[Dict[str, NDArray]] = None,
+) -> Union[Module, vm.Executable]:
+    """A wrapper of `tune_relay` but provide a default setting for the config.
+
+    Parameters
+    ----------
+    mod : IRModule
+        The module to tune.
+    target : Union[str, Target]
+        The target to tune for.
+    config : TuneConfig
+        The search strategy config.
+    params : Optional[Dict[str, tvm.runtime.NDArray]]
+        The associated parameters of the program
+    work_dir : Optional[str]
+        The working directory to save intermediate results.
+    backend : str = "graph"
+        The backend to use for relay compilation (graph / vm).
+
+    Returns
+    -------
+    lib : Union[Module, tvm.runtime.vm.Executable]
+        The built runtime module or vm Executable for the given relay workload.
+    """
+    target = default_config.target(target)
+    extracted_tasks = extract_task_from_relay(mod, target, params)
+    if config is None:

Review Comment:
   Let's add a warning log here:
   
   ```
   Using the default tuning parameters. The default number of trials is set to a small value to let tuning finish quickly. For optimal performance, it is recommended to provide the `tuning_config` argument with a bigger number of trials.
   ```
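   For concreteness, a sketch of what that could look like around the `config is None` branch (the helper name and logger setup are made up; the default values mirror the ones in the diff):
   
   ```python
   # Sketch only: warn when no TuneConfig was supplied, then fall back to the
   # same small default used in the diff.
   import logging
   
   from tvm.meta_schedule import TuneConfig
   
   logger = logging.getLogger(__name__)
   
   def default_tune_config(config, num_tasks):
       """Return the user-provided config, or a small default one with a warning."""
       if config is not None:
           return config
       logger.warning(
           "Using the default tuning parameters. The default number of trials is "
           "set to a small value to let tuning finish quickly. For optimal "
           "performance, it is recommended to provide the `tuning_config` "
           "argument with a bigger number of trials."
       )
       return TuneConfig(
           num_trials_per_iter=16,
           max_trials_global=16 * num_tasks,
       )
   ```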



##########
python/tvm/contrib/torch/optimize_torch.py:
##########
@@ -0,0 +1,187 @@
+# pylint: disable=inconsistent-return-statements
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=missing-module-docstring
+# pylint: disable=missing-class-docstring
+# pylint: disable=missing-function-docstring
+"""
+optimize_torch: a function similar to `torch.jit.trace`,
+which is used to optimize a `torch.nn.Module` via TVM MetaSchedule,
+and returns a custom TorchScript operator
+"""
+import base64
+import contextlib
+import tempfile
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+import torch.utils.dlpack
+
+import tvm
+from tvm import relay
+from tvm._ffi import get_global_func, register_func
+from tvm.ir.module import IRModule
+from tvm.ir.transform import PassContext
+from tvm.meta_schedule import TuneConfig, default_config
+from tvm.meta_schedule.apply_history_best import ApplyHistoryBest
+from tvm.meta_schedule.relay_integration import extract_task_from_relay
+from tvm.meta_schedule.tune import tune_extracted_tasks
+from tvm.meta_schedule.utils import autotvm_silencer
+from tvm.runtime import vm
+from tvm.runtime.module import Module
+from tvm.runtime.ndarray import NDArray
+from tvm.target.target import Target
+
+
+# The python wrapper for GraphExecutorFactory
+class GraphExecutorFactoryWrapper(torch.nn.Module):
+    def __init__(self, module: tvm.runtime.Module):
+        super().__init__()
+        self.inner_module = module
+
+    def forward(self, *torch_inputs: Tuple[torch.Tensor]):
+        ret = self.inner_module.forward(torch_inputs)
+        if len(ret) == 1:
+            return ret[0]
+        return ret
+
+
+def llvm_target():
+    return "llvm -num-cores"
+
+
+@register_func("script_torch.save_to_base64")
+def save_to_base64(obj) -> bytes:
+    with tempfile.NamedTemporaryFile(suffix=".so") as tmpfile:
+        obj.export_library(tmpfile.name)
+        with open(tmpfile.name, "rb") as tfile:
+            return base64.b64encode(tfile.read())
+
+
+def tune_relay_auto(
+    mod: IRModule,
+    target: Union[str, Target],
+    config: TuneConfig,
+    work_dir: str,
+    backend: str = "graph",
+    params: Optional[Dict[str, NDArray]] = None,
+) -> Union[Module, vm.Executable]:
+    """A wrapper of `tune_relay` but provide a default setting for the config.
+
+    Parameters
+    ----------
+    mod : IRModule
+        The module to tune.
+    target : Union[str, Target]
+        The target to tune for.
+    config : TuneConfig
+        The search strategy config.
+    params : Optional[Dict[str, tvm.runtime.NDArray]]
+        The associated parameters of the program
+    work_dir : Optional[str]
+        The working directory to save intermediate results.
+    backend : str = "graph"
+        The backend to use for relay compilation (graph / vm).
+
+    Returns
+    -------
+    lib : Union[Module, tvm.runtime.vm.Executable]
+        The built runtime module or vm Executable for the given relay workload.
+    """
+    target = default_config.target(target)
+    extracted_tasks = extract_task_from_relay(mod, target, params)
+    if config is None:
+        config = TuneConfig(
+            num_trials_per_iter=16,
+            max_trials_global=16 * len(extracted_tasks),
+        )
+    database = tune_extracted_tasks(extracted_tasks, config, work_dir)
+    relay_build = {"graph": relay.build, "vm": relay.vm.compile}[backend]
+    with target, autotvm_silencer(), ApplyHistoryBest(database):
+        with PassContext(
+            opt_level=3,
+            config={
+                "relay.backend.use_meta_schedule": True,
+                "relay.backend.use_meta_schedule_dispatch": target.kind.name 
!= "cuda",
+            },
+        ):
+            return relay_build(mod, target=target, params=params)
+
+
+def optimize_torch(
+    func,
+    example_inputs,
+    tuning_config=None,
+    target=None,
+    work_dir=None,
+):
+    """Load PyTorch model that could be traced by TorchScript, then optimize 
it via MetaSchedule.
+
+    Parameters
+    ----------
+    func : callable or torch.nn.Module
+        A Python function or nn.Module that can be run by TorchScript's trace.
+        (i.e., torch.jit.trace(model, input))
+
+    example_inputs : tuple or torch.Tensor
+        Inputs to `torch.jit.trace`.
+
+    tuning_config : tvm.meta_schedule.TuneConfig
+        The configuration for tuning by MetaSchedule.
+        If user doesn't set the config, the tuning will run with a default setting,
+        a number proportional to the tasks of the module.
+
+    target : Optional[Union[str, Target]]
+        The target of the compilation.
+        If user doesn't set the target, the module will be built for the CPU target.
+
+    work_dir : Optional[str]
+        The working directory to save intermediate results.
+
+    Returns
+    -------
+    mod : GraphExecutorFactoryWrapper
+        It will return an object of GraphExecutorFactoryWrapper,
+        which is a subclass of torch.nn.Module.
+    """
+
+    if target is None:
+        target = llvm_target()
+
+    # If `func` is already a traced module, this statement has no effect
+    jit_mod = torch.jit.trace(func, example_inputs)
+
+    if isinstance(example_inputs, torch.Tensor):
+        example_inputs = [example_inputs]
+
+    shape_list = [(f"inp_{idx}", i.shape) for idx, i in 
enumerate(example_inputs)]
+    mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)  # IRmodule
+    if work_dir:
+        context_manager = contextlib.nullcontext(work_dir)
+    else:
+        context_manager = tempfile.TemporaryDirectory()
+    with context_manager as work_dir_path:
+        executor_factory = tune_relay_auto(
+            mod=mod, params=params, config=tuning_config, target=target, work_dir=work_dir_path
+        )
+
+    save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod")
+    save_runtime_mod(executor_factory.module)
+
+    return GraphExecutorFactoryWrapper(torch.classes.tvm_tuning.GraphExecutorFactoryWrapper())

Review Comment:
   This looks strange... Why doesn't `torch.classes.tvm_tuning.GraphExecutorFactoryWrapper()` take any argument?
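   For context, the no-argument constructor appears to rely on the `ThreadLocalStore` handoff in `RuntimeModuleWrapper.cc`: `optimize_torch` first calls `tvmtorch.save_runtime_mod` over TVM's FFI, and the Torch custom class then reads the module back from thread-local storage in its default constructor. A plain-Python analogue of that pattern (purely illustrative, no TVM or Torch types involved):
   
   ```python
   # Pure-Python analogue of the ThreadLocalStore handoff in the diff. The real
   # code stashes a tvm::runtime::Module via `tvmtorch.save_runtime_mod` because
   # Torch's FFI cannot pass TVM objects directly.
   import threading
   
   _store = threading.local()
   
   def save_runtime_mod(mod):
       """Stand-in for the `tvmtorch.save_runtime_mod` packed function."""
       _store.mod = mod
   
   class GraphExecutorFactoryWrapperStandIn:
       """Stand-in for torch.classes.tvm_tuning.GraphExecutorFactoryWrapper."""
       def __init__(self):
           # No argument: the constructor picks up whatever module was stashed
           # by save_runtime_mod on this thread.
           self.executor_factory = _store.mod
   
   save_runtime_mod("compiled executor factory (placeholder)")
   wrapper = GraphExecutorFactoryWrapperStandIn()
   assert wrapper.executor_factory == "compiled executor factory (placeholder)"
   ```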



##########
src/contrib/torch/pt_call_tvm/RuntimeModuleWrapper.cc:
##########
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include <ATen/DLConvertor.h>
+#include <dlpack/dlpack.h>
+#include <dmlc/memory_io.h>
+#include <torch/custom_class.h>
+#include <torch/script.h>
+#include <tvm/runtime/module.h>
+#include <tvm/runtime/registry.h>
+#include <tvm/target/codegen.h>
+#include <tvm/target/target.h>
+
+#include <cstdio>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "../../../runtime/graph_executor/graph_executor_factory.h"
+#include "../base64.h"
+
+namespace tvm {
+namespace contrib {
+
+/**
+ * We pass the TVM module by TVM's FFI because Torch's FFI cannot recognize such TVM objects
+ */
+struct ThreadLocalStore {
+  tvm::runtime::Module mod;
+  static ThreadLocalStore* ThreadLocal() {
+    thread_local ThreadLocalStore tls;
+    return &tls;
+  }
+};
+
+using SerializationType = std::string;  // base64 stream
+
+SerializationType serialize(tvm::runtime::Module module) {
+  static const runtime::PackedFunc* f_to_str =
+      runtime::Registry::Get("script_torch.save_to_base64");
+  ICHECK(f_to_str) << "IndexError: Cannot find the packed function "
+                      "`script_torch.save_to_base64` in the global registry";
+  return (*f_to_str)(module);
+}
+
+struct Deleter {  // deleter
+  explicit Deleter(std::string file_name) { this->file_name = file_name; }
+  void operator()(FILE* p) const {
+    fclose(p);
+    ICHECK(remove(file_name.c_str()) == 0)
+        << "Failed to  remove temporary file (" << file_name << ")";
+  }
+  std::string file_name;
+};
+
+tvm::runtime::Module deserialize(SerializationType state) {
+  auto length = tvm::support::b64strlen(state);
+
+  std::vector<u_char> bytes(length);
+  tvm::support::b64decode(state, bytes.data());
+
+  const std::string name = tmpnam(NULL);
+  auto file_name = name + ".so";
+  std::unique_ptr<FILE, Deleter> pFile(fopen(file_name.c_str(), "wb"), Deleter(file_name));
+  fwrite(bytes.data(), sizeof(u_char), length, pFile.get());
+  fflush(pFile.get());
+
+  std::string load_f_name = "runtime.module.loadfile_so";
+  const PackedFunc* f = runtime::Registry::Get(load_f_name);
+  ICHECK(f != nullptr) << "Loader for `.so` files is not registered,"
+                       << " resolved to (" << load_f_name << ") in the global 
registry."
+                       << "Ensure that you have loaded the correct runtime 
code, and"
+                       << "that you are on the correct hardware architecture.";
+
+  tvm::runtime::Module ret = (*f)(file_name, "");
+
+  return ret;
+}
+
+/**
+ * @brief A Torch module which wraps TVM's OperatorModule class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class OperatorModuleWrapper : public torch::jit::CustomClassHolder {
+ public:
+  OperatorModuleWrapper() { runtime_module = ThreadLocalStore::ThreadLocal()->mod; }
+
+  void forward(const c10::List<at::Tensor>& inputs) {
+    int input_length = inputs.size();
+
+    std::vector<DLManagedTensor*> tensors;
+
+    for (int i = 0; i < input_length; ++i) tensors.push_back(toDLPack(inputs[i]));
+
+    tvm::runtime::PackedFunc run = runtime_module.GetFunction("__tvm_main__");
+
+    std::vector<TVMValue> tvm_values(input_length);
+    std::vector<int> tvm_type_codes(input_length);
+    tvm::runtime::TVMArgsSetter setter(tvm_values.data(), tvm_type_codes.data());
+    for (int k = 0; k < input_length; ++k) {
+      setter(k, &tensors[k]->dl_tensor);
+    }
+
+    run.CallPacked(tvm::runtime::TVMArgs(tvm_values.data(), tvm_type_codes.data(), input_length),
+                   nullptr);
+
+    for (int k = 0; k < input_length; ++k) {
+      tensors[k]->deleter(tensors[k]);
+    }
+  }
+
+  SerializationType Serialize() { return serialize(runtime_module); }
+
+  explicit OperatorModuleWrapper(SerializationType state) { runtime_module = deserialize(state); }
+
+ private:
+  tvm::runtime::Module runtime_module;
+};
+
+tvm::Device getDevice(const at::Tensor& tensor) {
+  tvm::Device dev;
+  dev.device_id = tensor.get_device();
+  switch (tensor.device().type()) {
+    case at::DeviceType::CPU:
+      dev.device_type = DLDeviceType::kDLCPU;
+      if (dev.device_id == -1) {
+        /*
+         * In PyTorch the device ID for CPU is -1, which sometimes causes errors during tuning.
+         * Thus we manually set the device ID to 0 to avoid a potential index-out-of-bounds
+         * error.
+         */
+        dev.device_id = 0;
+      }
+      break;
+    case at::DeviceType::CUDA:
+      dev.device_type = DLDeviceType::kDLCUDA;
+      break;
+    default:
+      TORCH_CHECK(false, "PyTorch TVM integration doesn't support device " + 
tensor.device().str());
+  }
+  return dev;
+}
+
+/**
+ * @brief A Torch module which wraps TVM's GraphExecutorFactory class.
+ * The basic forward function calling TVM's runtime is provided.
+ * The TVM module can be serialized/deserialized as a Torch module.
+ */
+class GraphExecutorFactoryWrapper : public torch::jit::CustomClassHolder {
+ public:
+  explicit GraphExecutorFactoryWrapper(tvm::runtime::Module executor_factory)
+      : executor_factory_(executor_factory) {
+    CHECK(executor_factory_->IsInstance<runtime::GraphExecutorFactory>())
+        << "module is not an instance of GraphExecutorFactory";
+  }
+
+  GraphExecutorFactoryWrapper()
+      : GraphExecutorFactoryWrapper(ThreadLocalStore::ThreadLocal()->mod) {}
+
+  c10::List<at::Tensor> forward(const c10::List<at::Tensor>& inputs) {
+    int input_length = inputs.size();
+
+    if (!executor_.defined()) {
+      TORCH_CHECK(input_length > 0, "Receive empty list of input tensors");
+      DLDevice input_device = getDevice(inputs.get(0));
+
+      auto tmp = executor_factory_.GetFunction("default");
+
+      executor_ = tmp(input_device);
+    }
+
+    std::vector<DLManagedTensor*> tensors;
+
+    for (int i = 0; i < input_length; ++i) tensors.push_back(toDLPack(inputs[i]));
+
+    tvm::runtime::PackedFunc run = executor_.GetFunction("run");
+    tvm::runtime::PackedFunc set_input = executor_.GetFunction("set_input");
+    tvm::runtime::PackedFunc get_output = executor_.GetFunction("get_output");
+    tvm::runtime::PackedFunc get_num_outputs = executor_.GetFunction("get_num_outputs");
+
+    for (int k = 0; k < input_length; ++k) {
+      set_input(k, &tensors[k]->dl_tensor);
+    }
+
+    run();
+
+    int64_t output_length = get_num_outputs();
+
+    c10::List<at::Tensor> outputs;
+    outputs.reserve(output_length);
+
+    for (int k = 0; k < output_length; ++k) {
+      tvm::runtime::NDArray results = get_output(k);
+      at::Tensor atTensor = at::fromDLPack(results.ToDLPack());
+      outputs.emplace_back(atTensor);
+    }
+
+    for (int k = 0; k < input_length; ++k) {
+      tensors[k]->deleter(tensors[k]);
+    }
+    return outputs;
+  }
+
+  SerializationType Serialize() { return serialize(executor_factory_); }
+
+  explicit GraphExecutorFactoryWrapper(SerializationType state) {
+    executor_factory_ = deserialize(state);
+  }
+
+ private:
+  tvm::runtime::Module executor_factory_;
+  tvm::runtime::Module executor_;
+};
+
+TVM_REGISTER_GLOBAL("tvmtorch.save_runtime_mod").set_body_typed([](tvm::runtime::Module mod) {
+  ThreadLocalStore::ThreadLocal()->mod = mod;
+});
+
+TORCH_LIBRARY(tvm_torch, m) {
+  m.class_<OperatorModuleWrapper>("OperatorModuleWrapper")
+      .def(torch::init<>())
+      .def("forward", &OperatorModuleWrapper::forward)
+      .def_pickle(
+          [](const c10::intrusive_ptr<OperatorModuleWrapper>& self) -> SerializationType {
+            return self->Serialize();
+          },
+          [](SerializationType state) {
+            return c10::make_intrusive<OperatorModuleWrapper>(state);
+          });
+}
+
+TORCH_LIBRARY(tvm_tuning, m) {

Review Comment:
   Is there a need for separate namespaces, `tvm_torch` and `tvm_tuning`? I 
think only `tvm_torch` is needed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
