This is an automated email from the ASF dual-hosted git repository.

bohan pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new cc03780b1c [REFACTOR] Followup cleanup of relay phase out (#17681)
cc03780b1c is described below

commit cc03780b1cee0a06a26161181aede98b3a39d00f
Author: Tianqi Chen <[email protected]>
AuthorDate: Wed Feb 26 09:48:52 2025 -0500

    [REFACTOR] Followup cleanup of relay phase out (#17681)
---
 CMakeLists.txt                                   |   4 -
 cmake/modules/contrib/ExampleTargetHooks.cmake   |  19 --
 cmake/modules/contrib/LibTorch.cmake             |  34 ---
 cmake/modules/contrib/NNPack.cmake               |  38 ----
 cmake/modules/contrib/UMA.cmake                  |  22 --
 include/tvm/runtime/contrib/libtorch_runtime.h   |  40 ----
 include/tvm/runtime/debug.h                      |  54 -----
 include/tvm/runtime/executor_info.h              |  39 ----
 include/tvm/runtime/name_transforms.h            |  43 ----
 src/relax/ir/dataflow_pattern_functor.cc         |   2 +-
 src/runtime/contrib/dnnl/dnnl_json_runtime.cc    |   4 +-
 src/runtime/contrib/libtorch/libtorch_runtime.cc | 173 ---------------
 src/runtime/contrib/nnpack/convolution.cc        | 264 -----------------------
 src/runtime/contrib/nnpack/fully_connected.cc    |  63 ------
 src/runtime/contrib/nnpack/nnpack_utils.cc       |  62 ------
 src/runtime/contrib/nnpack/nnpack_utils.h        |  42 ----
 src/runtime/debug.cc                             | 128 -----------
 src/runtime/hexagon/profiler/README.md           |  14 --
 src/runtime/meta_data.h                          |   1 -
 src/runtime/name_transforms.cc                   |  44 ----
 src/target/source/source_module.cc               |   1 -
 tests/scripts/release/make_notes.py              |   1 -
 22 files changed, 3 insertions(+), 1089 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 24504047d8..a86bc4cc33 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -483,20 +483,16 @@ include(cmake/modules/contrib/BLAS.cmake)
 include(cmake/modules/contrib/DNNL.cmake)
 include(cmake/modules/contrib/AMX.cmake)
 include(cmake/modules/contrib/CUTLASS.cmake)
-include(cmake/modules/contrib/ExampleTargetHooks.cmake)
 include(cmake/modules/contrib/Random.cmake)
 include(cmake/modules/contrib/Posit.cmake)
 include(cmake/modules/contrib/MSCCLPP.cmake)
 include(cmake/modules/contrib/Sort.cmake)
-include(cmake/modules/contrib/NNPack.cmake)
-include(cmake/modules/contrib/LibTorch.cmake)
 include(cmake/modules/contrib/TFLite.cmake)
 include(cmake/modules/contrib/CoreML.cmake)
 include(cmake/modules/contrib/BNNS.cmake)
 include(cmake/modules/contrib/ArmComputeLib.cmake)
 include(cmake/modules/contrib/TensorRT.cmake)
 include(cmake/modules/contrib/NNAPI.cmake)
-include(cmake/modules/contrib/UMA.cmake)
 include(cmake/modules/contrib/MSC.cmake)
 include(cmake/modules/contrib/vllm.cmake)
 include(cmake/modules/Git.cmake)
diff --git a/cmake/modules/contrib/ExampleTargetHooks.cmake b/cmake/modules/contrib/ExampleTargetHooks.cmake
deleted file mode 100644
index 88035724b1..0000000000
--- a/cmake/modules/contrib/ExampleTargetHooks.cmake
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-tvm_file_glob(GLOB EXAMPLE_TARGET_HOOKS_SRC src/relay/backend/contrib/example_target_hooks/*.cc)
-list(APPEND COMPILER_SRCS ${EXAMPLE_TARGET_HOOKS_SRC})
diff --git a/cmake/modules/contrib/LibTorch.cmake b/cmake/modules/contrib/LibTorch.cmake
deleted file mode 100644
index 85881410ad..0000000000
--- a/cmake/modules/contrib/LibTorch.cmake
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if(USE_LIBTORCH)
-  find_package(Torch REQUIRED PATHS ${USE_LIBTORCH}/share/cmake/Torch
-               )
-  list(APPEND TVM_RUNTIME_LINKER_LIBS ${TORCH_LIBRARIES})
-  include_directories(${TORCH_INCLUDE_DIRS})
-
-  file(GLOB LIBTORCH_RELAY_CONTRIB_SRC
-    src/relay/backend/contrib/libtorch/libtorch_codegen.cc
-    )
-  list(APPEND COMPILER_SRCS ${LIBTORCH_RELAY_CONTRIB_SRC})
-
-  file(GLOB LIBTORCH_RUNTIME_CONTRIB_SRC
-    src/runtime/contrib/libtorch/libtorch_runtime.cc
-    )
-  list(APPEND RUNTIME_SRCS ${LIBTORCH_RUNTIME_CONTRIB_SRC})
-
-endif(USE_LIBTORCH)
diff --git a/cmake/modules/contrib/NNPack.cmake b/cmake/modules/contrib/NNPack.cmake
deleted file mode 100644
index 86059b298f..0000000000
--- a/cmake/modules/contrib/NNPack.cmake
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if(USE_NNPACK)
-  if(NNPACK_PATH STREQUAL "")
-    set(NNPACK_PATH ${CMAKE_CURRENT_SOURCE_DIR}/NNPack)
-  endif()
-       set(PTHREAD_POOL_PATH ${NNPACK_PATH}/deps/pthreadpool)
-  tvm_file_glob(GLOB NNPACK_CONTRIB_SRC src/runtime/contrib/nnpack/*.cc)
-  list(APPEND RUNTIME_SRCS ${NNPACK_CONTRIB_SRC})
-       include_directories(${NNPACK_PATH}/include)
-       include_directories(${PTHREAD_POOL_PATH}/include)
-  find_library(NNPACK_CONTRIB_LIB nnpack ${NNPACK_PATH}/lib)
-  find_library(NNPACK_PTHREAD_CONTRIB_LIB pthreadpool ${NNPACK_PATH}/lib)
-  find_library(NNPACK_CPUINFO_CONTRIB_LIB cpuinfo ${NNPACK_PATH}/lib)
-  find_library(NNPACK_CLOG_CONTRIB_LIB clog ${NNPACK_PATH}/lib)
-
-  list(APPEND TVM_RUNTIME_LINKER_LIBS ${NNPACK_CONTRIB_LIB})
-  list(APPEND TVM_RUNTIME_LINKER_LIBS ${NNPACK_PTHREAD_CONTRIB_LIB})
-  list(APPEND TVM_RUNTIME_LINKER_LIBS ${NNPACK_CPUINFO_CONTRIB_LIB})
-  if(NNPACK_CLOG_CONTRIB_LIB)
-    list(APPEND TVM_RUNTIME_LINKER_LIBS ${NNPACK_CLOG_CONTRIB_LIB})
-  endif(NNPACK_CLOG_CONTRIB_LIB)
-endif(USE_NNPACK)
diff --git a/cmake/modules/contrib/UMA.cmake b/cmake/modules/contrib/UMA.cmake
deleted file mode 100644
index 1d3a9a30ec..0000000000
--- a/cmake/modules/contrib/UMA.cmake
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if(USE_UMA)
-  file(GLOB COMPILER_UMA_SRCS
-       CONFIGURE_DEPENDS src/relay/backend/contrib/uma/*)
-  list(APPEND COMPILER_SRCS ${COMPILER_UMA_SRCS})
-endif(USE_UMA)
diff --git a/include/tvm/runtime/contrib/libtorch_runtime.h b/include/tvm/runtime/contrib/libtorch_runtime.h
deleted file mode 100644
index 2645fb94d1..0000000000
--- a/include/tvm/runtime/contrib/libtorch_runtime.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \brief runtime implementation for LibTorch/TorchScript.
- */
-#ifndef TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_
-#define TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_
-#include <tvm/runtime/module.h>
-
-#include <string>
-
-namespace tvm {
-namespace runtime {
-namespace contrib {
-
-runtime::Module TorchRuntimeCreate(const String& symbol_name,
-                                   const std::string& serialized_function);
-
-}  // namespace contrib
-}  // namespace runtime
-}  // namespace tvm
-
-#endif  // TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_
diff --git a/include/tvm/runtime/debug.h b/include/tvm/runtime/debug.h
deleted file mode 100644
index 29d812b74d..0000000000
--- a/include/tvm/runtime/debug.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file tvm/runtime/debug.h
- * \brief Helpers for debugging at runtime.
- */
-#ifndef TVM_RUNTIME_DEBUG_H_
-#define TVM_RUNTIME_DEBUG_H_
-
-#include <tvm/runtime/container/adt.h>
-#include <tvm/runtime/ndarray.h>
-
-#include <ostream>
-#include <string>
-
-namespace tvm {
-namespace runtime {
-
-/*!
- * \brief Helpers to describe runtime objects in human-friendly form. For \p nd_arrays we show their
- * shapes and dtypes, but also their contents if 'small' and on the \p host_device (mostly so that
- * we can see dynamic shapes as they are computed). For \p adts we show the ADT fields. For
- * \p objects we dispatch to one of the above as appropriate.
- */
-void AppendNDArray(std::ostream& os, const NDArray& nd_array, const DLDevice& host_device,
-                   bool show_content = true);
-void AppendADT(std::ostream& os, const ADT& adt, const DLDevice& host_device,
-               bool show_content = true);
-void AppendRuntimeObject(std::ostream& os, const ObjectRef& object, const DLDevice& host_device,
-                         bool show_content = true);
-std::string RuntimeObject2String(const ObjectRef& object, const DLDevice& host_device,
-                                 bool show_content = true);
-
-}  // namespace runtime
-}  // namespace tvm
-
-#endif  // TVM_RUNTIME_DEBUG_H_
diff --git a/include/tvm/runtime/executor_info.h b/include/tvm/runtime/executor_info.h
deleted file mode 100644
index 5b3572120c..0000000000
--- a/include/tvm/runtime/executor_info.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file executor_info.h
- * \brief Executor information
- */
-#ifndef TVM_RUNTIME_EXECUTOR_INFO_H_
-#define TVM_RUNTIME_EXECUTOR_INFO_H_
-
-namespace tvm {
-namespace runtime {
-
-/*! \brief Value used to indicate the graph executor. */
-static constexpr const char* kTvmExecutorGraph = "graph";
-
-/*! \brief Value used to indicate the aot executor. */
-static constexpr const char* kTvmExecutorAot = "aot";
-
-}  // namespace runtime
-}  // namespace tvm
-
-#endif  // TVM_RUNTIME_EXECUTOR_INFO_H_
diff --git a/include/tvm/runtime/name_transforms.h b/include/tvm/runtime/name_transforms.h
deleted file mode 100644
index 267dda4158..0000000000
--- a/include/tvm/runtime/name_transforms.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file tvm/runtime/name_transforms.h
- * \brief Transformations which are applied on names to generate appropriately named.
- *  These functions are used in both Runtime and Backend.
- */
-#ifndef TVM_RUNTIME_NAME_TRANSFORMS_H_
-#define TVM_RUNTIME_NAME_TRANSFORMS_H_
-
-#include <string>
-
-namespace tvm {
-namespace runtime {
-
-/*!
- * \brief Sanitize name for output into compiler artifacts
- * \param name Original name
- * \return Sanitized name
- */
-std::string SanitizeName(const std::string& name);
-
-}  // namespace runtime
-}  // namespace tvm
-
-#endif  // TVM_RUNTIME_NAME_TRANSFORMS_H_
diff --git a/src/relax/ir/dataflow_pattern_functor.cc b/src/relax/ir/dataflow_pattern_functor.cc
index 655fa2eea1..620a2d58db 100644
--- a/src/relax/ir/dataflow_pattern_functor.cc
+++ b/src/relax/ir/dataflow_pattern_functor.cc
@@ -18,7 +18,7 @@
  */
 
 /*!
- * \file src/tvm/relay/dataflow_matcher.cc
+ * \file src/tvm/relax/dataflow_matcher.cc
  * \brief The dataflow pattern matcher for Relay.
  */
 
diff --git a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
index f29628d56b..f0dfcc32c5 100644
--- a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
+++ b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
@@ -325,7 +325,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
     // dst_layout == "" means to use data_layout
     if (dst_layout.empty()) dst_layout = src_layout;
 
-    // Minus one for DNNL representation. No dilation for DNNL is 0, for relay is 1.
+    // Minus one for DNNL representation. No dilation for DNNL is 0
     for (auto& d : dilates) d--;
 
     // Take into account provided layout strings
@@ -684,7 +684,7 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
     std::vector<int64_t> padding_l(padding.begin(), padding.begin() + padding.size() / 2);
     std::vector<int64_t> padding_r(padding.begin() + padding.size() / 2, padding.end());
 
-    // Minus one for DNNL representation. No dilation for DNNL is 0, for relay is 1.
+    // Minus one for DNNL representation. No dilation for DNNL is 0
     for (auto& d : dilates) d--;
 
     // Attributes related to AvgPool
diff --git a/src/runtime/contrib/libtorch/libtorch_runtime.cc b/src/runtime/contrib/libtorch/libtorch_runtime.cc
deleted file mode 100644
index 01d927f911..0000000000
--- a/src/runtime/contrib/libtorch/libtorch_runtime.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file src/runtime/contrib/libtorch/libtorch_runtime.cc
- * \brief runtime implementation for LibTorch/TorchScript.
- */
-
-// we do not want clang to reorder our includes
-// clang-format off
-#include <tvm/runtime/module.h>
-#include <tvm/runtime/ndarray.h>
-#include <tvm/runtime/registry.h>
-#include <tvm/runtime/contrib/libtorch_runtime.h>
-
-#include <ATen/dlpack.h>
-#include <ATen/DLConvertor.h>
-#include <torch/csrc/jit/serialization/import.h>
-#include <torch/torch.h>
-
-// clang-format on
-
-#include <cstddef>
-#include <string>
-#include <tuple>
-#include <type_traits>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-namespace tvm {
-namespace runtime {
-namespace contrib {
-
-static void monly_deleter(DLManagedTensor* self) { delete self; }
-
-void run_torch_module(torch::jit::Module* module, TVMArgs args, TVMRetValue* rv) {
-  std::vector<torch::jit::IValue> inputs;
-  std::vector<torch::Tensor> outputs;
-  auto m = module->get_method("forward");
-  for (int i = 0; i < args.size(); i++) {
-    const DLTensor* arg;
-    if (args[i].IsObjectRef<NDArray>()) {
-      NDArray arr = args[i];
-      arg = arr.operator->();
-    } else {
-      arg = args[i].operator DLTensor*();
-    }
-    DLManagedTensor* inp = new DLManagedTensor{};
-    inp->dl_tensor = *arg;
-    inp->deleter = &monly_deleter;
-    // m.num_inputs includes the self argument of forward(self, ...)
-    // num_inputs - 1 is the number of (Tensor) inputs
-    if (i < static_cast<int>(m.num_inputs()) - 1) {
-      inputs.emplace_back(at::fromDLPack(inp));
-    } else {
-      outputs.emplace_back(at::fromDLPack(inp));
-    }
-  }
-  ICHECK(outputs.size() == 1) << "wrong number of args, can handle only one output";
-  torch::Tensor res = module->forward(inputs).toTensor();
-  outputs[0].copy_(res);  // too bad
-}
-
-/*!
- * \brief A json runtime that executes the serialized JSON format. This runtime
- * can be extended by user defined runtime for execution.
- */
-class TorchModuleNode : public ModuleNode {
- public:
-  TorchModuleNode(const std::string& symbol_name, const torch::jit::Module& module)
-      : symbol_name_(symbol_name), module_(module) {}
-
-  const char* type_key() const { return "torch"; }
-  /*! \brief Get the property of the runtime module .*/
-  int GetPropertyMask() const final {
-    return ModulePropertyMask::kBinarySerializable | ModulePropertyMask::kRunnable;
-  }
-
-  /*!
-   * \brief Get a packed function.
-   * \param name The name/symbol of the function.
-   * \param sptr_to_self The pointer to the module node.
-   * \return The packed function.
-   */
-  virtual PackedFunc GetFunction(const String& name, const ObjectPtr<Object>& sptr_to_self) {
-    if (name == "get_symbol") {
-      return PackedFunc(
-          [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->symbol_name_; });
-    } else if (name == "get_const_vars") {
-      return PackedFunc(
-          [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = Array<String>{}; });
-    } else if (this->symbol_name_ == name) {
-      return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-        run_torch_module(&module_, args, rv);
-      });
-    } else if ("__init_" + this->symbol_name_ == name) {
-      // The function to initialize constant tensors.
-      return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = 0; });
-    } else {
-      return PackedFunc(nullptr);
-    }
-  }
-
-  virtual void SaveToBinary(dmlc::Stream* stream) {
-    // Save the symbol
-    stream->Write(symbol_name_);
-    // Save the module
-    std::stringstream str;
-    module_.save(str);
-    stream->Write(str.str());
-  }
-
-  static Module LoadFromBinary(void* strm) {
-    dmlc::Stream* stream = static_cast<dmlc::Stream*>(strm);
-    std::string symbol;
-    std::string module_str;
-    // Load the symbol
-    ICHECK(stream->Read(&symbol)) << "Loading symbol name failed";
-    ICHECK(stream->Read(&module_str)) << "Loading module str failed";
-    std::stringstream str(module_str);
-    torch::jit::Module mod = torch::jit::load(str);
-    auto n = make_object<TorchModuleNode>(symbol, mod);
-    return Module(n);
-  }
-
-  /*!
-   * \brief Get the source generated by codegen.
-   *
-   * \param format the format to return.
-   * \return A string of JSON.
-   */
-  String GetSource(const String& format = "json") override {
-    return module_.dump_to_str(true, true, true);
-  }
-
- protected:
-  /*! \brief The only subgraph name for this module. */
-  std::string symbol_name_;
-  /*! \brief Module. */
-  torch::jit::Module module_;
-};
-
-runtime::Module TorchRuntimeCreate(const String& symbol_name,
-                                   const std::string& serialized_function) {
-  std::stringstream str(serialized_function);
-  torch::jit::Module mod = torch::jit::load(str);
-  auto n = make_object<TorchModuleNode>(symbol_name, mod);
-  return runtime::Module(n);
-}
-
-TVM_REGISTER_GLOBAL("runtime.module.loadbinary_torch")
-    .set_body_typed(TorchModuleNode::LoadFromBinary);
-
-}  // namespace contrib
-}  // namespace runtime
-}  // namespace tvm
diff --git a/src/runtime/contrib/nnpack/convolution.cc b/src/runtime/contrib/nnpack/convolution.cc
deleted file mode 100644
index 2362e31f92..0000000000
--- a/src/runtime/contrib/nnpack/convolution.cc
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file Use external nnpack library call.
- */
-#include <nnpack.h>
-#include <tvm/runtime/data_type.h>
-#include <tvm/runtime/device_api.h>
-#include <tvm/runtime/logging.h>
-#include <tvm/runtime/registry.h>
-
-#include "nnpack_utils.h"
-
-namespace tvm {
-namespace contrib {
-using namespace runtime;
-
-TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference")
-    .set_body([](TVMArgs args, TVMRetValue* ret) {
-      NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal();
-      static std::once_flag flag;
-      std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); });
-      DLTensor* input = args[0];
-      DLTensor* kernel = args[1];
-      DLTensor* bias = nullptr;
-      if (args[2].type_code() == kTVMDLTensorHandle) {
-        bias = args[2];
-      }
-      DLTensor* output = args[3];
-      uint64_t pad_top = args[4], pad_right = args[5], pad_bottom = args[6], pad_left = args[7];
-      nnp_padding input_padding{pad_top, pad_right, pad_bottom, pad_left};
-      uint64_t stride_width = args[8], stride_height = args[9];
-      nnp_size stride_size{stride_width, stride_height};
-      NNPackConfig(args[10]);
-
-      uint64_t algo_ = args[11];
-      nnp_convolution_algorithm algo = 
static_cast<nnp_convolution_algorithm>(algo_);
-      ICHECK_EQ(input->ndim, 4);
-      ICHECK_EQ(kernel->ndim, 4);
-      if (bias) {
-        ICHECK_EQ(bias->ndim, 1);
-      }
-      ICHECK_EQ(output->ndim, 4);
-      ICHECK_EQ(input->shape[1], kernel->shape[1]);
-      ICHECK_EQ(input->shape[0], output->shape[0]);
-      size_t input_channels = input->shape[1];
-      ICHECK_EQ(output->shape[1], kernel->shape[0]);
-      if (bias) {
-        ICHECK_EQ(output->shape[1], bias->shape[0]);
-      }
-      size_t output_channels = output->shape[1];
-      nnp_size input_size{static_cast<size_t>(input->shape[2]),
-                          static_cast<size_t>(input->shape[3])};
-      nnp_size kernel_size{static_cast<size_t>(kernel->shape[2]),
-                           static_cast<size_t>(kernel->shape[3])};
-      ICHECK(input->strides == nullptr);
-      ICHECK(kernel->strides == nullptr);
-      if (bias) {
-        ICHECK(bias->strides == nullptr);
-      }
-
-      ICHECK(TypeMatch(input->dtype, kDLFloat, 32));
-      ICHECK(TypeMatch(kernel->dtype, kDLFloat, 32));
-      if (bias) {
-        ICHECK(TypeMatch(bias->dtype, kDLFloat, 32));
-      }
-      ICHECK(TypeMatch(output->dtype, kDLFloat, 32));
-
-      // Allocate a zero-bias if we don't pass one in.
-      std::unique_ptr<std::vector<float>> zero_bias;
-      if (!bias) {
-        zero_bias.reset(new std::vector<float>(output->shape[1], 0.0));
-      }
-
-      size_t workspace_size = 0;
-      nnp_status status = nnp_convolution_inference(
-          algo, nnp_convolution_transform_strategy_compute, input_channels, output_channels,
-          input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr,
-          nullptr, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr);
-      ICHECK_EQ(status, nnp_status_success);
-
-      // Division with rounding up, in case size is not multiple of sizeof(float)
-      const size_t workspace_elements = (workspace_size + sizeof(float) - 1) / sizeof(float);
-
-      Device dev = input->device;
-      DLDataType type_hint = input->dtype;
-
-      DeviceAPI* cpu_api = DeviceAPI::Get(dev);
-      void* workspace_buffer =
-          cpu_api->AllocWorkspace(dev, workspace_elements * sizeof(float), type_hint);
-      ICHECK(workspace_buffer != nullptr);
-
-      for (auto n = 0; n < input->shape[0]; ++n) {
-        nnp_status status = nnp_convolution_inference(
-            algo, nnp_convolution_transform_strategy_compute, input_channels, output_channels,
-            input_size, input_padding, kernel_size, stride_size,
-            static_cast<float*>(input->data) +
-                n * input->shape[1] * input->shape[2] * input->shape[3],
-            static_cast<float*>(kernel->data),
-            bias ? static_cast<float*>(bias->data) : zero_bias->data(),
-            static_cast<float*>(output->data) +
-                n * output->shape[1] * output->shape[2] * output->shape[3],
-            workspace_buffer, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool,
-            nullptr);
-
-        ICHECK_EQ(status, nnp_status_success);
-      }
-      cpu_api->FreeWorkspace(dev, workspace_buffer);
-    });
-
-TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_without_weight_transform")
-    .set_body([](TVMArgs args, TVMRetValue* ret) {
-      NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal();
-      static std::once_flag flag;
-      std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); });
-      DLTensor* input = args[0];
-      DLTensor* transformed_kernel = args[1];
-      DLTensor* bias = nullptr;
-      if (args[2].type_code() == kTVMDLTensorHandle) {
-        bias = args[2];
-      }
-      DLTensor* output = args[3];
-      uint64_t pad_top = args[4], pad_right = args[5], pad_bottom = args[6], pad_left = args[7];
-      nnp_padding input_padding{pad_top, pad_right, pad_bottom, pad_left};
-      uint64_t stride_width = args[8], stride_height = args[9];
-      nnp_size stride_size{stride_width, stride_height};
-      NNPackConfig(args[10]);
-
-      uint64_t algo_ = args[11];
-      nnp_convolution_algorithm algo = static_cast<nnp_convolution_algorithm>(algo_);
-      ICHECK_EQ(input->ndim, 4);
-      if (bias) {
-        ICHECK_EQ(bias->ndim, 1);
-      }
-      ICHECK_EQ(output->ndim, 4);
-      ICHECK_EQ(input->shape[0], output->shape[0]);
-      size_t input_channels = input->shape[1];
-      if (bias) {
-        ICHECK_EQ(output->shape[1], bias->shape[0]);
-      }
-      size_t output_channels = output->shape[1];
-      nnp_size input_size{static_cast<size_t>(input->shape[2]),
-                          static_cast<size_t>(input->shape[3])};
-      nnp_size kernel_size{3, 3};
-      ICHECK(input->strides == nullptr);
-      ICHECK(transformed_kernel->strides == nullptr);
-      if (bias) {
-        ICHECK(bias->strides == nullptr);
-      }
-
-      ICHECK(TypeMatch(input->dtype, kDLFloat, 32));
-      ICHECK(TypeMatch(transformed_kernel->dtype, kDLFloat, 32));
-      if (bias) {
-        ICHECK(TypeMatch(bias->dtype, kDLFloat, 32));
-      }
-      ICHECK(TypeMatch(output->dtype, kDLFloat, 32));
-
-      // Allocate a zero-bias if we don't pass one in.
-      std::unique_ptr<std::vector<float>> zero_bias;
-      if (!bias) {
-        zero_bias.reset(new std::vector<float>(output->shape[1], 0.0));
-      }
-
-      size_t workspace_size = 0;
-      nnp_status status = nnp_convolution_inference(
-          algo, nnp_convolution_transform_strategy_reuse, input_channels, output_channels,
-          input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr,
-          nullptr, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool, nullptr);
-      ICHECK_EQ(status, nnp_status_success);
-
-      // Division with rounding up, in case size is not multiple of sizeof(float)
-      const size_t workspace_elements = (workspace_size + sizeof(float) - 1) / sizeof(float);
-
-      Device dev = input->device;
-      DLDataType type_hint = input->dtype;
-
-      DeviceAPI* cpu_api = DeviceAPI::Get(dev);
-      void* workspace_buffer =
-          cpu_api->AllocWorkspace(dev, workspace_elements * sizeof(float), type_hint);
-      ICHECK(workspace_buffer != nullptr);
-
-      for (auto n = 0; n < input->shape[0]; ++n) {
-        nnp_status status = nnp_convolution_inference(
-            algo, nnp_convolution_transform_strategy_reuse, input_channels, output_channels,
-            input_size, input_padding, kernel_size, stride_size,
-            static_cast<float*>(input->data) +
-                n * input->shape[1] * input->shape[2] * input->shape[3],
-            static_cast<float*>(transformed_kernel->data),
-            bias ? static_cast<float*>(bias->data) : zero_bias->data(),
-            static_cast<float*>(output->data) +
-                n * output->shape[1] * output->shape[2] * output->shape[3],
-            workspace_buffer, &workspace_size, nnp_activation_identity, nullptr, entry->threadpool,
-            nullptr);
-        ICHECK_EQ(status, nnp_status_success);
-      }
-
-      cpu_api->FreeWorkspace(dev, workspace_buffer);
-    });
-
-TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.convolution_inference_weight_transform")
-    .set_body([](TVMArgs args, TVMRetValue* ret) {
-      NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal();
-      static std::once_flag flag;
-      std::call_once(flag, []() { ICHECK_EQ(nnp_initialize(), nnp_status_success); });
-      DLTensor* kernel = args[0];
-      DLTensor* transformed_kernel = args[1];
-      // Dummy sizes
-      nnp_padding input_padding{1, 1, 1, 1};
-      nnp_size stride_size{1, 1};
-
-      nnp_size input_size{100, 100};
-
-      NNPackConfig(args[2]);
-
-      uint64_t algo_ = args[3];
-      nnp_convolution_algorithm algo = static_cast<nnp_convolution_algorithm>(algo_);
-      ICHECK_EQ(kernel->ndim, 4);
-      size_t input_channels = kernel->shape[1];
-      size_t output_channels = kernel->shape[0];
-      ICHECK_EQ(kernel->shape[2], 3);
-      ICHECK_EQ(kernel->shape[3], 3);
-      nnp_size kernel_size{static_cast<size_t>(kernel->shape[2]),
-                           static_cast<size_t>(kernel->shape[3])};
-      ICHECK(kernel->strides == nullptr);
-      ICHECK(TypeMatch(kernel->dtype, kDLFloat, 32));
-
-      size_t transformed_kernel_size = 0;
-      nnp_status status;
-      status = nnp_convolution_inference(
-          algo, nnp_convolution_transform_strategy_precompute, input_channels, output_channels,
-          input_size, input_padding, kernel_size, stride_size, nullptr, nullptr, nullptr, nullptr,
-          nullptr, &transformed_kernel_size, nnp_activation_identity, nullptr, entry->threadpool,
-          nullptr);
-      ICHECK_EQ(status, nnp_status_success);
-
-      ICHECK_LE(transformed_kernel_size, GetDataSize(*transformed_kernel));
-
-      status = nnp_convolution_inference(
-          algo, nnp_convolution_transform_strategy_precompute, input_channels, output_channels,
-          input_size, input_padding, kernel_size, stride_size, nullptr,
-          static_cast<float*>(kernel->data), nullptr, nullptr,
-          static_cast<float*>(transformed_kernel->data), &transformed_kernel_size,
-          nnp_activation_identity, nullptr, entry->threadpool, nullptr);
-      ICHECK_EQ(status, nnp_status_success);
-    });
-}  // namespace contrib
-}  // namespace tvm
diff --git a/src/runtime/contrib/nnpack/fully_connected.cc b/src/runtime/contrib/nnpack/fully_connected.cc
deleted file mode 100644
index 28570026ad..0000000000
--- a/src/runtime/contrib/nnpack/fully_connected.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file Use external nnpack library call.
- */
-#include <nnpack.h>
-#include <tvm/runtime/data_type.h>
-#include <tvm/runtime/logging.h>
-#include <tvm/runtime/registry.h>
-
-#include "nnpack_utils.h"
-
-namespace tvm {
-namespace contrib {
-
-using namespace runtime;
-
-// matrix multiplication for row major
-TVM_REGISTER_GLOBAL("tvm.contrib.nnpack.fully_connected_inference")
-    .set_body([](TVMArgs args, TVMRetValue* ret) {
-      NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal();
-      nnp_initialize();
-      DLTensor* A = args[0];
-      DLTensor* B = args[1];
-      DLTensor* C = args[2];
-      NNPackConfig(args[3]);
-
-      ICHECK_EQ(A->ndim, 1);
-      ICHECK_EQ(B->ndim, 2);
-      ICHECK_EQ(C->ndim, 1);
-      ICHECK_EQ(B->shape[0], C->shape[0]);
-      ICHECK_EQ(B->shape[1], A->shape[0]);
-      ICHECK(C->strides == nullptr);
-      ICHECK(B->strides == nullptr);
-      ICHECK(A->strides == nullptr);
-      ICHECK(TypeMatch(A->dtype, kDLFloat, 32));
-      ICHECK(TypeMatch(B->dtype, kDLFloat, 32));
-      ICHECK(TypeMatch(C->dtype, kDLFloat, 32));
-
-      nnp_fully_connected_inference(B->shape[1], B->shape[0], static_cast<float*>(A->data),
-                                    static_cast<float*>(B->data), static_cast<float*>(C->data),
-                                    entry->threadpool);
-    });
-
-}  // namespace contrib
-}  // namespace tvm
diff --git a/src/runtime/contrib/nnpack/nnpack_utils.cc b/src/runtime/contrib/nnpack/nnpack_utils.cc
deleted file mode 100644
index 2fd6f69bf2..0000000000
--- a/src/runtime/contrib/nnpack/nnpack_utils.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file Use external nnpack library call.
- */
-#include "nnpack_utils.h"
-
-namespace tvm {
-namespace contrib {
-using namespace runtime;
-
-typedef dmlc::ThreadLocalStore<NNPackThreadLocalEntry> NNPackThreadLocalStore;
-
-NNPackThreadLocalEntry* NNPackThreadLocalEntry::ThreadLocal() {
-  return NNPackThreadLocalStore::Get();
-}
-
-bool NNPackConfig(uint64_t nthreads) {
-  NNPackThreadLocalEntry* entry = NNPackThreadLocalEntry::ThreadLocal();
-  if (entry->threadpool && pthreadpool_get_threads_count(entry->threadpool) == nthreads) {
-    ICHECK_NE(nthreads, 1);
-    return true;
-  }
-  if (entry->threadpool) {
-    pthreadpool_destroy(entry->threadpool);
-    entry->threadpool = nullptr;
-  }
-
-  if (nthreads == 1) {
-    // a null threadpool means the function is invoked on the calling thread,
-    // which is the desired logic for nthreads == 1
-    ICHECK(!entry->threadpool);
-    return true;
-  }
-
-  entry->threadpool = pthreadpool_create(nthreads);
-  return true;
-}
-
-TVM_REGISTER_GLOBAL("contrib.nnpack._initialize").set_body([](TVMArgs args, 
TVMRetValue* ret) {
-  *ret = nnp_initialize();
-});
-
-}  // namespace contrib
-}  // namespace tvm
diff --git a/src/runtime/contrib/nnpack/nnpack_utils.h b/src/runtime/contrib/nnpack/nnpack_utils.h
deleted file mode 100644
index ed0312dac4..0000000000
--- a/src/runtime/contrib/nnpack/nnpack_utils.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file Use external nnpack library call.
- */
-#ifndef TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
-#define TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
-#include <dmlc/thread_local.h>
-#include <nnpack.h>
-#include <tvm/runtime/data_type.h>
-#include <tvm/runtime/logging.h>
-#include <tvm/runtime/registry.h>
-
-namespace tvm {
-namespace contrib {
-
-struct NNPackThreadLocalEntry {
-  pthreadpool_t threadpool{nullptr};
-  static NNPackThreadLocalEntry* ThreadLocal();
-};
-
-bool NNPackConfig(uint64_t nthreads);
-}  // namespace contrib
-}  // namespace tvm
-#endif  // TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
diff --git a/src/runtime/debug.cc b/src/runtime/debug.cc
deleted file mode 100644
index 37ab6ec580..0000000000
--- a/src/runtime/debug.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file src/runtime/debug.cc
- * \brief Helpers for debugging at runtime.
- */
-
-#include <tvm/runtime/debug.h>
-
-namespace tvm {
-namespace runtime {
-
-template <typename T>
-void AppendMembers(std::ostream& os, const NDArray& nd_array, int64_t dim0) {
-  os << "=[";
-  for (int64_t i = 0; i < dim0; ++i) {
-    if (i > 0) {
-      os << ",";
-    }
-    os << reinterpret_cast<T*>(nd_array->data)[i];
-  }
-  os << "]";
-}
-
-void AppendNDArray(std::ostream& os, const NDArray& nd_array, const DLDevice& host_device,
-                   bool show_contents) {
-  os << "NDArray[";
-  os << "(";
-  for (int dim = 0; dim < nd_array->ndim; ++dim) {
-    if (dim > 0) {
-      os << ",";
-    }
-    os << nd_array->shape[dim];
-  }
-  std::string basic_type = DLDataType2String(nd_array->dtype);
-  os << ")," << basic_type;
-  os << ",(" << nd_array->device.device_type;
-  os << "," << nd_array->device.device_id;
-  os << ")]";
-  if (show_contents && nd_array->device.device_type == host_device.device_type &&
-      nd_array->device.device_id == host_device.device_id) {
-    int64_t dim0;
-    if (nd_array->ndim == 0) {
-      dim0 = 1;
-    } else if (nd_array->ndim == 1) {
-      dim0 = nd_array->shape[0];
-      if (dim0 > 10) {
-        // Too large.
-        dim0 = 0;
-      }
-    } else {
-      // Not rank-1.
-      dim0 = 0;
-    }
-    if (dim0 > 0) {
-      if (basic_type == "bool") {
-        AppendMembers<bool>(os, nd_array, dim0);
-      } else if (basic_type == "int8") {
-        AppendMembers<int8_t>(os, nd_array, dim0);
-      } else if (basic_type == "int16") {
-        AppendMembers<int16_t>(os, nd_array, dim0);
-      } else if (basic_type == "int32") {
-        AppendMembers<int32_t>(os, nd_array, dim0);
-      } else if (basic_type == "int64") {
-        AppendMembers<int64_t>(os, nd_array, dim0);
-      } else if (basic_type == "uint8") {
-        AppendMembers<uint8_t>(os, nd_array, dim0);
-      } else if (basic_type == "uint16") {
-        AppendMembers<uint16_t>(os, nd_array, dim0);
-      } else if (basic_type == "uint32") {
-        AppendMembers<uint32_t>(os, nd_array, dim0);
-      } else if (basic_type == "uint64") {
-        AppendMembers<uint64_t>(os, nd_array, dim0);
-      } else if (basic_type == "float32") {
-        AppendMembers<float>(os, nd_array, dim0);
-      } else if (basic_type == "float64") {
-        AppendMembers<double>(os, nd_array, dim0);
-      }
-    }
-  }
-}
-
-void AppendADT(std::ostream& os, const ADT& adt, const DLDevice& host_device, bool show_contents) {
-  os << "ADT(" << adt->tag;
-  for (size_t i = 0; i < adt->size; ++i) {
-    os << ",";
-    AppendRuntimeObject(os, adt[i], host_device, show_contents);
-  }
-  os << ")";
-}
-
-void AppendRuntimeObject(std::ostream& os, const ObjectRef& object, const DLDevice& host_device,
-                         bool show_contents) {
-  if (auto adt = object.as<ADT>()) {
-    AppendADT(os, adt.value(), host_device, show_contents);
-  } else if (auto nd_array_cont = object.as<NDArray>()) {
-    AppendNDArray(os, nd_array_cont.value(), host_device, show_contents);
-  } else {
-    os << "?";
-  }
-}
-
-std::string RuntimeObject2String(const ObjectRef& object, const DLDevice& host_device,
-                                 bool show_contents) {
-  std::ostringstream os;
-  AppendRuntimeObject(os, object, host_device, show_contents);
-  return os.str();
-}
-
-}  // namespace runtime
-}  // namespace tvm
diff --git a/src/runtime/hexagon/profiler/README.md b/src/runtime/hexagon/profiler/README.md
index d83b7db283..fdcc94f692 100644
--- a/src/runtime/hexagon/profiler/README.md
+++ b/src/runtime/hexagon/profiler/README.md
@@ -59,20 +59,6 @@ The steps involved are as follows:
    can be altered using LWP config options as described above.
 2) Create `HexagonProfiler` object
 
-```
-with tvm.transform.PassContext(opt_level=3, config={"tir.instrument_lwp": True}):
-    lowered = tvm.relay.build(
-        relay_mod,
-        tvm.target.Target(target_hexagon, host=target_hexagon),
-        ...
-    )
-
-    # Create HexagonProfiler object. It sets the profiling mode based on the PassContext config.
-    # '--hexagon-debug' to pytest can be used to retain any temp or test directories to
-    # inspect the profiling data.
-    profiler = HexagonProfiler(lowered, hexagon_server_process, hexagon_debug)
-```
-
 4) Run the model and get the profiling data as a CSV file. It is done by post-processing
    'lwp.json' file generated during runtime.
 
diff --git a/src/runtime/meta_data.h b/src/runtime/meta_data.h
index 257c931df9..c415468088 100644
--- a/src/runtime/meta_data.h
+++ b/src/runtime/meta_data.h
@@ -26,7 +26,6 @@
 
 #include <dmlc/io.h>
 #include <dmlc/json.h>
-#include <tvm/runtime/executor_info.h>
 #include <tvm/runtime/module.h>
 #include <tvm/runtime/ndarray.h>
 #include <tvm/runtime/packed_func.h>
diff --git a/src/runtime/name_transforms.cc b/src/runtime/name_transforms.cc
deleted file mode 100644
index 608b88ac43..0000000000
--- a/src/runtime/name_transforms.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-#include <tvm/runtime/logging.h>
-#include <tvm/runtime/name_transforms.h>
-#include <tvm/runtime/registry.h>
-
-#include <algorithm>
-#include <cctype>
-#include <string>
-
-namespace tvm {
-namespace runtime {
-
-std::string SanitizeName(const std::string& name) {
-  ICHECK(!name.empty()) << "Name is empty";
-
-  auto isNotAlnum = [](char c) { return !std::isalnum(c); };
-  std::string sanitized_input = name;
-  std::replace_if(sanitized_input.begin(), sanitized_input.end(), isNotAlnum, '_');
-
-  return sanitized_input;
-}
-
-TVM_REGISTER_GLOBAL("runtime.SanitizeName").set_body_typed(SanitizeName);
-
-}  // namespace runtime
-}  // namespace tvm
diff --git a/src/target/source/source_module.cc b/src/target/source/source_module.cc
index fd16c3c85a..18ea8533a5 100644
--- a/src/target/source/source_module.cc
+++ b/src/target/source/source_module.cc
@@ -24,7 +24,6 @@
 
 #include <dmlc/memory_io.h>
 #include <tvm/runtime/module.h>
-#include <tvm/runtime/name_transforms.h>
 #include <tvm/runtime/ndarray.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
diff --git a/tests/scripts/release/make_notes.py b/tests/scripts/release/make_notes.py
index 9045accace..f09a90649a 100644
--- a/tests/scripts/release/make_notes.py
+++ b/tests/scripts/release/make_notes.py
@@ -81,7 +81,6 @@ TAG_DICT = {
     "fix": "BugFix",
     "bug": "BugFix",
     "hotfix": "BugFix",
-    "relay": "Relay",
     "qnn": "Relay",
     "quantization": "Relay",
     "relax": "Relax",
