srkreddy1238 commented on code in PR #13791:
URL: https://github.com/apache/tvm/pull/13791#discussion_r1073225743


##########
apps/cpp_rtvm/tvm_runner.cc:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file tvm_runner.cc
+ * \brief TVM model runner implementation.
+ */
+
+#include "tvm_runner.h"
+
+#include <cnpy.h>
+
+#include <fstream>
+#include <streambuf>
+#include <string>
+
+namespace tvm {
+namespace runtime {
+
+/*!
+ * \brief Get the TVM device type corresponding to a device string.
+ * \param device the target device in string format.
+ * \return The DLDeviceType corresponding to the device string.
+ */
+int GetTVMDevice(std::string device) {
+  if (!device.compare("cl")) {
+    return static_cast<int>(kDLOpenCL);
+  } else {
+    LOG(FATAL) << "TVMRunner : Unsupported device :" << device;
+  }
+}
+
+/*!
+ * \brief Constructor for TVMRunner.
+ * \param path where the TVM compiler artifacts are present.
+ * \param device the target device where we need to load the compiled model.
+ */
+TVMRunner::TVMRunner(std::string path, std::string device) : r_model_path(path), r_device(device) {
+  LOG(INFO) << "TVMRunner Constructor:" << r_model_path << " Devices:" << r_device;
+}
+
+/*!
+ * \brief Load and set up the TVM graph runtime for the given model.
+ * \return 0 on success else error code.
+ */
+int TVMRunner::Load(void) {
+  LOG(INFO) << "TVMRunner Load:" << r_model_path;
+  // Load the lib file
+  r_mod_handle = Module::LoadFromFile((r_model_path + "/mod.so").c_str(), "so");
+
+  // Read model json file
+  std::ifstream json_reader((r_model_path + "/mod.json").c_str());
+  CHECK(!json_reader.fail()) << "Failed to open json file:" << (r_model_path + "/mod.json").c_str();
+  std::string json_str((std::istreambuf_iterator<char>(json_reader)),
+                       std::istreambuf_iterator<char>());
+  json_reader.close();
+
+  // Get a reference to the graph executor create function
+  auto f_handle = tvm::runtime::Registry::Get("tvm.graph_executor.create");
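+  // Note: Registry::Get returns nullptr if "tvm.graph_executor.create" is not registered.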
+
+  // Create the graph runtime
+  r_graph_handle = (*f_handle)(json_str, r_mod_handle, GetTVMDevice(r_device), 0);
+
+  // Read params binary file
+  std::ifstream params_reader((r_model_path + "/mod.params").c_str(), std::ios::binary);
+  CHECK(!params_reader.fail()) << "Failed to open params file:"
+                               << (r_model_path + "/mod.params").c_str();
+  const std::string params_str((std::istreambuf_iterator<char>(params_reader)),
+                               std::istreambuf_iterator<char>());
+  params_reader.close();
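+  // TVMByteArray is a non-owning view, so params_str must outlive the load_params call below.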
+  TVMByteArray params_arr;
+  params_arr.data = params_str.c_str();
+  params_arr.size = params_str.length();
+
+  // Load parameters
+  r_graph_handle.GetFunction("load_params")(params_arr);
+
+  return 0;
+}
+
+/*!
+ * \brief Calculate the memory size for the NDArray.
+ * \param narr The NDArray object.
+ * \return The memory size in bytes.
+ */
+inline size_t GetMemSize(NDArray& narr) {
+  size_t size = 1;
+  for (tvm_index_t i = 0; i < narr->ndim; ++i) {
+    size *= static_cast<size_t>(narr->shape[i]);
+  }
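+  // Round the element bit width (bits * lanes) up to whole bytes.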
+  size *= (narr->dtype.bits * narr->dtype.lanes + 7) / 8;
+  return size;
+}
+
+/*!
+ * \brief Get the input alloc mem size.
+ * \param input_id The input id for which to query the mem size.
+ * \return The memory size.
+ */
+size_t TVMRunner::GetInputMemSize(std::string input_id) {
+  LOG(INFO) << "TVMRunner::GetInputMemSize:" << input_id;
+
+  NDArray in_arr = r_graph_handle.GetFunction("get_input")(input_id);
+  auto ssize = GetMemSize(in_arr);
+
+  return ssize;
+}
+
+/*!
+ * \brief Get the output alloc mem size.
+ * \param input_id The output id for which to query the mem size.
+ * \return The memory size.
+ */
+size_t TVMRunner::GetOutputMemSize(std::string input_id) {
+  LOG(INFO) << "TVMRunner::GetOutputMemSize:" << input_id;
+
+  NDArray in_arr = r_graph_handle.GetFunction("get_output")(input_id);
+  auto ssize = GetMemSize(in_arr);
+
+  return ssize;
+}
+
+/*!
+ * \brief Set the model inputs from npz file.
+ * \param inputfile the npz file from which the input tensor data is read.
+ * \return 0 on success else error code.
+ */
+int TVMRunner::SetInput(std::string inputfile) {
+  LOG(INFO) << "TVMRunner::SetInput (Numpy):" << inputfile;
+  cnpy::npz_t npz_input = cnpy::npz_load(inputfile);
+
+  for (auto& elem : mInfo.input_info) {
+    LOG(INFO) << "Set Numpy Input for :" << elem.first;
+    NDArray in_arr = r_graph_handle.GetFunction("get_input")(elem.first);
+    auto ssize = GetMemSize(in_arr);
+
+    if (npz_input.find(elem.first) != npz_input.end()) {
+      in_arr.CopyFromBytes(npz_input[elem.first].data<char>(), ssize);
+    } else {
+      LOG(WARNING) << "Couldn't find input " << elem.first << " in npy input 
file";
+    }
+  }
+
+  return 0;
+}
+
+/*!
+ * \brief Set the model input from the given binary buffer.
+ * \param input_id input node name.
+ * \param raw_input binary input buffer to copy over input NDArray.
+ * \return 0 on success else error code.
+ */
+int TVMRunner::SetInput(std::string input_id, char* raw_input) {
+  LOG(INFO) << "TVMRunner::SetInput (Raw)";
+  NDArray in_arr = r_graph_handle.GetFunction("get_input")(input_id);
+  auto ssize = GetMemSize(in_arr);

Review Comment:
   But I need in_arr for the data copy below.
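
   For context, a minimal sketch of how this raw-input path presumably
   continues below the point where the diff hunk is truncated, mirroring the
   npz path above; the actual body is not visible here, so the copy call is
   an assumption:

       // Hypothetical completion of the truncated hunk above -- the diff cuts
       // off after GetMemSize(in_arr). in_arr is the copy target, which is
       // why it is still needed: raw_input -> device-backed NDArray.
       int TVMRunner::SetInput(std::string input_id, char* raw_input) {
         LOG(INFO) << "TVMRunner::SetInput (Raw)";
         NDArray in_arr = r_graph_handle.GetFunction("get_input")(input_id);
         auto ssize = GetMemSize(in_arr);
         in_arr.CopyFromBytes(raw_input, ssize);
         return 0;
       }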


