[GitHub] [incubator-tvm] tqchen commented on a change in pull request #6917: Add Relay option to link parameters into runtime Modules

2020-11-23 Thread GitBox


tqchen commented on a change in pull request #6917:
URL: https://github.com/apache/incubator-tvm/pull/6917#discussion_r529091748



##
File path: src/target/source/codegen_params.cc
##
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file codegen_params.cc
+ */
+
+#include "codegen_params.h"
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+namespace tvm {
+namespace codegen {
+
+namespace {
+class DLManagedTensorDeleter {
+ public:
+  void operator()(DLManagedTensor* ptr) { ptr->deleter(ptr); }
+};
+}  // namespace
+
+static constexpr const int kMaxLineLength = 80;
+
+template <typename T, typename Enable = std::enable_if_t<std::is_integral<T>::value>>
+void PrintArray(void* data, size_t num_elements, int elements_per_row, std::string indent_str,
+                std::ostream& os) {
+  for (size_t i = 0; i < num_elements; i++) {
+    int64_t elem = static_cast<T*>(data)[i];
+    if (std::is_signed<T>::value) {
+      uint64_t to_print;
+      if (elem < 0) {
+        os << "-";
+        to_print = -elem;
+      } else {
+        os << "+";
+        to_print = elem;
+      }
+      os << "0x" << std::setw(sizeof(T) * 8 / 4) << static_cast<uint64_t>(to_print);
+    } else {
+      os << "0x" << std::setw(sizeof(T) * 8 / 4) << static_cast<uint64_t>(elem);
+    }
+    if (i < num_elements - 1) {
+      os << ", ";
+    }
+    if (((i + 1) % elements_per_row) == 0) {
+      os << "\n" << indent_str;
+    }
+  }
+}
+
+template <typename T, typename Enable = std::enable_if_t<std::is_floating_point<T>::value>>
+void PrintArray(void* data, size_t num_elements, int one_element_size_bytes, int elements_per_row,
+                std::string indent_str, std::ostream& os) {
+  std::stringstream ss;
+  if (std::is_signed<T>::value) {
+    ss.setf(std::ios::hex | std::ios::showbase | std::ios::fixed | std::ios::scientific,
+            std::ios::basefield | std::ios::showbase | std::ios::floatfield);
+  } else {
+    ss.setf(std::ios::hex | std::ios::fixed | std::ios::scientific,
+            std::ios::basefield | std::ios::showbase | std::ios::floatfield);
+  }
+  for (size_t i = 0; i < num_elements; i++) {
+    T elem = static_cast<T*>(data)[i];
+    if (std::isinf(elem)) {
+      // C99 standard.
+      os << (elem < 0 ? "-" : " ") << std::setw(one_element_size_bytes - 1) << "INFINITY";
+    } else if (std::isnan(elem)) {
+      // GNU extension, implementation-dependent.
+      os << std::setw(one_element_size_bytes) << "NAN";
+    } else {
+      ss << elem;
+      os << std::setw(one_element_size_bytes) << ss.str();
+      ss.str("");
+    }
+    if (i < num_elements - 1) {
+      os << ", ";
+    }
+    if (((i + 1) % elements_per_row) == 0) {
+      os << "\n" << indent_str;
+    }
+  }
+}
+
+void NDArrayDataToC(::tvm::runtime::NDArray arr, int indent_chars, std::ostream& os) {
+  auto arr_type = arr.DataType();
+  CHECK_EQ(arr_type.lanes(), 1) << "CodegenParams: only support generating 1-lane parameters; saw "
+                                << arr_type.lanes();
+
+  int one_element_size_bytes = (arr_type.bits() / 4) + (2 /* "0x" */) + (2 /* ", " */);
+  if (arr_type.code() == runtime::DataType::TypeCode::kInt) {
+    one_element_size_bytes += 1;  // sign character
+    if (arr_type.bits() > 32) {
+      one_element_size_bytes += 2;  // "LL"
+    }
+  } else if (arr_type.code() == runtime::DataType::TypeCode::kUInt) {
+    if (arr_type.bits() > 32) {
+      one_element_size_bytes += 3;  // "ULL"
+    }
+  } else if (arr_type.code() == runtime::DataType::TypeCode::kFloat) {
+    // Floats and doubles are printed as hex but cast.
+    one_element_size_bytes += 1 /* sign */ + 1 /* decimal point */ + 1 /* exponent sign */ +
+                              1 /* extra decimal digit in exponent */;
+  }
+
+  int elements_per_row = 16;
+  while (elements_per_row > 1 &&
+         (elements_per_row * one_element_size_bytes) > (kMaxLineLength - indent_chars)) {
+    elements_per_row /= 2;
+  }
+
+  std::string indent_str(indent_chars, ' ');
+  os << indent_str;
+
+  auto shape = arr.Shape();
+  int num_elements = 1;
+  for (auto shape_elem : shape) {
+    num_elements *= shape_elem;
+  }
+
+  std::unique_ptr<DLManagedTensor, DLManagedTensorDeleter> tensor(arr.ToDLPack());
+  auto old_fmtflags = os.flags();
+  os.setf(std::ios::internal | std::ios::hex,
+          std::ios::adjustfield |

[GitHub] [incubator-tvm] tqchen commented on a change in pull request #6917: Add Relay option to link parameters into runtime Modules

2020-11-23 Thread GitBox


tqchen commented on a change in pull request #6917:
URL: https://github.com/apache/incubator-tvm/pull/6917#discussion_r529090811



##
File path: src/target/llvm/codegen_params.cc
##
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file codegen_params.cc
+ */
+#ifdef TVM_LLVM_VERSION
+
+#include "codegen_params.h"
+
+#include 
+#include 
+
+namespace tvm {
+namespace codegen {
+
+namespace {
+class DLManagedTensorDeleter {
+ public:
+  void operator()(DLManagedTensor* ptr) { ptr->deleter(ptr); }
+};
+}  // namespace
+
+llvm::ConstantArray* NDArrayToLLVMArray(llvm::LLVMContext* ctx, ::tvm::runtime::NDArray arr) {
+  llvm::Type* element_type = nullptr;
+

Review comment:
   We still need to support cases like bfloat16, which is not natively supported by LLVM. It would still be great to support it via uint in these cases.
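
As a rough illustration of the "support via uint" idea (a sketch under my own assumptions, not this PR's implementation), a bfloat16 parameter could be emitted as an i16 array whose elements carry the raw bit patterns, so LLVM never has to know about bfloat16 itself. The function name and the raw_u16 input are hypothetical:

    #include <llvm/IR/Constants.h>
    #include <llvm/IR/DerivedTypes.h>
    #include <llvm/IR/LLVMContext.h>

    #include <cstdint>
    #include <vector>

    // Sketch: store each bfloat16 element as its 16-bit pattern in an i16 array.
    llvm::Constant* EmitBF16AsUInt16(llvm::LLVMContext* ctx,
                                     const std::vector<uint16_t>& raw_u16) {
      llvm::Type* elem_ty = llvm::Type::getIntNTy(*ctx, 16);
      std::vector<llvm::Constant*> elems;
      for (uint16_t bits : raw_u16) {
        elems.push_back(llvm::ConstantInt::get(elem_ty, bits, /*isSigned=*/false));
      }
      llvm::ArrayType* arr_ty = llvm::ArrayType::get(elem_ty, elems.size());
      return llvm::ConstantArray::get(arr_ty, elems);
    }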





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [incubator-tvm] tqchen commented on a change in pull request #6917: Add Relay option to link parameters into runtime Modules

2020-11-23 Thread GitBox


tqchen commented on a change in pull request #6917:
URL: https://github.com/apache/incubator-tvm/pull/6917#discussion_r528718197



##
File path: src/target/llvm/codegen_params.cc
##
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file codegen_params.cc
+ */
+#ifdef TVM_LLVM_VERSION
+
+#include "codegen_params.h"
+
+#include 
+#include 
+
+namespace tvm {
+namespace codegen {
+
+namespace {
+class DLManagedTensorDeleter {
+ public:
+  void operator()(DLManagedTensor* ptr) { ptr->deleter(ptr); }
+};
+}  // namespace
+
+llvm::ConstantArray* NDArrayToLLVMArray(llvm::LLVMContext* ctx, ::tvm::runtime::NDArray arr) {
+  llvm::Type* element_type = nullptr;
+

Review comment:
   How about we take the middle-ground approach, e.g. use the same data type for the same bit width: uint32 for both float32 and int32. This will allow us to effectively support new data types without requiring LLVM to be aware of them.
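
A minimal sketch of this "same bit width, integer type" idea (my illustration, not the final PR code): the parameter bytes can be reinterpreted as unsigned integers of matching width before constants are emitted, so the backend only ever sees integer types. The helper name below is hypothetical:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Reinterpret float32 parameter data as uint32 bit patterns.
    std::vector<uint32_t> FloatBitsAsUInt32(const float* data, size_t n) {
      std::vector<uint32_t> bits(n);
      // memcpy is the well-defined way to view each element's raw bytes.
      std::memcpy(bits.data(), data, n * sizeof(uint32_t));
      return bits;
    }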
   
   









[GitHub] [incubator-tvm] tqchen commented on a change in pull request #6917: Add Relay option to link parameters into runtime Modules

2020-11-22 Thread GitBox


tqchen commented on a change in pull request #6917:
URL: https://github.com/apache/incubator-tvm/pull/6917#discussion_r528343180



##
File path: include/tvm/tir/function.h
##
@@ -150,6 +151,31 @@ class PrimFunc : public BaseFunc {
   TVM_DEFINE_OBJECT_REF_COW_METHOD(PrimFuncNode);
 };
 
+class LinkedParamNode : public Object {
+ public:
+  /*! \brief Unique numeric identifier used by runtimes to look up this parameter. */
+  int64_t id;
+
+  /*! \brief Parameter data which should get linked into the final module. */
+  ::tvm::runtime::NDArray param;
+
+  void VisitAttrs(tvm::AttrVisitor* v) {
+v->Visit("id", );
+v->Visit("param", );
+  }
+
+  static constexpr const char* _type_key = "tir.LinkedParam";
+  TVM_DECLARE_FINAL_OBJECT_INFO(LinkedParamNode, Object);
+};
+
+class LinkedParam : public ObjectRef {
+ public:
+  LinkedParam(int64_t id, ::tvm::runtime::NDArray param);

Review comment:
   Add TVM_DLL so LinkedParam can be constructed elsewhere.
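
For illustration, the requested change would look roughly like the following (assuming the rest of the class declaration stays as in the diff above; this is a sketch, not the merged code):

    class LinkedParam : public ObjectRef {
     public:
      // TVM_DLL exports the constructor so it can be invoked from other
      // translation units and from the FFI.
      TVM_DLL LinkedParam(int64_t id, ::tvm::runtime::NDArray param);
    };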

##
File path: src/runtime/graph/graph_runtime.cc
##
@@ -244,6 +249,39 @@ void GraphRuntime::ShareParams(const GraphRuntime& other, dmlc::Stream* strm) {
   this->SetupOpExecs();
 }
 
+void GraphRuntime::PreAllocatedDLTensorDeleter(DLManagedTensor* tensor) {

Review comment:
   This function is not used anywhere. Likely we need an NDArray::Container deleter instead.
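
A hedged sketch of what such a deleter might look like (the function name is mine): for parameters whose storage is linked into the binary, only the heap-allocated container should be freed, never the parameter data itself.

    #include <tvm/runtime/ndarray.h>

    // Deleter for NDArray::Container objects whose data is linked into the
    // module image: free the container, leave the storage alone.
    void LinkedParamDeleter(tvm::runtime::Object* container) {
      delete static_cast<tvm::runtime::NDArray::Container*>(container);
    }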

##
File path: src/target/llvm/codegen_params.cc
##
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file codegen_params.cc
+ */
+#ifdef TVM_LLVM_VERSION
+
+#include "codegen_params.h"
+
+#include 
+#include 
+
+namespace tvm {
+namespace codegen {
+
+namespace {
+class DLManagedTensorDeleter {
+ public:
+  void operator()(DLManagedTensor* ptr) { ptr->deleter(ptr); }
+};
+}  // namespace
+
+llvm::ConstantArray* NDArrayToLLVMArray(llvm::LLVMContext* ctx, ::tvm::runtime::NDArray arr) {
+  llvm::Type* element_type = nullptr;
+

Review comment:
   I wonder whether we should instead look into LLVM's target machine to look up the endianness and do the byte swap ourselves.
   
   This would allow us to generalize to data types beyond those recognized by LLVM, as long as we know the target machine's layout.
   
   See https://github.com/apache/incubator-tvm/blob/main/src/runtime/rpc/rpc_endpoint.cc#L429
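
A sketch of that approach under my own assumptions (not code from this PR): query the target's byte order from the LLVM DataLayout and swap the serialized words when host and target disagree. `tm` is a hypothetical, already-configured llvm::TargetMachine, and the helper names are mine:

    #include <llvm/Target/TargetMachine.h>

    #include <cstdint>
    #include <vector>

    // True if the code-generation target is little-endian.
    bool TargetIsLittleEndian(const llvm::TargetMachine* tm) {
      return tm->createDataLayout().isLittleEndian();
    }

    // Portable 32-bit byte swap.
    uint32_t SwapBytes32(uint32_t v) {
      return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
             ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
    }

    // Swap each 32-bit word when the host and target byte orders differ.
    void MaybeSwapForTarget(std::vector<uint32_t>* words, bool host_is_little,
                            const llvm::TargetMachine* tm) {
      if (host_is_little != TargetIsLittleEndian(tm)) {
        for (uint32_t& w : *words) w = SwapBytes32(w);
      }
    }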

##
File path: src/relay/backend/build_module.cc
##
@@ -443,16 +456,35 @@ class RelayBuildModule : public runtime::ModuleNode {
 
 auto lowered_funcs = graph_codegen_->GetIRModule();
 
-// When there is no lowered_funcs due to reasons such as optimization.
-if (lowered_funcs.size() == 0) {
-  Target target_host = GetTargetHost();
+Target target_host = GetTargetHost();
+// If no target_host has been set, we choose a default one, which is
+// llvm if "codegen.LLVMModuleCreate" is accessible.
+const runtime::PackedFunc* pf = runtime::Registry::Get("codegen.LLVMModuleCreate");
+if (!target_host.defined()) target_host = (pf != nullptr) ? Target("llvm") : Target("stackvm");
+
+if (target_host->GetAttr<Bool>("link-params").value_or(Bool(false))) {

Review comment:
   Add a comment to this code block: generate a placeholder function that attaches the linked params as its arguments.
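
One possible wording for that comment (my phrasing, not the PR's), placed directly above the condition:

    // Generate a placeholder function that attaches the linked parameters as
    // its arguments, so later codegen stages can emit them into the module.
    if (target_host->GetAttr<Bool>("link-params").value_or(Bool(false))) {
      // ... body as in the diff above ...
    }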

##
File path: include/tvm/tir/function.h
##
@@ -150,6 +151,31 @@ class PrimFunc : public BaseFunc {
   TVM_DEFINE_OBJECT_REF_COW_METHOD(PrimFuncNode);
 };
 
+class LinkedParamNode : public Object {

Review comment:
   Document this class name as well
   

##
File path: src/runtime/graph/graph_runtime.cc
##
@@ -64,14 +64,19 @@ void GraphRuntime::Run() {
  * processor.
  * \param ctxs The context of the host and devices where graph nodes will be
  * executed on.
+ * \param lookup_linked_param_func Linked parameter lookup function.
  */
 void GraphRuntime::Init(const std::string& graph_json, tvm::runtime::Module module,
-                        const std::vector<TVMContext>& ctxs) {
+                        const std::vector<TVMContext>& ctxs, PackedFunc lookup_linked_param_func) {

Review comment:
   Consider using TypedPackedFunc to add type signatures.
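
For illustration, the hook could carry a typed signature via TypedPackedFunc. The (int64_t) -> NDArray signature below is a hypothetical placeholder, not necessarily what the PR ends up using:

    #include <tvm/runtime/c_runtime_api.h>
    #include <tvm/runtime/module.h>
    #include <tvm/runtime/ndarray.h>
    #include <tvm/runtime/packed_func.h>

    #include <string>
    #include <vector>

    // Hypothetical typed alias for the linked-parameter lookup hook.
    using LookupLinkedParamFunc =
        tvm::runtime::TypedPackedFunc<tvm::runtime::NDArray(int64_t)>;

    // The Init signature would then document the hook's argument/return types.
    void Init(const std::string& graph_json, tvm::runtime::Module module,
              const std::vector<TVMContext>& ctxs,
              LookupLinkedParamFunc lookup_linked_param_func);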

##
File path: