This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch refactor-s1
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 493e97c2e516134e3dc34ee3946555d344e2f98b
Author: tqchen <[email protected]>
AuthorDate: Sun Apr 13 09:40:04 2025 -0400

    update contrib
---
 src/runtime/contrib/cudnn/conv_forward.cc     | 2 +-
 src/runtime/contrib/dnnl/dnnl_json_runtime.cc | 9 +--------
 src/target/llvm/intrin_rule_hexagon.cc        | 1 +
 3 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/src/runtime/contrib/cudnn/conv_forward.cc b/src/runtime/contrib/cudnn/conv_forward.cc
index 351761679c..c5f46d4847 100644
--- a/src/runtime/contrib/cudnn/conv_forward.cc
+++ b/src/runtime/contrib/cudnn/conv_forward.cc
@@ -150,7 +150,7 @@ void FindAlgo(int format, int dims, int groups, const int pad[], const int strid
     }
   }
 
-  ret[0] = best_algo;
+  ret[0] = static_cast<int>(best_algo);
 }
 
 TVM_REGISTER_GLOBAL("tvm.contrib.cudnn.conv2d.forward")
diff --git a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
index 09d93660f9..33feadbb8b 100644
--- a/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
+++ b/src/runtime/contrib/dnnl/dnnl_json_runtime.cc
@@ -116,16 +116,9 @@ class DNNLJSONRuntime : public JSONRuntimeBase {
 
   /* Same as makeInitDataProvider but in case of InputOutput return real DLTensor */
   TensorRegistry::DLTensorProvider makeIODataProvider(const TVMArgs& args) const {
-    auto extract_dl_tensor = [](const TVMArgValue& val) -> const DLTensor* {
-      ICHECK(val.type_code() == kTVMNDArrayHandle || val.type_code() == kTVMDLTensorHandle)
-          << "Expect NDArray or DLTensor";
-      return val.IsObjectRef<NDArray>() ? val.operator NDArray().operator->()
-                                        : val.operator DLTensor*();
-    };
-
     std::map<uint32_t, const DLTensor*> io_map;  // eid to dl tensor map
     for (size_t i = 0; i < run_arg_eid_.size(); i++) {
-      io_map[run_arg_eid_[i]] = extract_dl_tensor(args[i]);
+      io_map[run_arg_eid_[i]] = args[i].operator DLTensor*();
     }
 
     // lambda with captured IO data handlers
diff --git a/src/target/llvm/intrin_rule_hexagon.cc b/src/target/llvm/intrin_rule_hexagon.cc
index 58661c9978..83c6871d5c 100644
--- a/src/target/llvm/intrin_rule_hexagon.cc
+++ b/src/target/llvm/intrin_rule_hexagon.cc
@@ -57,6 +57,7 @@ inline PrimExpr DispatchTVMQHLWrapperFp16(const PrimExpr& e) {
   const auto* f = tvm::runtime::Registry::Get("target.TargetCurrent");
   ICHECK(f != nullptr);
   const auto ret = (*f)(true);
+  bool useqhl = true;
   if (auto opt_target = ret.as<Target>()) {
     const std::string tstring = opt_target.value()->str();
     useqhl = tstring.find("+hvx-qfloat") != std::string::npos;

Reply via email to