jikechao opened a new issue, #14615:
URL: https://github.com/apache/tvm/issues/14615

   The ONNX model can be imported into Relay without issue, but it crashes when 
compiled. It throws the following crash messages:
   ```
   Traceback (most recent call last):
     ....
       irmod, params = relay.frontend.from_onnx(model, {'x': (1, 1, 5, 5)}, 
freeze_params=True)
     File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/onnx.py", line 
7346, in from_onnx
       mod, params = g.from_onnx(graph, opset)
     File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/onnx.py", line 
6963, in from_onnx
       self._construct_nodes(graph)
     File "/workplace/software/tvm/tvm/python/tvm/relay/frontend/onnx.py", line 
7120, in _construct_nodes
       len(node_output), outputs_num, op_name
   AssertionError: Number of output mismatch 2 vs 1 in MaxPool.
   (tf2) root@R730-3:/share_host/TVMFT/BorrowTests/ONNX/bugs# python 
6_sorted.py  
   Traceback (most recent call last):
     File "6_sorted.py", line 11, in <module>
       graph, lib, params = relay.build_module.build(irmod, target='llvm 
-mcpu=core-avx2', params=params)
     File "/workplace/software/tvm/tvm/python/tvm/relay/build_module.py", line 
372, in build
       mod_name=mod_name,
     File "/workplace/software/tvm/tvm/python/tvm/relay/build_module.py", line 
169, in build
       mod_name,
     File "/workplace/software/tvm/tvm/python/tvm/_ffi/_ctypes/packed_func.py", 
line 237, in __call__
       raise get_last_ffi_error()
   tvm._ffi.base.TVMError: Traceback (most recent call last):
     25: TVMFuncCall
     24: 
tvm::relay::backend::RelayBuildModule::GetFunction(std::__cxx11::basic_string<char,
 std::char_traits<char>, std::allocator<char> > const&, 
tvm::runtime::ObjectPtr<tvm::runtime::Object> 
const&)::{lambda(tvm::runtime::TVMArgs, 
tvm::runtime::TVMRetValue*)#3}::operator()(tvm::runtime::TVMArgs, 
tvm::runtime::TVMRetValue*) const
     23: tvm::relay::backend::RelayBuildModule::Build(tvm::IRModule, 
tvm::runtime::Array<tvm::Target, void> const&, tvm::Target const&, 
tvm::relay::Executor const&, tvm::relay::Runtime const&, 
tvm::WorkspaceMemoryPools const&, tvm::ConstantMemoryPools const&, 
tvm::runtime::String)
     22: tvm::relay::backend::RelayBuildModule::BuildRelay(tvm::IRModule, 
tvm::runtime::String const&)
     21: tvm::relay::backend::ExecutorCodegen::Codegen(tvm::IRModule, 
tvm::relay::Function const&, tvm::runtime::String)
     20: void tvm::relay::backend::ExecutorCodegen::CallFunc<tvm::IRModule, 
tvm::relay::Function, tvm::runtime::String>(std::__cxx11::basic_string<char, 
std::char_traits<char>, std::allocator<char> > const&, tvm::IRModule, 
tvm::relay::Function, tvm::runtime::String)
     19: 
tvm::relay::backend::GraphExecutorCodegenModule::GetFunction(std::__cxx11::basic_string<char,
 std::char_traits<char>, std::allocator<char> > const&, 
tvm::runtime::ObjectPtr<tvm::runtime::Object> 
const&)::{lambda(tvm::runtime::TVMArgs, 
tvm::runtime::TVMRetValue*)#2}::operator()(tvm::runtime::TVMArgs, 
tvm::runtime::TVMRetValue*) const
     18: tvm::relay::backend::GraphExecutorCodegen::Codegen(tvm::IRModule, 
tvm::relay::Function, tvm::runtime::String)
     17: tvm::relay::GraphPlanMemory(tvm::relay::Function const&)
     16: tvm::relay::StorageAllocator::Plan(tvm::relay::Function const&)
     15: tvm::relay::ExprVisitor::VisitExpr(tvm::RelayExpr const&)
     14: 
tvm::relay::transform::DeviceAwareExprVisitor::VisitExpr_(tvm::relay::FunctionNode
 const*)
     13: 
tvm::relay::StorageAllocaBaseVisitor::DeviceAwareVisitExpr_(tvm::relay::FunctionNode
 const*)
     12: tvm::relay::StorageAllocaBaseVisitor::GetToken(tvm::RelayExpr const&)
     11: tvm::relay::ExprVisitor::VisitExpr(tvm::RelayExpr const&)
     10: tvm::relay::StorageAllocaBaseVisitor::VisitExpr_(tvm::relay::TupleNode 
const*)
     9: tvm::relay::StorageAllocaBaseVisitor::GetToken(tvm::RelayExpr const&)
     8: tvm::relay::ExprVisitor::VisitExpr(tvm::RelayExpr const&)
     7: 
tvm::relay::transform::DeviceAwareExprVisitor::VisitExpr_(tvm::relay::CallNode 
const*)
     6: 
tvm::relay::StorageAllocator::DeviceAwareVisitExpr_(tvm::relay::CallNode const*)
     5: tvm::relay::StorageAllocaBaseVisitor::CreateToken(tvm::RelayExprNode 
const*, bool)
     4: tvm::relay::StorageAllocator::CreateTokenOnDevice(tvm::RelayExprNode 
const*, tvm::VirtualDevice const&, bool)
     3: 
tvm::relay::StorageAllocator::TokenAllocator::Request(tvm::relay::StorageToken*)
     2: tvm::relay::TokenAllocator1D::Request(tvm::relay::StorageToken*)
     1: tvm::relay::TokenAllocator1D::GetMemorySize(tvm::relay::StorageToken*)
     0: _ZN3tvm7runtime6detail
     File "/workplace/software/tvm/tvm/src/relay/backend/token_allocator.cc", 
line 41
   TVMError: 
   ---------------------------------------------------------------
   An error occurred during the execution of TVM.
   For more information, please see: https://tvm.apache.org/docs/errors.html
   ---------------------------------------------------------------
     Check failed: (pval != nullptr) is false: Cannot allocate memory symbolic 
tensor shape [T.Any()]
   ```
   Although the inferred shape is `T.Any()`, the model is actually a static 
graph rather than a dynamic one. Thus this crash is a bug: either the frontend 
should infer the concrete static shape, or compilation should handle `T.Any()` 
correctly instead of failing.
    
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to