engineer1109 opened a new issue, #13076:
URL: https://github.com/apache/tvm/issues/13076

   
   ### Actual behavior
   
   ```
   Traceback (most recent call last):
     File "runtime.py", line 37, in <module>
       model_agent = ModelAgent()
     File "runtime.py", line 15, in __init__
       self.lib = tvm.runtime.load_module("onnx2cl.tar")
     File "/usr/local/lib/python3.8/dist-packages/tvm/runtime/module.py", line 
607, in load_module
       return _ffi_api.ModuleLoadFromFile(path, fmt)
     File "tvm/_ffi/_cython/./packed_func.pxi", line 331, in 
tvm._ffi._cy3.core.PackedFuncBase.__call__
     File "tvm/_ffi/_cython/./packed_func.pxi", line 262, in 
tvm._ffi._cy3.core.FuncCall
     File "tvm/_ffi/_cython/./packed_func.pxi", line 251, in 
tvm._ffi._cy3.core.FuncCall3
     File "tvm/_ffi/_cython/./base.pxi", line 181, in 
tvm._ffi._cy3.core.CHECK_CALL
   tvm._ffi.base.TVMError: Traceback (most recent call last):
     8: TVMFuncCall
     7: _ZN3tvm7runtime13PackedFun
     6: tvm::runtime::TypedPackedFunc<tvm::runtime::Module (std::string const&, 
std::string const&)>::AssignTypedLambda<tvm::runtime::Module (*)(std::string 
const&, std::string const&)>(tvm::runtime::Module (*)(std::string const&, 
std::string const&), std::string)::{lambda(tvm::runtime::TVMArgs const&, 
tvm::runtime::TVMRetValue*)#1}::operator()(tvm::runtime::TVMArgs const&, 
tvm::runtime::TVMRetValue*) const
     5: tvm::runtime::Module::LoadFromFile(std::string const&, std::string 
const&)
     4: 
tvm::runtime::PackedFuncObj::Extractor<tvm::runtime::PackedFuncSubObj<tvm::runtime::__mk_TVM0::{lambda(tvm::runtime::TVMArgs,
 tvm::runtime::TVMRetValue*)#1}> >::Call(tvm::runtime::PackedFuncObj const*, 
tvm::runtime::__mk_TVM0, tvm::runtime::TVMRetValue)
     3: 
tvm::runtime::CreateModuleFromLibrary(tvm::runtime::ObjectPtr<tvm::runtime::Library>,
 std::function<tvm::runtime::PackedFunc (int (*)(TVMValue*, int*, int, 
TVMValue*, int*, void*), tvm::runtime::ObjectPtr<tvm::runtime::Object> const&)>)
     2: tvm::runtime::ProcessModuleBlob(char const*, 
tvm::runtime::ObjectPtr<tvm::runtime::Library>, 
std::function<tvm::runtime::PackedFunc (int (*)(TVMValue*, int*, int, 
TVMValue*, int*, void*), tvm::runtime::ObjectPtr<tvm::runtime::Object> 
const&)>, tvm::runtime::Module*, tvm::runtime::ModuleNode**)
     1: tvm::runtime::LoadModuleFromBinary(std::string const&, dmlc::Stream*)
     0: _ZN3tvm7runtime6deta
     File "/workspace/tvm/src/runtime/library_module.cc", line 118
   TVMError: Binary was created using {opencl} but a loader of that name is not 
registered. Available loaders are ethos-n, AotExecutorFactory, metadata_module, 
const_loader, metadata, arm_compute_lib, VMExecutable, GraphRuntimeFactory, 
GraphExecutorFactory. Perhaps you need to recompile with this runtime enabled.
   ```
   
   
   ### Environment
   
   Ubuntu 20.04, GCC 9.4.0, TVM 0.9.0
   ### Steps to reproduce
   
   ```
   import numpy as np
   from PIL import Image
   
   import tvm
   import tvm.relay as relay
   from tvm.contrib import graph_runtime
   
    class ModelAgent:
        """Load a TVM-compiled model and run inference on preprocessed images.

        NOTE(review): reproduction script quoted verbatim from the issue
        report; only comments have been added.
        """

        # Target OpenCL device 0 — the deployed TVM runtime must therefore be
        # built with the OpenCL loader enabled (this is exactly what the
        # reported TVMError says is missing).
        ctx = tvm.cl(0)
        dtype = 'float32'

        def __init__(self):
            # Graph JSON, compiled library and serialized params come from a
            # prior relay build/export step (not shown in this report).
            self.graph = open('shufflenet.json').read()
            # The traceback shows the failure occurs here: "Binary was created
            # using {opencl} but a loader of that name is not registered".
            self.lib = tvm.runtime.load_module("onnx2cl.tar")
            self.params = bytearray(open("shufflenet.params", "rb").read())
            # Compute with GPU
            self.mod = graph_runtime.create(self.graph, self.lib, self.ctx)
            self.mod.load_params(self.params)

        def preprocess_image(self, image):
            # Resize to 224x224, scale pixel values to [0, 1], apply the
            # standard ImageNet mean/std normalization, then convert
            # HWC -> CHW and add a leading batch axis (NCHW).
            # Assumes `image` is a PIL.Image in RGB — TODO confirm.
            image = image.resize((224, 224))
            image = np.array(image)/np.array([255, 255, 255])
            image -= np.array([0.485, 0.456, 0.406])
            image /= np.array([0.229, 0.224, 0.225])
            image = image.transpose((2, 0, 1))
            image = image[np.newaxis, :]
            return image

        def execute(self, inputs):
            # Run one inference pass; assumes the graph's input tensor is
            # named "input" — verify against the exported graph JSON.
            inputs = self.preprocess_image(inputs)
            self.mod.set_input("input", tvm.nd.array(inputs.astype(self.dtype)))
            self.mod.run()
            outputs = self.mod.get_output(0)
            return outputs
   
    # Construct the agent (per the traceback, the reported TVMError is raised
    # inside this constructor, before any inference runs).
    model_agent = ModelAgent()

    image = Image.open("keyboard.jpg").resize((224, 224))

    # Classify the same image repeatedly, printing the top-1 class index.
    for i in range(1000):
      print("Run")
      outputs = model_agent.execute(image)
      # NOTE(review): asnumpy() converts the TVM NDArray to a numpy array;
      # kept exactly as quoted in the report.
      top1 = np.argmax(outputs.asnumpy()[0])
      print(top1)
   
   ```
   
   ### Triage
   
   Please refer to the list of label tags linked above to find the relevant 
tags and add them here in a bullet format (example below).
   
   * needs-triage
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to