xifengT opened a new issue, #16607:
URL: https://github.com/apache/tvm/issues/16607

   I used this code to compile an onnx network model on python with the suffix 
so.
   ```python
   onnx_model = onnx.load('mobilenetv2.onnx')  
   input_name = 'input.1'
   shape_dict = {input_name: (1,3,224,224)}
   sym, params = relay.frontend.from_onnx(onnx_model, shape_dict)
   target = tvm.target.Target("llvm",host='llvm')
   dev = tvm.cpu()
   with tvm.transform.PassContext(opt_level=3):
       lib = relay.build(sym, target, params=params)
   path_lib = os.path.join(os.getcwd(),"mobilenetv2.so")
   lib.export_library(path_lib)
   ``` 
   I made sure this so file was usable, I imported this file again on the 
python side, and ran the inference, I used a picture of an airplane, and 
finally I got the classification number is 404.
   
   **Now I'm loading this file using the C++ side, using the same image of the 
plane for reasoning, and I can't get the correct result. And when I load other 
images, the result is the same, and this is the same result when I use empty 
input. Can anyone help me see what the problem is, I'm using Windows and the 
TVM version is 0.12.**
   ```c++
   void Mat_to_CHW(float* data, cv::Mat& frame)
   {
       unsigned int volChl = SIZE_W * SIZE_D;
       for (int c = 0; c < 3; ++c)
       {
           for (unsigned j = 0; j < volChl; ++j) {
               data[c * volChl + j] = static_cast<float>(float(frame.data[j * 3 
+ c])/255.);
           }    
       }
   }
   void DeployGraphExecutor() {
       LOG(INFO) << "Running graph executor...";
       printf("load in the library\n");
       DLDevice dev{ kDLCPU, 0};
   
       tvm::runtime::Module mod_factory = 
tvm::runtime::Module::LoadFromFile("mobilenetv2.so");
       printf("create the graph executor module\n");
   
       tvm::runtime::Module gmod = mod_factory.GetFunction("default")(dev); 
       tvm::runtime::PackedFunc set_input = gmod.GetFunction("set_input"); 
       tvm::runtime::PackedFunc get_output = gmod.GetFunction("get_output");
       tvm::runtime::PackedFunc run = gmod.GetFunction("run");
   
       printf("Use the C++ API\n");
       tvm::runtime::NDArray x = tvm::runtime::NDArray::Empty({ 1,3, 224,224 }, 
DLDataType{ kDLFloat, 32, 1 }, dev);
       tvm::runtime::NDArray y = tvm::runtime::NDArray::Empty({ 1, 1000 }, 
DLDataType{ kDLFloat, 32, 1 }, dev);
   
       cv::Mat img = cv::imread("D:/ALGC/TVM/test/airliner1.png");
       cv::Mat frame;
       cv::Mat input;
       cv::cvtColor(img, frame, cv::COLOR_BGR2RGB);
       cv::resize(frame, input, cv::Size(SIZE_W, SIZE_D));
   
       float data[SIZE_W * SIZE_D * 3];
       Mat_to_CHW(data, input);
       memcpy(x->data, &data, SIZE_W * SIZE_D * 3 * sizeof(float));
   
       set_input("x", x);
       run();
       get_output(0, y);
   
       float* tmp = new float;
       *tmp = 0;
       int cla;
       auto result = static_cast<float*>(y->data);
       for (int i = 0; i < 1000; i++) {
           if (*tmp < static_cast<float*>(y->data)[i]) {
               *tmp = static_cast<float*>(y->data)[i];
               cls = i;
           }
       }
       printf("%d---%f\n", cls, *tmp);
   }
   ``` 


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to