echuraev commented on code in PR #15818:
URL: https://github.com/apache/tvm/pull/15818#discussion_r1339863992
##########
apps/cpp_rtvm/main.cc:
##########
@@ -350,43 +350,46 @@ int ExecuteModel(ToolArgs& args) {
} else if (!args.input.empty() && !args.output.empty()) {
LOG(INFO) << "Executing with Input:" << args.input << " Output:" <<
args.output;
// Set Input from Numpy Input
- runner->SetInput(args.input);
+ runner.SetInput(args.input);
// Run the model
- runner->Run();
+ runner.Run();
// Get Output as Numpy dump
- runner->GetOutput(args.output);
+ runner.GetOutput(args.output);
} else {
LOG(INFO) << "Executing dry run ... ";
// Set random input for all inputs
for (auto& elem : mInfo.input_info) {
LOG(INFO) << "Set Random Input for :" << elem.first;
auto shape = elem.second.first;
- size_t ssize = runner->GetInputMemSize(elem.first);
+ size_t ssize = runner.GetInputMemSize(elem.first);
char* data = (char*)malloc(ssize);
LOG(INFO) << "Random Input Size:" << ssize << " bytes";
- runner->SetInput(elem.first, data);
+ runner.SetInput(elem.first, data);
free(data);
}
// Run the model
- runner->Run();
+ runner.Run();
// Get Output and dump few values
for (auto& elem : mInfo.output_info) {
LOG(INFO) << "Get Output for :" << elem.first;
auto shape = elem.second.first;
- size_t ssize = runner->GetOutputMemSize(elem.first);
+ size_t ssize = runner.GetOutputMemSize(elem.first);
char* data = (char*)malloc(ssize);
- runner->GetOutput(elem.first, data);
+ runner.GetOutput(elem.first, data);
LOG(INFO) << "Output Size:" << ssize << " bytes";
free(data);
}
}
+
+ if (args.profile) {
+ // Print Stats
+ runner.PrintStats();
+ }
auto tstart = std::chrono::high_resolution_clock::now();
- delete runner;
+ runner.~TVMRunner();
Review Comment:
Why do you need to call the dtor explicitly here? Probably, I missed something
in the logic. I suppose that the object will be automatically destroyed at the
end of the program.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]