This is an automated email from the ASF dual-hosted git repository.

jroesch pushed a commit to branch tvm_ep
in repository https://gitbox.apache.org/repos/asf/tvm.git
commit bcb100a069897e73f799416aca863f6147c83c0e
Author: Chris Sullivan <[email protected]>
AuthorDate: Thu Aug 20 14:29:23 2020 -0700

    [AMD:ONNXRT:TVM] Add packed function to import and compile an ONNX model
    which can be used as a JIT TVM interface for ONNXRT.
---
 include/tvm/driver/jit_interface.h              |  4 +-
 python/tvm/relay/frontend/__init__.py           |  1 +
 python/tvm/relay/frontend/{ => jit}/__init__.py | 22 ++-------
 .../relay/frontend/{__init__.py => jit/onnx.py} | 34 ++++++-------
 src/driver/driver_api.cc                        | 57 ++++++++--------------
 5 files changed, 40 insertions(+), 78 deletions(-)

diff --git a/include/tvm/driver/jit_interface.h b/include/tvm/driver/jit_interface.h
index 966d5a8..e0906f1 100644
--- a/include/tvm/driver/jit_interface.h
+++ b/include/tvm/driver/jit_interface.h
@@ -3,8 +3,6 @@
 #ifdef __cplusplus
 extern "C" {
     EXPORT_DLL tvm::runtime::Module TVMCompile(const std::string& onnx_txt, const std::string& target, const std::string& target_host, int opt_level);
-    EXPORT_DLL void TVMRun(tvm::runtime::Module& mod, const std::string& name, tvm::runtime::TVMArgs& args, tvm::runtime::TVMRetValue* ret);
-
-
+    EXPORT_DLL void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor> inputs, std::vector<DLTensor> outputs, tvm::runtime::TVMRetValue* ret);
 } // TVM_EXTERN_C
 #endif
diff --git a/python/tvm/relay/frontend/__init__.py b/python/tvm/relay/frontend/__init__.py
index 7e16499..460da78 100644
--- a/python/tvm/relay/frontend/__init__.py
+++ b/python/tvm/relay/frontend/__init__.py
@@ -35,3 +35,4 @@ from .darknet import from_darknet
 from .pytorch import from_pytorch
 from .caffe import from_caffe
 from .change_datatype import ChangeDatatype
+from . import jit
diff --git a/python/tvm/relay/frontend/__init__.py b/python/tvm/relay/frontend/jit/__init__.py
similarity index 57%
copy from python/tvm/relay/frontend/__init__.py
copy to python/tvm/relay/frontend/jit/__init__.py
index 7e16499..40cd1fd 100644
--- a/python/tvm/relay/frontend/__init__.py
+++ b/python/tvm/relay/frontend/jit/__init__.py
@@ -15,23 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 """
-Frontends for constructing Relay programs.
-
-Contains the model importers currently defined
-for Relay.
+JIT interface implementing packed functions that
+import and compile frontend models
 """
-
-from __future__ import absolute_import
-
-from .mxnet import from_mxnet
-from .mxnet_qnn_op_utils import quantize_conv_bias_mkldnn_from_var
-from .keras import from_keras
-from .onnx import from_onnx
-from .tflite import from_tflite
-from .coreml import from_coreml
-from .caffe2 import from_caffe2
-from .tensorflow import from_tensorflow
-from .darknet import from_darknet
-from .pytorch import from_pytorch
-from .caffe import from_caffe
-from .change_datatype import ChangeDatatype
+from .onnx import *
diff --git a/python/tvm/relay/frontend/__init__.py b/python/tvm/relay/frontend/jit/onnx.py
similarity index 53%
copy from python/tvm/relay/frontend/__init__.py
copy to python/tvm/relay/frontend/jit/onnx.py
index 7e16499..9545395 100644
--- a/python/tvm/relay/frontend/__init__.py
+++ b/python/tvm/relay/frontend/jit/onnx.py
@@ -14,24 +14,22 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-"""
-Frontends for constructing Relay programs.
+import onnx
+import tvm
+import tvm.relay

-Contains the model importers currently defined
-for Relay.
-""" [email protected]_func("tvm_onnx_import_and_compile") +def onnx_compile(model_string, target, target_host, opt_level): + model = onnx.load_model_from_string(bytes(model_string)) -from __future__ import absolute_import + # input shape from data + input_shape = {model.graph.input[0].name: (6,)} -from .mxnet import from_mxnet -from .mxnet_qnn_op_utils import quantize_conv_bias_mkldnn_from_var -from .keras import from_keras -from .onnx import from_onnx -from .tflite import from_tflite -from .coreml import from_coreml -from .caffe2 import from_caffe2 -from .tensorflow import from_tensorflow -from .darknet import from_darknet -from .pytorch import from_pytorch -from .caffe import from_caffe -from .change_datatype import ChangeDatatype + irmod, params = tvm.relay.frontend.from_onnx(model, input_shape, opset=11) + with tvm.relay.build_config(opt_level=opt_level): + graph, lib, params = tvm.relay.build(irmod, target_host=target_host, target=target, params=params) + + ctx = tvm.context(target, 0) + m = tvm.contrib.graph_runtime.create(graph, lib, ctx) + m.set_input(**params) + return m.module diff --git a/src/driver/driver_api.cc b/src/driver/driver_api.cc index 758f019..b876c38 100644 --- a/src/driver/driver_api.cc +++ b/src/driver/driver_api.cc @@ -35,6 +35,7 @@ #include <tvm/runtime/module.h> #include <tvm/runtime/packed_func.h> #include <tvm/runtime/container.h> +#include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/registry.h> #include <tvm/target/codegen.h> #include <tvm/te/operation.h> @@ -340,45 +341,25 @@ runtime::Module build(const IRModule& funcs, const Target& target, const Target& tvm::runtime::Module TVMCompile(const std::string& onnx_txt, const std::string& target, const std::string& target_host, int opt_level) { - auto tensor_type = tvm::relay::TensorType({1, 6}, tvm::runtime::DataType::Float(32)); - auto X1 = tvm::relay::Var("X1", tensor_type); - auto mul_op = tvm::relay::Op::Get("multiply"); - auto mul1 = tvm::relay::Call(mul_op, {X1, X1}, tvm::Attrs(), {}); - auto mul2 = tvm::relay::Call(mul_op, {X1, mul1}, tvm::Attrs(), {}); - auto mul3 = tvm::relay::Call(mul_op, {X1, mul2}, tvm::Attrs(), {}); - auto Y4 = tvm::relay::Call(mul_op, {X1, mul3}, tvm::Attrs(), {}); - auto func = tvm::relay::Function(tvm::relay::FreeVars(Y4), Y4, tvm::relay::Type(), {}); - - auto reg = tvm::runtime::Registry::Get("ir.RegisterOpAttr"); - if (!reg) - LOG(FATAL) << "no _Register"; - - auto fs = tvm::runtime::Registry::Get("jit.strategy"); - if (!fs) - LOG(FATAL) << "No jit strategy registered."; - - auto fgeneric = tvm::GenericFunc::Get("jit.strategy_generic").set_default(*fs); - (*reg)("multiply", "FTVMStrategy", fgeneric, 10); - (*reg)("multiply", "TShapeDataDependant", false, 10); - - auto pfb = tvm::runtime::Registry::Get("relay.build_module._BuildModule"); - tvm::runtime::Module build_mod = (*pfb)(); - auto build_f = build_mod.GetFunction("build", false); - auto mod_f = build_mod.GetFunction("get_module", false); - auto relay_mod = tvm::IRModule::FromExpr(func); - tvm::Map<tvm::Integer, tvm::Target> targets; - // tvm::Target tgt = tvm::Target::Create(target); - tvm::Target tgt = tvm::Target::Create("llvm"); - targets.Set(0, tgt); - // tvm::Target host = (target == target_host) ? 
tgt : tvm::Target::Create(target_host); - build_f(relay_mod, targets, tgt); - tvm::runtime::Module mod = mod_f(); - return mod; + const tvm::PackedFunc* compile = tvm::runtime::Registry::Get("tvm_onnx_import_and_compile"); + tvm::runtime::Module mod = (*compile)(TVMByteArray{onnx_txt.data(), onnx_txt.size()}, target, target_host, opt_level); + return mod; + } -void TVMRun(tvm::runtime::Module& mod, const std::string& name, tvm::runtime::TVMArgs& args, tvm::runtime::TVMRetValue* ret) +void TVMRun(tvm::runtime::Module& mod, std::vector<DLTensor> inputs, std::vector<DLTensor> outputs, tvm::runtime::TVMRetValue* ret) { - mod.GetFunction(name).CallPacked(args, ret); - // process return value, refe to TVMFuncCall in c_runtime_api.cc - + tvm::PackedFunc set_input = mod.GetFunction("set_input_zero_copy", false); + for (size_t i = 0; i < inputs.size(); i++) + { + set_input(i, &inputs[i]); + } + + mod.GetFunction("run", false)(); + + tvm::PackedFunc get_output = mod.GetFunction("get_output", false); + for (size_t i = 0; i < outputs.size(); i++) + { + get_output(i, &outputs[i]); + } }
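
For context, below is a minimal usage sketch (not part of this commit) of how a host such as an ONNXRT execution provider might drive the new TVMCompile/TVMRun pair end to end. The model path, tensor shapes, and targets are illustrative assumptions: it presumes an ONNX model with a single float32 input of shape (6,), matching the shape hard-coded in onnx_compile above, one output of the same shape, and that the embedding process has imported tvm.relay.frontend.jit so the "tvm_onnx_import_and_compile" packed function is registered before TVMCompile is called.

// Illustrative sketch only; model.onnx and the (6,) shapes are placeholders.
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

#include <dlpack/dlpack.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/driver/jit_interface.h>

int main() {
  // Read the serialized ONNX protobuf into a string.
  std::ifstream fin("model.onnx", std::ios::binary);
  std::stringstream buf;
  buf << fin.rdbuf();
  std::string onnx_txt = buf.str();

  // Import and compile through the registered Python packed function.
  tvm::runtime::Module mod = TVMCompile(onnx_txt, "llvm", "llvm", /*opt_level=*/3);

  // One float32 input and one float32 output of shape (6,) on CPU,
  // matching the shape assumed by onnx_compile.
  std::vector<float> in_data(6, 1.0f);
  std::vector<float> out_data(6, 0.0f);
  int64_t shape[1] = {6};

  DLTensor in{};
  in.data = in_data.data();
  in.ctx = {kDLCPU, 0};
  in.ndim = 1;
  in.dtype = {kDLFloat, 32, 1};
  in.shape = shape;

  DLTensor out = in;
  out.data = out_data.data();

  // Zero-copy set_input, run, then fetch the output.
  tvm::runtime::TVMRetValue ret;
  TVMRun(mod, {in}, {out}, &ret);
  return 0;
}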
