[GitHub] [incubator-tvm] kazum commented on a change in pull request #5052: [TARGET] ONNX codegen

2020-06-09 Thread GitBox


kazum commented on a change in pull request #5052:
URL: https://github.com/apache/incubator-tvm/pull/5052#discussion_r437683470



##
File path: python/tvm/contrib/target/onnx.py
##
@@ -0,0 +1,905 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines, redefined-builtin
+"""Relay to ONNX codegen"""
+
+import os
+import struct
+import copy
+import numpy
+import onnx
+import onnx.utils
+from onnx import numpy_helper, OperatorSetIdProto, defs
+import tvm
+from tvm import relay
+import tvm._ffi
+from tvm.relay.expr_functor import ExprVisitor
+from tvm.relay.ty import TupleType, TensorType
+
+ONNX_OPSET_VERSONS_SUPPORTED = [11]
+
+
+def tvm_array_to_list(arr):
+    return tuple(x.value for x in arr)


+def get_onnx_version():
+    return onnx.__version__


+def infer_type(node):
+    """A method to infer the type of a relay expression."""
+    mod = tvm.IRModule.from_expr(node)
+    mod = relay.transform.InferType()(mod)
+    entry = mod["main"]
+    return entry if isinstance(node, relay.Function) else entry.body


+def call_node_infer_type(node):
+    """Infer the output types of a call node."""
+    infer_out = infer_type(node)
+    out_type = infer_out._checked_type_
+    types = []
+    if isinstance(out_type, TensorType):
+        types.append(out_type)
+    elif isinstance(out_type, TupleType):
+        for tuple_type in out_type.fields:
+            types.append(tuple_type)

Review comment:
   `types = list(out_type.fields)` looks simpler.
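
   A minimal sketch of that simplification, assuming the `infer_type` helper and the Relay type classes imported in this hunk:

   ```python
   from tvm.relay.ty import TensorType, TupleType

   def call_node_infer_type(node):
       """Infer the output types of a call node."""
       out_type = infer_type(node)._checked_type_
       if isinstance(out_type, TensorType):
           return [out_type]
       if isinstance(out_type, TupleType):
           # list(...) copies the tuple fields in a single step.
           return list(out_type.fields)
       # Guard against other type kinds (not part of the original hunk).
       raise NotImplementedError("unsupported output type: %s" % type(out_type))
   ```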

##
File path: tests/python/contrib/test_onnx.py
##
@@ -0,0 +1,467 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Relay to ONNX serialization test cases"""
+import pytest
+pytest.importorskip('onnx')
+pytest.importorskip('onnxruntime')
+
+import numpy as np
+import onnxruntime as rt
+
+import tvm
+from tvm import relay
+from tvm.contrib.target.onnx import to_onnx
+
+
+
+def func_to_onnx(func, name):
+    mod = tvm.IRModule()
+    mod['main'] = func
+    onnx_model = to_onnx(mod, {}, name, path=None)
+    return onnx_model.SerializeToString()


+def run_onnx(onnx_model, input_data):
+    sess = rt.InferenceSession(onnx_model)
+    input_names = {}
+    for input, data in zip(sess.get_inputs(), input_data):
+        input_names[input.name] = data
+    output_names = [out.name for out in sess.get_outputs()]
+    res = sess.run(output_names, input_names)
+    return res


+def run_relay(func, data_tuple):
+    target = 'llvm'
+    ctx = tvm.context('llvm', 0)
+    intrp = relay.create_executor("graph", ctx=ctx, target=target)
+    relay_res = intrp.evaluate(func)(*data_tuple)
+
+    result = []
+    relay_res = relay_res if isinstance(relay_res, list) else [relay_res]
+    for res in relay_res:
+        result.append(res.asnumpy())
+
+    return result


+def verify_results(relay_func, indata, test_name, rtol=1e-7, atol=0):
+    relay_results = run_relay(relay_func, indata)
+    onnx_results = run_onnx(func_to_onnx(relay_func, test_name), indata)
+
+    for relay_res, onnx_res in zip(relay_results, onnx_results):
+        np.testing.assert_allclose(relay_res, onnx_res, rtol=rtol, atol=atol)


+def test_add():
+    dtype = 'float32'
+    t1 = relay.TensorType((5, 10, 5))
+    t2 = relay.TensorType((5, 10, 5))
+    x = relay.var("x", t1, dtype=dtype)
+    y = relay.var("y", t2, dtype=dtype)
+    z = relay.add(x, y)
+    func = relay.Function([x, y],

[GitHub] [incubator-tvm] kazum commented on a change in pull request #5052: [TARGET] ONNX codegen

2020-05-23 Thread GitBox


kazum commented on a change in pull request #5052:
URL: https://github.com/apache/incubator-tvm/pull/5052#discussion_r429565777



##
File path: tests/python/contrib/test_onnx.py
##
@@ -0,0 +1,393 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Relay to ONNX serialization test cases"""
+import pytest
+pytest.importorskip('onnx')
+pytest.importorskip('onnxruntime')
+
+import numpy as np
+import tvm
+from tvm import relay
+from tvm.contrib.target.onnx import to_onnx
+import onnxruntime as rt
+
+
+def func_to_onnx(func, name):
+    mod = tvm.IRModule()
+    mod['main'] = func
+    onnx_model = to_onnx(mod, {}, name, path=None)
+    return onnx_model.SerializeToString()


+def run_onnx(onnx_model, input_data):
+    sess = rt.InferenceSession(onnx_model)
+    input_names = {}
+    for input, data in zip(sess.get_inputs(), input_data):
+        input_names[input.name] = data
+    output_name = sess.get_outputs()[0].name
+    res = sess.run([output_name], input_names)
+    return res[0]


+def run_relay(func, data_tuple):
+    target = 'llvm'
+    ctx = tvm.context('llvm', 0)
+    intrp = relay.create_executor("graph", ctx=ctx, target=target)
+    relay_res = intrp.evaluate(func)(*data_tuple)
+    return relay_res.asnumpy()


+def verify_results(relay_func, indata, test_name, rtol=1e-7, atol=0):
+    relay_res = run_relay(relay_func, indata)
+    onnx_res = run_onnx(func_to_onnx(relay_func, test_name), indata)
+    np.testing.assert_allclose(relay_res, onnx_res, rtol=rtol, atol=atol)


+def test_add():
+    dtype = 'float32'
+    t1 = relay.TensorType((5, 10, 5))
+    t2 = relay.TensorType((5, 10, 5))
+    x = relay.var("x", t1, dtype=dtype)
+    y = relay.var("y", t2, dtype=dtype)
+    z = relay.add(x, y)
+    func = relay.Function([x, y], z)
+
+    x_data = np.random.rand(5, 10, 5).astype(dtype)
+    y_data = np.random.rand(5, 10, 5).astype(dtype)
+
+    verify_results(func, [x_data, y_data], 'test_add')


+def test_bias_add():
+    for dtype in ['float16', 'float32']:
+        xshape = (10, 2, 3, 4)
+        bshape = (2,)
+        rtol = 1e-2 if dtype == 'float16' else 1e-5
+        x = relay.var("x", shape=xshape, dtype=dtype)
+        bias = relay.var("bias", dtype=dtype)
+        z = relay.nn.bias_add(x, bias)
+        func = relay.Function([x, bias], z)
+
+        x_data = np.random.uniform(size=xshape).astype(dtype)
+        y_data = np.random.uniform(size=bshape).astype(dtype)
+
+        verify_results(func, [x_data, y_data], 'test_bias_add', rtol=rtol)


+def test_conv2d():
+    def verify_conv2d(dtype, scale, dshape, kshape,
+                      padding=(1, 1),
+                      groups=1,
+                      dilation=(1, 1),
+                      **attrs):
+        x = relay.var("x", shape=dshape, dtype=dtype)
+        w = relay.var("w", shape=kshape, dtype=dtype)
+        y = relay.nn.conv2d(x, w,
+                            padding=padding,
+                            dilation=dilation,
+                            groups=groups,
+                            **attrs)
+        func = relay.Function([x, w], y)
+        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
+        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
+        verify_results(func, [data, kernel], 'test_conv2d', rtol=1e-5, atol=1e-5)
+
+    dshape = (1, 32, 18, 18)
+    kshape = (32, 1, 3, 3)
+    verify_conv2d("float32", 1, dshape, kshape,
+                  padding=(1, 1), channels=32, groups=32, kernel_size=(3, 3))
+
+    dshape = (1, 32, 18, 18)
+    kshape = (32, 4, 3, 3)
+    verify_conv2d("float32", 1, dshape, kshape,
+                  padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3))
+
+    # also group conv2d
+    dshape = (1, 32, 18, 18)
+    kshape = (64, 1, 3, 3)
+    verify_conv2d("float32", 1, dshape, kshape,
+                  padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3))
+
+    # normal conv2d
+    dshape = (1, 3, 224, 224)
+    kshape = (10, 3, 3, 3)
+    verify_conv2d("float32", 1, dshape, kshape,
+                  padding=(1, 1), channels=10, kernel_size=(3, 3))
+
+    dshape = (1, 3, 224, 224)
+    kshape = (10, 3, 3, 3)
+    verify_conv2d("float32", 1, dshap

[GitHub] [incubator-tvm] kazum commented on a change in pull request #5052: [TARGET] ONNX codegen

2020-05-22 Thread GitBox


kazum commented on a change in pull request #5052:
URL: https://github.com/apache/incubator-tvm/pull/5052#discussion_r429168772



##
File path: src/runtime/contrib/onnx/onnx_module.cc
##
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file onnx_module.cc
+ * \brief ONNX Module without runtime support
+ */
+#include 
+#include 
+#include 
+
+namespace tvm {
+namespace codegen {
+using namespace tvm::runtime;
+
+class ONNXSourceModuleNode : public runtime::ModuleNode {
+ public:
+  explicit ONNXSourceModuleNode(String code) : code_(code) {}
+
+  const char* type_key() const { return "onnx"; }
+
+  PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) final {
+    LOG(FATAL) << "ONNX Source module cannot execute, to get executable module"
+               << " build TVM with 'onnx' runtime support";
+    return PackedFunc();
+  }
+
+  std::string GetSource(const std::string& format) final { return code_; }

Review comment:
   Is the content of `code_` the source code of this module?  If not, I think it's better to return an empty string.
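
   A hedged sketch of that fallback, assuming `code_` holds the generated ONNX text rather than this module's C++ source (the `"onnx"` format key is hypothetical):

   ```cpp
   // Sketch only: hand back code_ when the ONNX text is explicitly requested,
   // and an empty string for any other format, per the suggestion above.
   std::string GetSource(const std::string& format) final {
     return format == "onnx" ? code_ : "";
   }
   ```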

##
File path: tests/python/contrib/test_onnx.py
##
@@ -0,0 +1,406 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Relay to ONNX serialization test cases"""
+import pytest
+pytest.importorskip('onnx')
+pytest.importorskip('onnxruntime')
+
+import numpy as np
+import tvm
+from tvm import relay
+from tvm.contrib.target.onnx import to_onnx
+import onnxruntime as rt
+
+
+def func_to_onnx(func, name):
+    mod = tvm.IRModule()
+    mod['main'] = func
+    onnx_model = to_onnx(mod, {}, name, path=None)
+    return onnx_model.SerializeToString()


+def run_onnx(onnx_model, input_data):
+    sess = rt.InferenceSession(onnx_model)
+    input_names = {}
+    for input, data in zip(sess.get_inputs(), input_data):
+        input_names[input.name] = data
+    output_name = sess.get_outputs()[0].name
+    res = sess.run([output_name], input_names)
+    return res[0]


+def run_relay(func, data_tuple):
+    target = 'llvm'
+    ctx = tvm.context('llvm', 0)
+    intrp = relay.create_executor("graph", ctx=ctx, target=target)
+    relay_res = intrp.evaluate(func)(*data_tuple)
+    return relay_res.asnumpy()


+def verify_results(relay_func, indata, test_name, rtol=1e-7, atol=0):
+    relay_res = run_relay(relay_func, indata)
+    onnx_res = run_onnx(func_to_onnx(relay_func, test_name), indata)
+    np.testing.assert_allclose(relay_res, onnx_res, rtol=rtol, atol=atol)


+def test_add():
+    dtype = 'float32'
+    t1 = relay.TensorType((5, 10, 5))
+    t2 = relay.TensorType((5, 10, 5))
+    x = relay.var("x", t1, dtype=dtype)
+    y = relay.var("y", t2, dtype=dtype)
+    z = relay.add(x, y)
+    func = relay.Function([x, y], z)
+
+    x_data = np.random.rand(5, 10, 5).astype(dtype)
+    y_data = np.random.rand(5, 10, 5).astype(dtype)
+
+    verify_results(func, [x_data, y_data], 'test_add')


+def test_bias_add():
+    for dtype in ['float16', 'float32']:
+        xshape = (10, 2, 3, 4)
+        bshape = (2,)
+        rtol = 1e-2 if dtype == 'float16' else 1e-5
+        x = relay.var("x", shape=xshape, dtype=dtype)
+        bias = relay.var("bias", dtype=dtype)
+        z = relay.nn.bias_add(x, bias)
+        func = relay.Function([x, bias], z)
+
+        x_data = np.random.uniform(size=xshape).astype(dtype)
+        y_data = np.random.uniform(size=bshape).astype(dtype)
+
+        ve