This is an automated email from the ASF dual-hosted git repository.
masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git
The following commit(s) were added to refs/heads/main by this push:
new b782e4f add onnx resize v10 and unit test (#6726)
b782e4f is described below
commit b782e4f347cff527ecbf4829600ff2af4949c1d7
Author: Matthew Brookhart <[email protected]>
AuthorDate: Sun Oct 25 14:52:05 2020 -0600
add onnx resize v10 and unit test (#6726)
---
python/tvm/relay/frontend/onnx.py | 23 ++++++++++++++++++++---
tests/python/frontend/onnx/test_forward.py | 30 ++++++++++++++++++++++++++++++
2 files changed, 50 insertions(+), 3 deletions(-)
diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py
index e2c6b9a..2d21156 100644
--- a/python/tvm/relay/frontend/onnx.py
+++ b/python/tvm/relay/frontend/onnx.py
@@ -1871,6 +1871,25 @@ class Resize(OnnxOpConverter):
"""Operator converter for Resize"""
@classmethod
+ def _impl_v10(cls, inputs, attr, params):
+ mode = attr.get("mode")
+ if mode == b"nearest":
+ method = "nearest_neighbor"
+ elif mode == b"linear":
+ method = "bilinear"
+ else:
+ raise tvm.error.OpAttributeInvalid(
+ 'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode)
+ )
+
+ scale = inputs[1]
+ size = _op.cast(_op.shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
+
+ layout = "NCHW" # ONNX assumes NCHW layout
+ out_size = _op.strided_slice(size, [2], [4])
+ return _op.image.resize(inputs[0], out_size, layout, method, "asymmetric")
+
+ @classmethod
def _impl_v11(cls, inputs, attr, params):
mode = attr.get("mode")
if mode == b"nearest":
@@ -1891,9 +1910,7 @@ class Resize(OnnxOpConverter):
size = inputs[3]
else:
assert len(scale_shape) != 0, "One of scale or size should be passed."
- size = (
- _op.cast(_op.shape_of(inputs[0]), infer_type(scale).type_annotation.dtype) * scale
- )
+ size = _op.cast(_op.shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
coord_trans = attr.get("coordinate_transformation_mode")
if coord_trans in [b"pytorch_half_pixel", b"half_pixel"]:
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index 81b5186..bf27ba5 100644
--- a/tests/python/frontend/onnx/test_forward.py
+++ b/tests/python/frontend/onnx/test_forward.py
@@ -3525,6 +3525,36 @@ def test_resize():
verify([1, 16, 32, 32], [], [1, 1, 2, 2], "nearest", "asymmetric")
verify([1, 16, 32, 32], [], [1, 1, 0.5, 0.5], "linear", "half_pixel")
+ def verify_opset_10(ishape, scales, mode):
+ nodes = [
+ make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
+ ]
+ input_names = ["X", "scales"]
+ nodes.append(
+ helper.make_node(
+ "Resize",
+ inputs=input_names,
+ outputs=["Y"],
+ mode=mode,
+ )
+ )
+
+ oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
+ graph = helper.make_graph(
+ nodes,
+ "resize_test",
+ inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
+ outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
+ )
+
+ model = helper.make_model(graph, producer_name="resize_test")
+ model.opset_import[0].version = 10
+
+ verify_with_ort(model, [ishape], oshape, use_vm=True, freeze_params=True)
+
+ verify_opset_10([1, 16, 32, 32], [1, 1, 2, 2], "nearest")
+ verify_opset_10([1, 16, 32, 32], [1, 1, 0.5, 0.5], "linear")
+
@tvm.testing.uses_gpu
def test_nonzero():