This is an automated email from the ASF dual-hosted git repository.
andrewzhaoluo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 0e27bf5ee9 [frontend][ONNX]support ConvTranspose explicitly specified output_shape (#11076)
0e27bf5ee9 is described below
commit 0e27bf5ee9fed303c8ec21687b1705b806fcd02f
Author: ah cheng <[email protected]>
AuthorDate: Tue May 17 00:37:04 2022 +0800
[frontend][ONNX]support ConvTranspose explicitly specified output_shape (#11076)
* support ConvTranspose explicitly specified output_shape
* fix unit test case
* fix lint test
* retest
* fix code error
* fix lint test
* update test
* retest
* fix test onnx official tests
---
python/tvm/relay/frontend/onnx.py | 85 ++++++++++++++++++++++--------
tests/python/frontend/onnx/test_forward.py | 65 ++++++++++++++++++++++-
2 files changed, 128 insertions(+), 22 deletions(-)
diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py
index 233067959f..81f12c2d81 100644
--- a/python/tvm/relay/frontend/onnx.py
+++ b/python/tvm/relay/frontend/onnx.py
@@ -648,19 +648,45 @@ class ConvTranspose(OnnxOpConverter):
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
- if "auto_pad" in attr:
- attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
- if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
+ if "auto_pad" in attr or "output_shape" in attr:
+ if "auto_pad" in attr:
+ attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
+            if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
- data = autopad(
- data,
- attr.get("strides", [1] * (ndim - 2)),
- attr["kernel_shape"],
- attr.get("dilations", [1] * (ndim - 2)),
- deconv=True,
- mode=attr["auto_pad"],
- )
+ kernel_shape = attr["kernel_shape"]
+ kndim = len(kernel_shape)
+ dilations = attr.get("dilations", [1] * kndim)
+ output_padding = attr.get("output_padding", [0] * kndim)
+ strides = attr["strides"]
+ total_pad = [0] * kndim
+                # https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
+ if "output_shape" in attr:
+ for i in range(kndim):
+ total_pad[i] = (
+ strides[i] * (input_shape[ndim - kndim + i] - 1)
+ + output_padding[i]
+ + ((kernel_shape[i] - 1) * dilations[i] + 1)
+ - attr["output_shape"][i]
+ )
+ left = [p // 2 for p in total_pad]
+ right = [total_pad[i] - left[i] for i in range(kndim)]
+ if "output_shape" in attr and "auto_pad" not in attr:
+ pad = right + left
+ elif "LOWER" in attr["auto_pad"]:
+ pad = left + right
+ else:
+ pad = right + left
+ attr["pads"] = pad
+ else:
+ data = autopad(
+ data,
+ attr.get("strides", [1] * (ndim - 2)),
+ attr["kernel_shape"],
+ attr.get("dilations", [1] * (ndim - 2)),
+ deconv=True,
+ mode=attr["auto_pad"],
+ )
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
@@ -668,7 +694,8 @@ class ConvTranspose(OnnxOpConverter):
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
- attr.pop("auto_pad")
+ if "auto_pad" in attr:
+ attr.pop("auto_pad")
out = AttrCvt(
op_name=dimension_picker("conv", "_transpose"),
@@ -703,9 +730,10 @@ class ConvTranspose(OnnxOpConverter):
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
- if "auto_pad" in attr:
- attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
- if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
+ if "auto_pad" in attr or "output_shape" in attr:
+ if "auto_pad" in attr:
+ attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
+            if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
kernel_shape = attr["kernel_shape"]
@@ -714,13 +742,27 @@ class ConvTranspose(OnnxOpConverter):
output_padding = attr.get("output_padding", [0] * kndim)
strides = attr["strides"]
total_pad = [0] * kndim
- for i in range(kndim):
- total_pad[i] = (
-                    output_padding[i] + ((kernel_shape[i] - 1) * dilations[i] + 1) - strides[i]
- )
+                # https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
+ if "output_shape" in attr:
+ for i in range(kndim):
+ total_pad[i] = (
+ strides[i] * (input_shape[ndim - kndim + i] - 1)
+ + output_padding[i]
+ + ((kernel_shape[i] - 1) * dilations[i] + 1)
+ - attr["output_shape"][i]
+ )
+ else:
+ for i in range(kndim):
+ total_pad[i] = (
+ output_padding[i]
+ + ((kernel_shape[i] - 1) * dilations[i] + 1)
+ - strides[i]
+ )
left = [p // 2 for p in total_pad]
right = [total_pad[i] - left[i] for i in range(kndim)]
- if "LOWER" in attr["auto_pad"]:
+ if "output_shape" in attr and "auto_pad" not in attr:
+ pad = right + left
+ elif "LOWER" in attr["auto_pad"]:
pad = left + right
else:
pad = right + left
@@ -732,7 +774,8 @@ class ConvTranspose(OnnxOpConverter):
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
- attr.pop("auto_pad")
+ if "auto_pad" in attr:
+ attr.pop("auto_pad")
out = AttrCvt(
op_name=dimension_picker("conv", "_transpose"),
diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py
index ec5d2b6ae2..643dfe820b 100644
--- a/tests/python/frontend/onnx/test_forward.py
+++ b/tests/python/frontend/onnx/test_forward.py
@@ -2833,6 +2833,48 @@ def test_conv(target, dev):
@tvm.testing.parametrize_targets
def test_convtranspose(target, dev):
+ def verify_convtranspose_with_output_shape(
+ x_shape,
+ w_shape,
+ output_shape,
+ kernel_shape,
+ strides,
+ dilations,
+ auto_pad="SAME_UPPER",
+ group=1,
+ ):
+ node = helper.make_node(
+ "ConvTranspose",
+ inputs=["x", "W"],
+ outputs=["y"],
+ kernel_shape=kernel_shape,
+ # Default values for other attributes:
+ strides=strides,
+ dilations=dilations,
+ output_shape=output_shape,
+ auto_pad=auto_pad,
+ )
+
+ if group is not None:
+ group_attr = helper.make_attribute("group", group)
+ node.attribute.append(group_attr)
+
+ graph = helper.make_graph(
+ [node],
+ "ConvTranspose_with_output_shape_test",
+ inputs=[
+                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
+                helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
+ ],
+ outputs=[
+                helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1] + list(output_shape))
+ ],
+ )
+
+        model = helper.make_model(graph, producer_name="convtranspose_output_shape_test")
+
+        verify_with_ort(model, [x_shape, w_shape], use_vm=True, target=target, dev=dev)
+
def verify_convtranspose_with_padding(
x_shape,
w_shape,
@@ -2996,6 +3038,28 @@ def test_convtranspose(target, dev):
# repeat(2, D),
# )
+ # Convolution with output_shape
+ for D in [1, 2, 3]:
+ for N in range(60, 66):
+ verify_convtranspose_with_output_shape(
+ (1, 1) + repeat(32, D),
+ (1, 1) + repeat(4, D),
+ repeat(N, D),
+ repeat(4, D),
+ repeat(2, D),
+ repeat(1, D),
+ )
+
+ verify_convtranspose_with_output_shape(
+ (1, 1) + repeat(32, D),
+ (1, 1) + repeat(4, D),
+ repeat(N, D),
+ repeat(4, D),
+ repeat(2, D),
+ repeat(1, D),
+ auto_pad="SAME_LOWER",
+ )
+
@tvm.testing.parametrize_targets
def test_unsqueeze_constant(target, dev):
@@ -5053,7 +5117,6 @@ unsupported_onnx_tests = [
"test_castlike_STRING_to_FLOAT_expanded",
"test_convtranspose_autopad_same",
"test_convtranspose_dilations",
- "test_convtranspose_output_shape",
"test_cumsum_1d",
"test_cumsum_1d_exclusive",
"test_cumsum_1d_reverse",