This is an automated email from the ASF dual-hosted git repository.
masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new ffc0443913 [Frontend] [PaddlePaddle] Add split operator (#11354)
ffc0443913 is described below
commit ffc0443913d837c6b7a6ec55375ea29cf3d1fa7c
Author: heliqi <[email protected]>
AuthorDate: Thu May 19 03:53:03 2022 -0500
[Frontend] [PaddlePaddle] Add split operator (#11354)
* support split op of paddlepaddle
* black formatting
---
python/tvm/relay/frontend/paddlepaddle.py | 45 ++++++++++++++++++++++
tests/python/frontend/paddlepaddle/test_forward.py | 38 ++++++++++++++++++
2 files changed, 83 insertions(+)
diff --git a/python/tvm/relay/frontend/paddlepaddle.py
b/python/tvm/relay/frontend/paddlepaddle.py
index 7f2460d66e..7042154709 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1920,6 +1920,50 @@ def convert_softsign(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_split(g, op, block):
+ """Operator converter for split."""
+
+ x = g.get_node(op.input("X")[0])
+ axis = op.input("AxisTensor")
+ if axis:
+ axis = g.get_node(axis[0])
+ axis, infered = try_infer_value(axis, g.get_params())
+ if infered:
+ axis = axis.tolist()[0]
+ else:
+ axis = op.attr("axis")
+
+ sections = op.input("SectionsTensorList")
+ if sections:
+ tmp_section = []
+ for i in sections:
+ i = g.get_node(i)
+ i, infered = try_infer_value(i, g.get_params())
+ if infered:
+ i = i.tolist()
+ else:
+ raise ValueError("Dynamic Split not yet supported.")
+ tmp_section.extend(i)
+ sections = tmp_section
+ else:
+ sections = op.attr("sections")
+ if sections:
+ indices = []
+ split_index = 0
+ for i in sections[:-1]:
+ if i == -1:
+ input_shape = infer_shape(x)[axis]
+ i = input_shape - np.sum(sections) - 1
+ split_index += i
+ indices.append(split_index)
+ else:
+ indices = op.attr("num")
+
+ out = _op.split(x, indices, axis)
+ for i, out_i in enumerate(out):
+ g.add_node(op.output("Out")[i], out_i)
+
+
def convert_square(g, op, block):
"""Operator converter for square."""
@@ -2092,6 +2136,7 @@ _convert_map = {
"softmax": convert_softmax,
"softplus": convert_softplus,
"softsign": convert_softsign,
+ "split": convert_split,
"strided_slice": convert_slice,
"sqrt": convert_unary_op,
"square": convert_square,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py
b/tests/python/frontend/paddlepaddle/test_forward.py
index 9fa4063755..0f243e0ea0 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -782,6 +782,44 @@ def test_forward_shape_full():
verify_model(full2, input_data=[input_data])
[email protected]_gpu
+def test_forward_split():
+ class Split(nn.Layer):
+ def __init__(
+ self, axis=None, num_or_sections=None, axis_is_tensor=False,
num_is_tensor=False
+ ):
+ super(Split, self).__init__()
+ self.axis = axis
+ self.num_or_sections = num_or_sections
+ self.axis_is_tensor = axis_is_tensor
+ self.num_is_tensor = num_is_tensor
+
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ axis = self.axis
+ if self.axis_is_tensor:
+ axis = paddle.to_tensor(axis, dtype="int32")
+ num_or_sections = self.num_or_sections
+ if self.num_is_tensor:
+ new_num_or_sections = []
+ for i in num_or_sections:
+ if isinstance(i, list):
+ i = paddle.to_tensor(i, dtype="int32")
+ new_num_or_sections.append(i)
+ num_or_sections = new_num_or_sections
+ return paddle.split(inputs, num_or_sections=num_or_sections,
axis=axis)
+
+ input_shape = [3, 6, 2]
+ input_data = paddle.rand(input_shape, dtype="float32")
+ verify_model(Split(axis=1, num_or_sections=3), input_data=input_data)
+ verify_model(
+ Split(axis=[1], num_or_sections=[2, 3, 1], axis_is_tensor=True),
input_data=input_data
+ )
+ verify_model(
+ Split(axis=1, num_or_sections=[2, -1, [3]], num_is_tensor=True),
input_data=input_data
+ )
+
+
@tvm.testing.uses_gpu
def test_forward_squeeze():
class Squeeze(nn.Layer):