This is an automated email from the ASF dual-hosted git repository.
junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 6fa88e38a6 [PaddlePaddle Hackathon 4][Frontend][Paddle] Add thresholded_relu/index_select/eye/linspace/take_along_axis/dist for Paddle frontend (#14172)
6fa88e38a6 is described below
commit 6fa88e38a6de6d6e6ed98a0b7252ab5f8e1679b9
Author: xg <[email protected]>
AuthorDate: Mon Mar 13 04:13:03 2023 +0800
[PaddlePaddle Hackathon 4][Frontend][Paddle] Add thresholded_relu/index_select/eye/linspace/take_along_axis/dist for Paddle frontend (#14172)
Add thresholded_relu/index_select/eye/linspace/take_along_axis/dist for the
Paddle frontend. Note that eye/linspace/take_along_axis are not supported in
Paddle 2.1.3; the test cases all pass under version 2.4.2.
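For context, a minimal sketch of how these converters are reached from user
code. The model, input shapes, and save path below are illustrative, and it
assumes Paddle 2.4.2 (as noted above) plus TVM's relay.frontend.from_paddle
entry point:

    import paddle
    import tvm
    from tvm import relay

    class Dist(paddle.nn.Layer):
        def forward(self, x, y):
            # lowered through the new convert_dist converter (p = 2 here)
            return paddle.dist(x, y, 2)

    # export a static graph, then load it back as a TranslatedLayer
    x_spec = paddle.static.InputSpec([2, 2], "float32", name="x")
    y_spec = paddle.static.InputSpec([2, 2], "float32", name="y")
    static_layer = paddle.jit.to_static(Dist(), input_spec=[x_spec, y_spec])
    paddle.jit.save(static_layer, "./dist_model")
    loaded = paddle.jit.load("./dist_model")

    # import into Relay; the converters added by this commit run here
    mod, params = relay.frontend.from_paddle(loaded, shape_dict={"x": [2, 2], "y": [2, 2]})
    print(mod)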
---
python/tvm/relay/frontend/paddlepaddle.py | 107 ++++++++++++++++
tests/python/frontend/paddlepaddle/test_forward.py | 140 +++++++++++++++++++++
2 files changed, 247 insertions(+)
diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 3c6429246a..a79a58ca14 100755
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -400,6 +400,30 @@ def convert_conv2d_transpose(g, op, block):
g.add_node(op.output("Output")[0], out)
+def convert_dist(g, op, block):
+ """Operator converter for dist."""
+
+ x = g.get_node(op.input("X")[0])
+ y = g.get_node(op.input("Y")[0])
+ z = _op.abs(_op.subtract(x, y))
+ dtype = infer_type(x).checked_type.dtype
+ p = op.attr("p")
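+ # p selects the reduction: +inf/-inf take the max/min of |x - y|,
+ # p == 0 counts the non-zero entries, anything else is the usual p-norm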
+ if p == np.inf:
+ out = _op.reduce.max(z)
+ elif p == np.NINF:
+ out = _op.reduce.min(z)
+ elif p == 0.0:
+ out = _op.reduce.sum(_op.sign(z))
+ else:
+ inv_p = _expr.const(1.0 / p, dtype=dtype)
+ p = _expr.const(p, dtype=dtype)
+ power_z = _op.power(z, p)
+ sum_pow = _op.reduce.sum(power_z)
+ out = _op.power(sum_pow, inv_p)
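+ # Paddle's dist returns a 1-element tensor, so lift the scalar result to shape (1,)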
+ out = _op.full(out, shape=(1,))
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_cumsum(g, op, block):
"""Operator converter for cumsum."""
@@ -475,6 +499,39 @@ def convert_elementwise_op(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_linspace(g, op, block):
+ """Operator converter for linspace."""
+
+ start = g.get_node(op.input("Start")[0])
+ stop = g.get_node(op.input("Stop")[0])
+ num = g.get_node(op.input("Num")[0])
+ dtype = _convert_dtype_value(op.attr("dtype"))
+
+ start = _op.cast(start, dtype)
+ stop = _op.cast(stop, dtype)
+ num = _op.cast(num, dtype)
+
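+ # run the intermediate arithmetic in a float type wide enough for the
+ # requested dtype, so integer outputs still come from evenly spaced values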
+ if dtype in ["int32", "float32"]:
+ tmp_dtype = "float32"
+ else:
+ tmp_dtype = "float64"
+ start = _op.cast(start, tmp_dtype)
+ stop = _op.cast(stop, tmp_dtype)
+ num = _op.cast(num, tmp_dtype)
+ const_one = _expr.const(1, tmp_dtype)
+ seg_num = _op.where(num > const_one, num - const_one, num)
+ seg_len = _op.subtract(stop, start)
+ step_len = _op.divide(seg_len, seg_num)
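+ # argwhere(ones(num)) produces the indices [0, 1, ..., num - 1] as a
+ # (num, 1) tensor, which also works when num is a dynamic expression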
+ step_cnt = _op.argwhere(_op.ones(num, dtype=tmp_dtype))
+ step_cnt = _op.cast(step_cnt, dtype=tmp_dtype)
+ out = _op.multiply(step_len, step_cnt)
+ out = _op.add(start, out)
+ out = _op.squeeze(out, axis=[1])
+ out = _op.cast(out, dtype)
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_elu(g, op, block):
"""Operator converter for elu."""
@@ -514,6 +571,27 @@ def convert_expand_as(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_eye(g, op, block):
+ """Operator converter for eye."""
+
+ num_rows = op.attr("num_rows")
+ num_columns = op.attr("num_columns")
+ one_nums = min(num_rows, num_columns)
+ dtype = op.attr("dtype")
+ dtype = _convert_dtype_value(dtype)
+
+ zeros = _op.zeros((num_rows, num_columns), dtype)
+ if one_nums == 0:
+ out = zeros
+ else:
+ ones = _op.ones(one_nums, dtype)
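+ # scatter the ones onto the diagonal positions (i, i) of the zero matrix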
+ indices = _op.arange(
+ _expr.const(0, dtype="int32"), _expr.const(one_nums, dtype="int32"), dtype="int32"
+ )
+ out = _op.scatter_nd(zeros, _op.stack([indices, indices], axis=0), ones, "update")
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_feed(g, op, block):
"""Converter for model input node."""
@@ -830,6 +908,16 @@ def convert_interpolate(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_index_select(g, op, block):
+ """Operator converter for index_select."""
+
+ x = g.get_node(op.input("X")[0])
+ index = g.get_node(op.input("Index")[0])
+ axis = op.attr("dim")
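+ # take gathers along the requested axis; mode="wrap" keeps out-of-range
+ # (e.g. negative) indices valid by wrapping them around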
+ out = _op.transform.take(x, index, axis, mode="wrap")
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_instance_norm(g, op, block):
"""Operator converter for instance_norm."""
@@ -2072,6 +2160,8 @@ def convert_swish(g, op, block):
def convert_take_along_axis(g, op, block):
+ """Operator converter for take_along_axis."""
+
x = g.get_node(op.input("Input")[0])
idx = g.get_node(op.input("Index")[0])
axis = op.attr("Axis")
@@ -2079,6 +2169,18 @@ def convert_take_along_axis(g, op, block):
g.add_node(op.output("Result")[0], out)
+def convert_thresholded_relu(g, op, block):
+ """Operator converter for thresholded_relu."""
+
+ x = g.get_node(op.input("X")[0])
+ dtype = infer_type(x).checked_type.dtype
+ threshold = op.attr("threshold")
+ threshold = _expr.const(threshold, dtype)
+ zero = _expr.const(0, dtype=dtype)
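+ # thresholded_relu(x) = x if x > threshold else 0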
+ out = _op.where(x > threshold, x, zero)
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_tile(g, op, block):
"""Operator converter for tile."""
@@ -2220,6 +2322,7 @@ _convert_map = {
"cumsum": convert_cumsum,
"depthwise_conv2d": convert_conv2d,
"depthwise_conv2d_transpose": convert_conv2d_transpose,
+ "dist": convert_dist,
"dot": convert_dot,
"dropout": convert_dropout,
"elementwise_add": convert_elementwise_op,
@@ -2238,6 +2341,7 @@ _convert_map = {
"exp": convert_unary_op,
"expand_v2": convert_expand,
"expand_as_v2": convert_expand_as,
+ "eye": convert_eye,
"feed": convert_feed,
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
@@ -2254,6 +2358,7 @@ _convert_map = {
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
"hard_swish": convert_hard_swish,
+ "index_select": convert_index_select,
"instance_norm": convert_instance_norm,
"isfinite_v2": convert_unary_op,
"isinf_v2": convert_unary_op,
@@ -2262,6 +2367,7 @@ _convert_map = {
"leaky_relu": convert_leaky_relu,
"less_equal": convert_elementwise_op,
"less_than": convert_elementwise_op,
+ "linspace": convert_linspace,
"log": convert_unary_op,
"log2": convert_unary_op,
"log10": convert_unary_op,
@@ -2333,6 +2439,7 @@ _convert_map = {
"tan": convert_unary_op,
"tanh": convert_unary_op,
"top_k": convert_topk,
+ "thresholded_relu": convert_thresholded_relu,
"tile": convert_tile,
"top_k_v2": convert_topk,
"transpose2": convert_transpose,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 392db76942..3ee20124dc 100755
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -1992,5 +1992,145 @@ def test_forward_mish():
verify_model(Mish(), input_data=input_data)
+@tvm.testing.uses_gpu
+def test_forward_thresholded_relu():
+ class ThresholdedRelu1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return nn.functional.thresholded_relu(inputs)
+
+ class ThresholdedRelu2(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return nn.functional.thresholded_relu(inputs, threshold=0.5)
+
+ input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
+ for input_shape in input_shapes:
+ input_data = paddle.randn(shape=input_shape, dtype="float32")
+ verify_model(ThresholdedRelu1(), input_data=input_data)
+ verify_model(ThresholdedRelu2(), input_data=input_data)
+
+
+@tvm.testing.uses_gpu
+def test_forward_index_select():
+ class IndexSelect1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, x, index):
+ return paddle.index_select(x, index, axis=0)
+
+ class IndexSelect2(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, x, index):
+ return paddle.index_select(x, index, axis=-1)
+
+ input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
+ for input_shape in input_shapes:
+ input_data = paddle.randn(shape=input_shape, dtype="float32")
+ index = paddle.to_tensor([0, 1, 1], dtype="int32")
+ verify_model(IndexSelect1(), input_data=[input_data, index])
+ verify_model(IndexSelect2(), input_data=[input_data, index])
+
+
+@tvm.testing.uses_gpu
+def test_forward_eye():
+ class Eye1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return paddle.eye(3, 5, dtype="int32"), paddle.eye(3, 5, dtype="float32"), inputs
+
+ class Eye2(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return paddle.eye(5, 3, dtype="int64"), paddle.eye(5, 3, dtype="float64"), inputs
+
+ class Eye3(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return paddle.eye(0, 3, dtype="int64"), paddle.eye(0, 0, dtype="float64"), inputs
+
+ class Eye4(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ return paddle.eye(4, None, dtype="int64"), paddle.eye(4, None, dtype="float64"), inputs
+
+ x = paddle.to_tensor([1], dtype="float32")
+ verify_model(Eye1(), input_data=[x])
+ verify_model(Eye2(), input_data=[x])
+ verify_model(Eye3(), input_data=[x])
+ verify_model(Eye4(), input_data=[x])
+
+
+@tvm.testing.uses_gpu
+def test_forward_linspace():
+ class Linspace1(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ out1 = paddle.linspace(0.5, 7, 1, "int32")
+ out2 = paddle.linspace(1.3, 7.1, 5, "float32")
+ out3 = paddle.linspace(1, 1000000000, 10, "int64")
+ out4 = paddle.linspace(1, 7.1, 5, "float64")
+ return out1, out2, out3, out4, inputs
+
+ class Linspace2(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, inputs):
+ start = paddle.to_tensor([-2.5])
+ stop = paddle.to_tensor([31.6])
+ num = paddle.to_tensor([13])
+ start = paddle.cast(start, "float32")
+ stop = paddle.cast(stop, "float32")
+ num = paddle.cast(num, "int32")
+ out1 = paddle.linspace(start, stop, num, "int32")
+ out2 = paddle.linspace(start, stop, num, "float32")
+ out3 = paddle.linspace(start, stop, num, "int64")
+ out4 = paddle.linspace(start, stop, num, "float64")
+ return out1, out2, out3, out4, inputs
+
+ class Linspace3(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, start, stop, num):
+ out1 = paddle.linspace(start, stop, num, "int32")
+ out2 = paddle.linspace(start, stop, num, "float32")
+ out3 = paddle.linspace(start, stop, num, "int64")
+ out4 = paddle.linspace(start, stop, num, "float64")
+ return out1
+
+ start = paddle.to_tensor([1.3])
+ stop = paddle.to_tensor([5.1])
+ num = paddle.to_tensor([3])
+ start = paddle.cast(start, "float32")
+ stop = paddle.cast(stop, "float32")
+ num = paddle.cast(num, "int32")
+ x = paddle.to_tensor([1], dtype="float32")
+ verify_model(Linspace1(), input_data=[x])
+ verify_model(Linspace2(), input_data=[x])
+ verify_model(Linspace3(), input_data=[start, stop, num], use_vm=True)
+ num = paddle.to_tensor([1])
+ num = paddle.cast(num, "int32")
+ verify_model(Linspace3(), input_data=[start, stop, num], use_vm=True)
+
+
+@tvm.testing.uses_gpu
+def test_forward_dist():
+ class Dist(nn.Layer):
+ @paddle.jit.to_static
+ def forward(self, x, y):
+ l0_norm = paddle.dist(x, y, 0)
+ l2_norm = paddle.dist(x, y, 2)
+ float_norm = paddle.dist(x, y, 1.3)
+ inf_norm = paddle.dist(x, y, float("inf"))
+ ninf_norm = paddle.dist(x, y, float("-inf"))
+ return l0_norm, l2_norm, float_norm, inf_norm, ninf_norm
+
+ x = paddle.to_tensor([[3, 3], [3, 3]], dtype="float32")
+ y = paddle.to_tensor([[1, 2], [3, 4]], dtype="float32")
+ w = paddle.to_tensor([[1, 2]], dtype="float32")
+ v = paddle.to_tensor([[2.1]], dtype="float32")
+ verify_model(Dist(), input_data=[x, y])
+ verify_model(Dist(), input_data=[x, w])
+ verify_model(Dist(), input_data=[w, v])
+ verify_model(Dist(), input_data=[y, v])
+
+
if __name__ == "__main__":
tvm.testing.main()