This is an automated email from the ASF dual-hosted git repository.
junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new f80771f556 [Frontend][Paddle] Add norm and one_hot_v2 op (#14049)
f80771f556 is described below
commit f80771f5561cc60762388fe7f016dfe5658f5ea7
Author: GaoYuYang <[email protected]>
AuthorDate: Tue Feb 21 01:06:12 2023 +0800
[Frontend][Paddle] Add norm and one_hot_v2 op (#14049)
Add norm op and one_hot_v2 op for paddlepaddle frontend.
---
python/tvm/relay/frontend/paddlepaddle.py | 28 ++++++++++++++++++++++
tests/python/frontend/paddlepaddle/test_forward.py | 26 ++++++++++++++++++++
2 files changed, 54 insertions(+)
diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
old mode 100644
new mode 100755
index 4927a36252..0842cd55da
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1141,6 +1141,32 @@ def convert_mv(g, op, block):
g.add_node(op.output("Out")[0], out)
+def convert_norm(g, op, block):
+ """Operator converter for norm."""
+
+ x = g.get_node(op.input("X")[0])
+ axis = op.attr("axis")
+ axis_l = [axis]
+ epsilon = op.attr("epsilon")
+ out = _op.nn.l2_normalize(x, epsilon, axis_l)
+ g.add_node(op.output("Out")[0], out)
+
+
+def convert_one_hot_v2(g, op, block):
+ """Operator converter for one_hot_v2."""
+
+ x = g.get_node(op.input("X")[0])
+ depth = op.attr("depth")
+ dtype = op.attr("dtype")
+ dtype = _convert_dtype_value(dtype)
+ ndim = len(infer_shape(x))
+ on_value = _op.const(1)
+ off_value = _op.const(0)
+ axis = ndim
+ out = _op.one_hot(x, on_value, off_value, depth, axis, dtype)
+ g.add_node(op.output("Out")[0], out)
+
+
def convert_padding(g, op, block):
"""Operator converter for padding."""
@@ -2135,7 +2161,9 @@ _convert_map = {
"mul": convert_mul,
"mv": convert_mv,
"nearest_interp_v2": convert_interpolate,
+ "norm": convert_norm,
"not_equal": convert_elementwise_op,
+ "one_hot_v2": convert_one_hot_v2,
"pad1d": convert_padding,
"pad2d": convert_padding,
"pad3d": convert_padding,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
old mode 100644
new mode 100755
index de6ea1dcf1..cd2c0be7ef
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -1723,5 +1723,31 @@ def test_forward_topk():
verify_model(topk6, input_data=input_data)
+@tvm.testing.uses_gpu
+def test_forward_one_hot_v2():
+ @paddle.jit.to_static
+ def one_hot_v2_1(inputs):
+ return nn.functional.one_hot(inputs, num_classes=4)
+
+ input_data = paddle.to_tensor([1, 1, 3, 0], dtype=paddle.int32)
+ verify_model(one_hot_v2_1, input_data=input_data)
+
+
+@tvm.testing.uses_gpu
+def test_forward_norm():
+ @paddle.jit.to_static
+ def norm_1(inputs):
+ return paddle.fluid.layers.l2_normalize(inputs, -1, 1e-12)
+
+ def norm_2(inputs):
+ return paddle.fluid.layers.l2_normalize(inputs, 1, 1e-12)
+
+    input_data = paddle.to_tensor(
+        [[[1, 2], [3, 1], [4, 5]], [[3, 1], [3, 5], [2, 4]]], dtype=paddle.float32
+    )
+ verify_model(norm_1, input_data=input_data)
+ verify_model(norm_2, input_data=input_data)
+
+
if __name__ == "__main__":
tvm.testing.main()