This is an automated email from the ASF dual-hosted git repository.

masahi pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 1453fe0780 [PaddlePaddle Hackathon 4][Frontend][Paddle]Add 
tile/mish/stack/unstack/silu/softshrink/where op for paddle frontend (#14160)
1453fe0780 is described below

commit 1453fe07807088c248dfd1ed5cc4785d5a07fd3b
Author: xg <[email protected]>
AuthorDate: Thu Mar 9 07:18:06 2023 +0800

    [PaddlePaddle Hackathon 4][Frontend][Paddle]Add 
tile/mish/stack/unstack/silu/softshrink/where op for paddle frontend (#14160)
    
    * [Frontend][Paddle]Add tile/mish/stack/unstack/silu/softshrink/where op 
for paddle frontend
    
    * fix convert tile and update test case
    
    * fix test case and tile dtype
    
    * remove mish and tile
    
    * fix tensor type error in test case
    
    * optimize convert softshrink
    
    * add convert mish and test case
    
    * optimize mish implementation
    
    * modify mish test case
---
 python/tvm/relay/frontend/paddlepaddle.py          | 106 ++++++++++++
 tests/python/frontend/paddlepaddle/test_forward.py | 186 +++++++++++++++++++++
 2 files changed, 292 insertions(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 78895e4b49..4b849987ed 100755
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1084,6 +1084,19 @@ def convert_meshgrid(g, op, block):
         g.add_node(op.output("Out")[i], out)
 
 
def convert_mish(g, op, block):
    """Operator converter for mish: out = x * tanh(softplus(x))."""

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    one = _expr.const(1.0, dtype)
    # softplus(x) = log(1 + exp(x))
    softplus = _op.log(_op.add(_op.exp(x), one))
    result = _op.multiply(x, _op.tanh(softplus))
    g.add_node(op.output("Out")[0], result)
+
 def convert_mul(g, op, block):
     """Operator converter for mul."""
 
@@ -1785,6 +1798,14 @@ def convert_shape(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_silu(g, op, block):
    """Operator converter for silu: out = x * sigmoid(x)."""

    inp = g.get_node(op.input("X")[0])
    result = _op.multiply(inp, _op.sigmoid(inp))
    g.add_node(op.output("Out")[0], result)
+
 def convert_size(g, op, block):
     """Operator converter for size."""
 
@@ -1950,6 +1971,19 @@ def convert_softsign(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_softshrink(g, op, block):
    """Operator converter for softshrink."""

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    lam = _expr.const(op.attr("lambda"), dtype=dtype)
    zero = _op.zeros_like(x)
    # Shift values below -lambda up and values above +lambda down;
    # everything inside [-lambda, lambda] maps to zero.
    below = _op.where(x < -lam, x + lam, zero)
    above = _op.where(x > lam, x - lam, zero)
    g.add_node(op.output("Out")[0], below + above)
+
 def convert_split(g, op, block):
     """Operator converter for split."""
 
@@ -1994,6 +2028,18 @@ def convert_split(g, op, block):
         g.add_node(op.output("Out")[i], out_i)
 
 
def convert_stack(g, op, block):
    """Operator converter for stack.

    Joins all input tensors "X" along a new axis given by attribute
    "axis" and writes the stacked tensor to output "Y".
    """
    # Fix: parameter was misspelled "blcok"; renamed to match every
    # other converter's (g, op, block) signature.
    inputs = [g.get_node(name) for name in op.input("X")]
    axis = op.attr("axis")
    out = _op.stack(inputs, axis)
    g.add_node(op.output("Y")[0], out)
+
 def convert_square(g, op, block):
     """Operator converter for square."""
 
@@ -2025,6 +2071,37 @@ def convert_swish(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_tile(g, op, block):
    """Operator converter for tile.

    Repeat times may come from (in priority order) the "RepeatTimes"
    input tensor, the "repeat_times_tensor" input list, or the
    "repeat_times" attribute. Tensor-valued reps must be inferable to
    constants; otherwise OpAttributeInvalid is raised.
    """

    x = g.get_node(op.input("X")[0])
    if op.input("RepeatTimes"):
        reps = g.get_node(op.input("RepeatTimes")[0])
        reps, infered = try_infer_value(reps, g.get_params())
        if infered:
            reps = reps.tolist()
    elif op.input("repeat_times_tensor"):
        reps = []
        for rep_value in op.input("repeat_times_tensor"):
            rep_value = g.get_node(rep_value).astype("int32")
            reps.append(rep_value)
        reps = _op.concatenate(reps, axis=0)
        reps, infered = try_infer_value(reps, g.get_params())
        if infered:
            reps = reps.tolist()
    else:
        reps = op.attr("repeat_times")
        infered = True

    if not infered:
        # Fix: this literal had been split across two lines by email
        # wrapping, which is invalid Python; restored to one line.
        msg = 'Value {} in attribute "repeat_times" of operator Tile is not "valid."'
        raise tvm.error.OpAttributeInvalid(msg.format(reps))

    op_func = get_relay_op(op.type)
    out = op_func(x, reps=reps)
    g.add_node(op.output("Out")[0], out)
+
 def convert_topk(g, op, block):
     """Operator converter for topk."""
 
@@ -2074,6 +2151,28 @@ def convert_unsqueeze(g, op, block):
     g.add_node(op.output("Out")[0], x)
 
 
def convert_unstack(g, op, block):
    """Operator converter for unstack."""

    x = g.get_node(op.input("X")[0])
    axis = op.attr("axis")
    out_names = op.output("Y")
    # Split into one slice per output, then drop the unit axis.
    parts = _op.split(x, indices_or_sections=len(out_names), axis=axis)
    for name, part in zip(out_names, parts):
        g.add_node(name, _op.squeeze(part, axis=axis))
+
def convert_where(g, op, block):
    """Operator converter for where: select from X where Condition, else Y."""

    cond = g.get_node(op.input("Condition")[0])
    lhs = g.get_node(op.input("X")[0])
    rhs = g.get_node(op.input("Y")[0])
    g.add_node(op.output("Out")[0], _op.where(cond, lhs, rhs))
+
 def convert_where_index(g, op, block):
     """Operator converter for where_index."""
 
@@ -2166,6 +2265,7 @@ _convert_map = {
     "matmul": convert_matmul,
     "matmul_v2": convert_matmul,
     "meshgrid": convert_meshgrid,
+    "mish": convert_mish,
     "mul": convert_mul,
     "mv": convert_mv,
     "nearest_interp_v2": convert_interpolate,
@@ -2201,6 +2301,7 @@ _convert_map = {
     "shape": convert_shape,
     "sigmoid": convert_unary_op,
     "sign": convert_unary_op,
+    "silu": convert_silu,
     "sin": convert_unary_op,
     "sinh": convert_unary_op,
     "size": convert_size,
@@ -2208,7 +2309,9 @@ _convert_map = {
     "softmax": convert_softmax,
     "softplus": convert_softplus,
     "softsign": convert_softsign,
+    "softshrink": convert_softshrink,
     "split": convert_split,
+    "stack": convert_stack,
     "strided_slice": convert_slice,
     "sqrt": convert_unary_op,
     "square": convert_square,
@@ -2216,9 +2319,12 @@ _convert_map = {
     "swish": convert_swish,
     "tan": convert_unary_op,
     "tanh": convert_unary_op,
+    "tile": convert_tile,
     "top_k_v2": convert_topk,
     "transpose2": convert_transpose,
     "unsqueeze2": convert_unsqueeze,
+    "unstack": convert_unstack,
+    "where": convert_where,
     "where_index": convert_where_index,
 }
 
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 28867848e1..d21323d7ba 100755
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -1783,5 +1783,191 @@ def test_forward_where_index():
     verify_model(where_index_1, input_data=input_data, use_vm=True)
 
 
@tvm.testing.uses_gpu
def test_forward_stack():
    """Verify paddle.stack lowering for negative and positive axes."""
    # Fix: the mail archive redacted "+@tvm.testing" into
    # "[email protected]"; decorator restored.

    class Stack1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, input0, input1, input2):
            return paddle.stack([input0, input1, input2], axis=-1)

    class Stack2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, input0, input1, input2):
            return paddle.stack([input0, input1, input2], axis=1)

    class Stack3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, input0, input1, input2):
            return paddle.stack([input0, input1, input2], axis=2)

    input_shapes = [[2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        input_data_0 = paddle.randn(shape=input_shape, dtype="float32")
        input_data_1 = paddle.randn(shape=input_shape, dtype="float32")
        input_data_2 = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(Stack1(), [input_data_0, input_data_1, input_data_2])
        verify_model(Stack2(), [input_data_0, input_data_1, input_data_2])
        verify_model(Stack3(), [input_data_0, input_data_1, input_data_2])
+
+
@tvm.testing.uses_gpu
def test_forward_unstack():
    """Verify paddle.unstack lowering across axes and ranks."""
    # Fix: decorator restored from email-address redaction.

    class UnStack1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.unstack(inputs, axis=-1)

    class UnStack2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.unstack(inputs, axis=1)

    class UnStack3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.unstack(inputs, axis=0)

    input_shapes = [[2, 3], [5, 10, 11], [3, 4, 5, 6], [1, 3, 4, 1, 1]]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(UnStack1(), input_data)
        verify_model(UnStack2(), input_data)
        verify_model(UnStack3(), input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_silu():
    """Verify nn.functional.silu lowering over several input ranks."""
    # Fix: decorator restored from email-address redaction.

    class Silu(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.silu(inputs)

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(Silu(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_softshrink():
    """Verify softshrink lowering for zero, default, and large thresholds."""
    # Fix: decorator restored from email-address redaction.

    @paddle.jit.to_static
    def Softshrink1(input):
        return nn.functional.softshrink(input, threshold=0.0)

    @paddle.jit.to_static
    def Softshrink2(input):
        return nn.functional.softshrink(input, threshold=0.5)

    @paddle.jit.to_static
    def Softshrink3(input):
        return nn.functional.softshrink(input, threshold=1.0)

    x = paddle.to_tensor([-0.9, -0.2, 0.1, 0.8])
    verify_model(Softshrink2, x)

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(Softshrink1, input_data=input_data)
        verify_model(Softshrink2, input_data=input_data)
        verify_model(Softshrink3, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_where():
    """Verify paddle.where lowering with scalar and tensor conditions."""
    # Fix: decorator restored from email-address redaction.

    @paddle.jit.to_static
    def where1(x, y):
        return paddle.where(x > 1, x, y)

    @paddle.jit.to_static
    def where2(x, y):
        return paddle.where(x > y, x, y)

    x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
    y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])
    verify_model(where1, [x, y])

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    for input_shape in input_shapes:
        x = paddle.randn(shape=input_shape, dtype="float32")
        y = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(where1, [x, y])
        verify_model(where2, [x, y])
+
+
@tvm.testing.uses_gpu
def test_forward_tile():
    """Verify paddle.tile lowering for attribute, tensor, and tensor-list reps."""
    # Fix: decorator restored from email-address redaction.

    class Tile1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.tile(inputs, repeat_times=[10])

    class Tile2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.tile(inputs, repeat_times=[2, 3])

    class Tile3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.tile(inputs, repeat_times=[1, 2, 3])

    class Tile4(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.tile(inputs, repeat_times=[2, 3, 4, 1, 5])

    class Tile5(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            # Exercises the "RepeatTimes" tensor input path.
            reps = paddle.to_tensor([3, 2])
            reps = paddle.cast(reps, "int32")
            return paddle.tile(inputs, repeat_times=reps)

    class Tile6(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            # Exercises the "repeat_times_tensor" list-of-tensors path.
            rep_0 = paddle.to_tensor([3])
            rep_1 = paddle.to_tensor([2])
            rep_0 = paddle.cast(rep_0, "int32")
            rep_1 = paddle.cast(rep_1, "int32")
            return paddle.tile(inputs, repeat_times=[rep_0, rep_1])

    input_shapes = [
        [10],
        [2, 3],
        [3, 4, 5],
        [5, 3, 1, 4],
        [1, 3, 1, 6, 7],
    ]
    for input_shape in input_shapes:
        input_data = paddle.randn(shape=input_shape, dtype="float32")
        verify_model(Tile1(), input_data=input_data)
        verify_model(Tile2(), input_data=input_data)
        verify_model(Tile3(), input_data=input_data)
        verify_model(Tile4(), input_data=input_data)
        verify_model(Tile5(), input_data=input_data)
        verify_model(Tile6(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_mish():
    """Verify mish lowering, including large inputs past the softplus knee."""
    # Fix: decorator restored from email-address redaction.

    class Mish(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return nn.functional.mish(inputs)

    input_shapes = [[10], [2, 3], [5, 10, 11], [3, 4, 5, 6]]
    # Paddle's mish numerics changed before 2.4.2; skip on older versions.
    if paddle.version.full_version >= "2.4.2":
        for input_shape in input_shapes:
            input_data = paddle.randn(shape=input_shape, dtype="float32")
            verify_model(Mish(), input_data=input_data)
            input_data += 20.0
            verify_model(Mish(), input_data=input_data)

        input_data = paddle.to_tensor([-5.0, 0.0, 5.0, 23.1, 20.0])
        verify_model(Mish(), input_data=input_data)
+
+
 if __name__ == "__main__":
     tvm.testing.main()

Reply via email to