This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 06fabe4c5a [PaddlePaddle Hackathon 4][Frontend][Paddle]add 
grid-sample/gaussian_random/flip/fill_zeros_like/unique for paddle frontend 
(#14277)
06fabe4c5a is described below

commit 06fabe4c5a34db8ce33327b7022f63b7539c07e8
Author: MayYouBeProsperous 
<[email protected]>
AuthorDate: Mon Mar 13 10:32:46 2023 +0800

    [PaddlePaddle Hackathon 4][Frontend][Paddle]add 
grid-sample/gaussian_random/flip/fill_zeros_like/unique for paddle frontend 
(#14277)
    
    Add grid-sample/gaussian_random/flip/fill_zeros_like/unique for paddle 
frontend.
---
 python/tvm/relay/frontend/paddlepaddle.py          | 102 ++++++++++++++++++++
 tests/python/frontend/paddlepaddle/test_forward.py | 106 +++++++++++++++++++++
 2 files changed, 208 insertions(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py 
b/python/tvm/relay/frontend/paddlepaddle.py
index a79a58ca14..f771e605ca 100755
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -680,6 +680,17 @@ def convert_fill_constant_batch_size_like(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_fill_zeros_like(g, op, block):
    """Operator converter for fill_zeros_like.

    Produces a tensor of zeros with the same shape as the input ``X``,
    cast to the dtype requested by the operator's ``dtype`` attribute.
    """

    input_node = g.get_node(op.input("X")[0])
    target_dtype = _convert_dtype_value(op.attr("dtype"))
    zero = _expr.const(0, dtype=target_dtype)
    # full_like inherits the input's dtype, so cast to the requested one.
    zeros = _op.transform.full_like(input_node, zero).astype(target_dtype)
    g.add_node(op.output("Out")[0], zeros)
+
+
 def convert_flatten(g, op, block):
     """Operator converter for flatten."""
 
@@ -707,6 +718,21 @@ def convert_flatten(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_flip(g, op, block):
    """Operator converter for flip.

    Reverses the input tensor along every axis listed in the ``axis``
    attribute by chaining one ``reverse`` op per axis.
    """

    x = g.get_node(op.input("X")[0])
    axis = op.attr("axis")
    # Paddle normally stores this attribute as a list; tolerate a bare int.
    if isinstance(axis, int):
        axis = [axis]

    # BUG FIX: the original left ``out`` unbound (NameError) when ``axis``
    # was empty; with no axes to flip the output is simply the input.
    out = x
    for ax in axis:
        out = _op.reverse(out, ax)

    g.add_node(op.output("Out")[0], out)
+
+
 def convert_gather(g, op, block):
     """Operator converter for gather."""
 
@@ -730,6 +756,17 @@ def convert_gather_nd(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_gaussian_random(g, op, block):
    """Operator converter for gaussian_random.

    Samples a tensor of the requested ``shape`` from a normal distribution
    parameterized by the ``mean`` and ``std`` attributes.
    """
    # BUG FIX (docs): the docstring previously named the converter function
    # ("convert_gaussian_random") instead of the operator it converts.

    mean = op.attr("mean")
    std = op.attr("std")
    shape = op.attr("shape")
    seed = op.attr("seed")
    # NOTE(review): relay's random.normal documents ``key`` as an RNG key
    # expression (e.g. from threefry_key(seed)); passing the raw integer
    # seed here should be verified against the relay random API.
    out = _op.random.normal(key=seed, shape=shape, mean=mean, scale=std)
    g.add_node(op.output("Out")[0], out)
+
+
 def convert_gelu(g, op, block):
     """Operator converter for gelu."""
 
@@ -741,6 +778,32 @@ def convert_gelu(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_grid_sampler(g, op, block):
    """Operator converter for grid_sampler.

    Transposes Paddle's channels-last grid tensor to the channels-first
    layout expected by relay's ``image.grid_sample`` (NCHW for 4-D data,
    NCDHW for 5-D data).

    Raises
    ------
    ValueError
        If the input data is neither 4-D nor 5-D.
    """

    x = g.get_node(op.input("X")[0])
    data_shape = infer_shape(x)
    grid = g.get_node(op.input("Grid")[0])
    mode = op.attr("mode")
    padding_mode = op.attr("padding_mode")
    align_corners = op.attr("align_corners")

    if len(data_shape) == 4:
        layout = "NCHW"
        # Move the 2-element coordinate channel ahead of the spatial dims.
        grid = _op.transform.transpose(grid, [0, 3, 1, 2])
    elif len(data_shape) == 5:
        layout = "NCDHW"
        grid = _op.transform.transpose(grid, [0, 4, 1, 2, 3])
    else:
        # BUG FIX: the original message was an f-string with no placeholder;
        # report the offending rank so failures are actionable.
        raise ValueError(f"grid_sampler supports only 4D and 5D inputs, got {len(data_shape)}D.")

    out = _op.image.grid_sample(x, grid, mode, layout, padding_mode, align_corners)
    g.add_node(op.output("Output")[0], out)
+
+
 def convert_group_norm(g, op, block):
     """Operator converter for group_norm."""
 
@@ -2255,6 +2318,40 @@ def convert_transpose(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
def convert_unique(g, op, block):
    """Operator converter for unique.

    Emits the unique values of ``X`` plus, depending on the operator's
    attributes, the first-occurrence indices ("Indices"), the inverse
    mapping ("Index"), and the per-value counts ("Counts").
    """

    x = g.get_node(op.input("X")[0])
    return_index = op.attr("return_index")
    return_inverse = op.attr("return_inverse")
    return_counts = op.attr("return_counts")
    axis = op.attr("axis")
    dtype = _convert_dtype_value(op.attr("dtype"))

    # Relay's unique operates on 1-D data; an empty axis list corresponds
    # to Paddle's axis=None case, which flattens the input first.
    if len(axis) == 0:
        x = _op.reshape(x, [-1])

    if return_counts:
        unique, indices, inverse_indices, _, counts = _op.unique(
            x, is_sorted=True, return_counts=True
        )
    else:
        unique, indices, inverse_indices, _ = _op.unique(x, is_sorted=True, return_counts=False)

    # BUG FIX: the original computed a cast of ``unique`` into ``out`` but
    # then registered the un-cast tensor, leaving the cast as dead code.
    # Casting the VALUES to the attr dtype would also be wrong for float
    # inputs — per Paddle's docs the ``dtype`` attribute governs the
    # index-typed outputs, not the values.
    # TODO(review): apply ``dtype`` to Indices/Index/Counts if downstream
    # consumers require int64; confirm against Paddle's unique semantics.
    g.add_node(op.output("Out")[0], unique)

    if return_index:
        g.add_node(op.output("Indices")[0], indices)
    if return_inverse:
        g.add_node(op.output("Index")[0], inverse_indices)
    if return_counts:
        g.add_node(op.output("Counts")[0], counts)
+
+
 def convert_unsqueeze(g, op, block):
     """Operator converter for unsqueeze."""
 
@@ -2346,14 +2443,18 @@ _convert_map = {
     "fill_any_like": convert_fill_any_like,
     "fill_constant": convert_fill_constant,
     "fill_constant_batch_size_like": convert_fill_constant_batch_size_like,
+    "fill_zeros_like": convert_fill_zeros_like,
     "flatten_contiguous_range": convert_flatten,
     "floor": convert_unary_op,
     "floor_mod": convert_elementwise_op,
+    "flip": convert_flip,
     "gather": convert_gather,
     "gather_nd": convert_gather_nd,
+    "gaussian_random": convert_gaussian_random,
     "gelu": convert_gelu,
     "greater_equal": convert_elementwise_op,
     "greater_than": convert_elementwise_op,
+    "grid_sampler": convert_grid_sampler,
     "group_norm": convert_group_norm,
     "hard_shrink": convert_hard_shrink,
     "hard_sigmoid": convert_hard_sigmoid,
@@ -2443,6 +2544,7 @@ _convert_map = {
     "tile": convert_tile,
     "top_k_v2": convert_topk,
     "transpose2": convert_transpose,
+    "unique": convert_unique,
     "unsqueeze2": convert_unsqueeze,
     "unstack": convert_unstack,
     "where": convert_where,
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py 
b/tests/python/frontend/paddlepaddle/test_forward.py
index 3ee20124dc..612c43bb16 100755
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -679,6 +679,23 @@ def test_forward_expand_as():
         verify_model(ExpandAs(), [x_data, y_data])
 
 
@tvm.testing.uses_gpu
def test_forward_fill_zeros_like():
    """Check conversion of paddle.zeros_like for explicit output dtypes."""

    # BUG FIX (naming): the local class was misspelled "FilZeroLike".
    class FillZerosLike(nn.Layer):
        def __init__(self, dtype=None):
            super(FillZerosLike, self).__init__()
            # dtype=None keeps the input's dtype, mirroring paddle.zeros_like.
            self.dtype = dtype

        @paddle.jit.to_static
        def forward(self, x):
            return paddle.zeros_like(x, dtype=self.dtype)

    input_shape = [2, 3, 5]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(FillZerosLike("float32"), input_data=input_data)
    verify_model(FillZerosLike("int32"), input_data=input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_flatten():
     class Flatten(nn.Layer):
@@ -697,6 +714,23 @@ def test_forward_flatten():
     verify_model(Flatten(2, -2), input_data=input_data)
 
 
@tvm.testing.uses_gpu
def test_forward_flip():
    """Check conversion of paddle.flip for int, negative, and list axes."""

    class Flip(nn.Layer):
        def __init__(self, axis):
            super(Flip, self).__init__()
            self.axis = axis

        @paddle.jit.to_static
        def forward(self, x):
            return paddle.flip(x, axis=self.axis)

    input_data = paddle.rand([2, 3, 4], dtype="float32")
    # Cover a plain axis, a negative axis, and a multi-axis list.
    for flip_axis in (0, -1, [0, 1]):
        verify_model(Flip(flip_axis), input_data)
+
+
 @tvm.testing.uses_gpu
 def test_forward_gather():
     class Gather(nn.Layer):
@@ -750,6 +784,39 @@ def test_forward_group_norm():
         verify_model(GroupNorm(num_channels, 2), input_data, rtol=1e-4, 
atol=1e-4)
 
 
@tvm.testing.uses_gpu
def test_forward_grid_sampler():
    """Check conversion of paddle.nn.functional.grid_sample (4-D and 5-D)."""

    class GridSampler(nn.Layer):
        def __init__(self, mode="bilinear", padding_mode="zeros", align_corners=True):
            super(GridSampler, self).__init__()
            self.mode = mode
            self.padding_mode = padding_mode
            self.align_corners = align_corners

        def forward(self, x, grid):
            return paddle.nn.functional.grid_sample(
                x,
                grid,
                mode=self.mode,
                padding_mode=self.padding_mode,
                align_corners=self.align_corners,
            )

    def run_variants(x, grid):
        # Exercise each non-default knob once against the same inputs.
        verify_model(GridSampler(mode="nearest"), input_data=[x, grid])
        verify_model(GridSampler(padding_mode="reflection"), input_data=[x, grid])
        verify_model(GridSampler(padding_mode="border"), input_data=[x, grid])
        verify_model(GridSampler(align_corners=False), input_data=[x, grid])

    # 4-D (NCHW data, NHW2 grid).
    run_variants(
        paddle.rand(shape=[4, 4, 8, 8], dtype="float32"),
        paddle.rand(shape=[4, 8, 8, 2], dtype="float32"),
    )
    # 5-D (NCDHW data, NDHW3 grid).
    run_variants(
        paddle.rand(shape=[4, 4, 4, 4, 4], dtype="float32"),
        paddle.rand(shape=[4, 8, 8, 8, 3], dtype="float32"),
    )
+
+
 @tvm.testing.uses_gpu
 def test_forward_scatter():
     class Scatter(nn.Layer):
@@ -1394,6 +1461,45 @@ def test_forward_slice():
     # verify_model(slice5, input_data=paddle.randn((4,)))
 
 
@tvm.testing.uses_gpu
def test_forward_unique():
    """Check conversion of paddle.unique with its optional outputs."""

    class Unique(nn.Layer):
        def __init__(
            self,
            return_index=False,
            return_inverse=False,
            return_counts=False,
            axis=None,
            dtype="int64",
        ):
            super(Unique, self).__init__()
            self.return_index = return_index
            self.return_inverse = return_inverse
            self.return_counts = return_counts
            # BUG FIX: the constructor previously hard-coded ``self.axis = None``,
            # silently discarding the ``axis`` argument.
            self.axis = axis
            self.dtype = dtype

        @paddle.jit.to_static
        def forward(self, inputs):
            # BUG FIX: ``return_index`` was stored but never forwarded, so the
            # return_index=True cases below exercised nothing.
            result = paddle.unique(
                inputs,
                return_index=self.return_index,
                return_inverse=self.return_inverse,
                return_counts=self.return_counts,
                axis=self.axis,
                dtype=self.dtype,
            )
            return result

    input_shape = [2, 3, 5]
    input_data = paddle.rand(input_shape)
    verify_model(Unique(), input_data=input_data)
    verify_model(Unique(return_index=True), input_data=input_data)
    verify_model(Unique(return_index=True, return_inverse=True), input_data=input_data)
    verify_model(
        Unique(return_index=True, return_inverse=True, return_counts=True), input_data=input_data
    )
+
+
 @tvm.testing.uses_gpu
 def run_math_api(func):
     api_name = func.__name__.split("_")[-1]

Reply via email to