This is an automated email from the ASF dual-hosted git repository.

leandron pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new e774fed  Add a PaddlePaddle Frontend (#8645)
e774fed is described below

commit e774fed67c2d12e6cfc29a013f029d4b55c28e2a
Author: Jason <[email protected]>
AuthorDate: Fri Aug 27 15:34:05 2021 +0800

    Add a PaddlePaddle Frontend (#8645)
    
    * fix some problems for matmul
    
    * fix some problems for matmul
    
    * add alpha parameter for matmul
    
    * remove unnecessary condition
    
    * add TranslatedLayer which support model loaded by jit.load
    
    * add mul operator support
    
    * Add padding mode support for conv/pool2d
    
    * support 4 two-tuples
    
    * add paddle test case
    
    * add paddle conv2d  case
    
    * update test_forward.py
    
    * fix paddle convert_matmul
    
    * add paddle multiply and matmul op test case
    
    * add test case and fix bug
    
    * delete import pandas
    
    * add paddlepaddle tests
    
    * modify the variable name of convert_reshape
    
    * formatting
    
    * formatting
    
    * use black to format python code
    
    * pylint check
    
    * Remove fluid api
    
    * black format
    
    Co-authored-by: root <[email protected]>
    Co-authored-by: wjj19950828 <[email protected]>
    Co-authored-by: heliqi <[email protected]>
    Co-authored-by: Junru Shao <[email protected]>
---
 python/tvm/relay/frontend/__init__.py              |   1 +
 python/tvm/relay/frontend/paddlepaddle.py          | 918 +++++++++++++++++++++
 tests/python/frontend/paddlepaddle/test_forward.py | 661 +++++++++++++++
 tests/scripts/task_python_frontend.sh              |   3 +
 4 files changed, 1583 insertions(+)

diff --git a/python/tvm/relay/frontend/__init__.py 
b/python/tvm/relay/frontend/__init__.py
index aa8ac4f..aa49b63 100644
--- a/python/tvm/relay/frontend/__init__.py
+++ b/python/tvm/relay/frontend/__init__.py
@@ -31,4 +31,5 @@ from .tensorflow import from_tensorflow
 from .darknet import from_darknet
 from .pytorch import from_pytorch
 from .caffe import from_caffe
+from .paddlepaddle import from_paddle
 from .change_datatype import ChangeDatatype
diff --git a/python/tvm/relay/frontend/paddlepaddle.py 
b/python/tvm/relay/frontend/paddlepaddle.py
new file mode 100644
index 0000000..76a1269
--- /dev/null
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -0,0 +1,918 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name, import-self, len-as-condition, 
unused-argument, too-many-lines
+# pylint: disable=import-outside-toplevel
+"""Paddle: PArallel Distributed Deep LEarning."""
+import warnings
+
+import numpy as np
+
+import tvm
+from tvm.ir import IRModule
+
+from .. import analysis
+from .. import expr as _expr
+from .. import function as _function
+from .. import ty as _ty
+from .. import op as _op
+from .common import (
+    fold_constant,
+    infer_shape,
+    infer_type,
+    infer_value,
+    new_var,
+)
+
+__all__ = ["from_paddle"]
+
+
def shape_of(x, dtype="int32"):
    """Return the shape of *x*: a constant tensor when the shape is fully
    static, otherwise a runtime ``shape_of`` op."""

    checked = infer_type(x).checked_type
    if _ty.is_dynamic(checked):
        return _op.shape_of(x, dtype)
    return _expr.const(list(checked.shape), dtype)
+
+
+def _get_pad_size(in_size, dilated_kernel_size, stride_size):
+    """calculate the paddings size"""
+
+    if stride_size == 1 or in_size % stride_size == 0:
+        pad = max(dilated_kernel_size - stride_size, 0)
+    else:
+        pad = max(dilated_kernel_size - (in_size % stride_size), 0)
+
+    pad_before = pad // 2
+    pad_after = pad - pad_before
+
+    return [pad_before, pad_after]
+
+
def convert_arg_max(g, op, block):
    """Operator converter for arg_max.

    When ``flatten`` is set (or no axis is given) the input is reshaped to a
    vector and argmax is taken over all elements.
    """

    axis = op.attr("axis")
    keepdims = op.attr("keepdims")
    flatten = op.attr("flatten")

    x = g.get_node(op.input("X")[0])
    if axis is None or flatten:
        # Flattened argmax ignores the requested keepdims and always keeps
        # the (single) reduced dim — NOTE(review): confirm this matches
        # Paddle's flatten=True output rank.
        x = _op.reshape(x, [-1])
        out = _op.argmax(x, axis=None, keepdims=True)
    else:
        out = _op.argmax(x, axis=axis, keepdims=keepdims)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_assign(g, op, block):
    """Operator converter for assign (an identity copy of the input)."""

    x = g.get_node(op.input("X")[0])
    g.add_node(op.output("Out")[0], _op.copy(x))
+
+
def convert_batch_norm(g, op, block):
    """Operator converter for batch_norm (inference mode)."""

    epsilon = op.attr("epsilon")
    data = g.get_node(op.input("X")[0])
    gamma = g.get_node(op.input("Scale")[0])
    beta = g.get_node(op.input("Bias")[0])
    moving_mean = g.get_node(op.input("Mean")[0])
    moving_var = g.get_node(op.input("Variance")[0])
    # nn.batch_norm yields a tuple; index 0 is the normalized tensor, which
    # is the only output consumed here.
    result = _op.nn.batch_norm(
        data,
        gamma,
        beta,
        moving_mean,
        moving_var,
        epsilon=epsilon,
    )
    g.add_node(op.output("Y")[0], result[0])
+
+
def convert_cast(g, op, block):
    """Operator converter for cast."""

    # Paddle stores the dtype as e.g. "VarType.FP32"; keep the trailing part.
    out_dtype = str(block.var(op.output("Out")[0]).dtype).strip().split(".")[1]
    x = g.get_node(op.input("X")[0])
    g.add_node(op.output("Out")[0], _op.cast(x, dtype=out_dtype))
+
+
def convert_concat(g, op, block):
    """Operator converter for concat."""

    tensors = [g.get_node(name) for name in op.input("X")]
    joined = _op.concatenate(tensors, axis=op.attr("axis"))
    g.add_node(op.output("Out")[0], joined)
+
+
def convert_conv2d(g, op, block):
    """Operator converter for conv2d (also registered for depthwise_conv2d).

    Resolves Paddle's padding_algorithm (VALID / SAME / EXPLICIT) into an
    explicit padding list and emits a relay nn.conv2d.
    """

    dilations = op.attr("dilations")
    groups = op.attr("groups")
    paddings = op.attr("paddings")
    padding_algorithm = op.attr("padding_algorithm")
    strides = op.attr("strides")

    kernel = g.get_node(op.input("Filter")[0])
    input_x = g.get_node(op.input("Input")[0])
    # Requires statically known kernel/input shapes (OIHW kernel, NCHW input).
    out_channels, _, k_h, k_w = infer_shape(kernel)
    in_h, in_w = infer_shape(input_x)[2:]
    if padding_algorithm == "VALID":
        paddings = [0, 0]
    elif padding_algorithm == "SAME":
        # SAME padding is computed from the dilated kernel extent and may be
        # asymmetric; layout is (top, left, bottom, right).
        pad_h = _get_pad_size(in_h, (k_h - 1) * dilations[0] + 1, strides[0])
        pad_w = _get_pad_size(in_w, (k_w - 1) * dilations[1] + 1, strides[1])
        paddings = [pad_h[0], pad_w[0], pad_h[1], pad_w[1]]
    elif padding_algorithm == "EXPLICIT":
        if len(paddings) == 2:
            # [pad_h, pad_w] -> symmetric 4-element form.
            paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
        if len(paddings) == 4:
            # Reorder into relay's (top, left, bottom, right) —
            # NOTE(review): assumes Paddle supplies [top, bottom, left, right];
            # confirm against Paddle's conv2d documentation.
            paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
    else:
        msg = 'Value {} in attribute "padding" of operator Conv is not "valid."'
        raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))

    out = _op.nn.conv2d(
        input_x,
        kernel,
        strides=strides,
        padding=paddings,
        dilation=dilations,
        groups=groups,
        channels=out_channels,
        kernel_size=[k_h, k_w],
    )
    g.add_node(op.output("Output")[0], out)
+
+
def convert_cumsum(g, op, block):
    """Operator converter for cumsum.

    ``reverse`` is emulated by flipping the input before and the result
    after a forward cumsum, since relay's cumsum has no reverse flag here.
    """

    axis = op.attr("axis")
    exclusive = op.attr("exclusive")
    flatten = op.attr("flatten")
    reverse = op.attr("reverse")

    x = g.get_node(op.input("X")[0])
    if axis is None or flatten:
        # NOTE(review): after this reshape `axis` may still be None; the
        # reverse branch below would then call _op.reverse(x, axis=None) —
        # verify flatten+reverse is exercised/valid upstream.
        x = _op.reshape(x, [-1])
    if reverse:
        x = _op.reverse(x, axis=axis)
        out = _op.cumsum(x, axis=axis, exclusive=exclusive)
        out = _op.reverse(out, axis=axis)
    else:
        out = _op.cumsum(x, axis=axis, exclusive=exclusive)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_dropout(g, op, block):
    """Operator converter for dropout.

    Dropout is an identity at inference time, so the input is passed through.
    """

    passthrough = _op.copy(g.get_node(op.input("X")[0]))
    g.add_node(op.output("Out")[0], passthrough)
+
+
def convert_elementwise_op(g, op, block):
    """Operator converter for all the elementwise operators.

    Dispatches on ``op.type`` and reconciles Paddle's ``axis``-based
    broadcasting with relay's trailing-dimension broadcasting by expanding
    dims on the right-hand operand when ranks differ.
    """

    op_map = {
        "elementwise_div": lambda x, y: x / y,
        "elementwise_add": lambda x, y: x + y,
        "elementwise_mul": lambda x, y: x * y,
        "elementwise_sub": lambda x, y: x - y,
        "elementwise_mod": lambda x, y: x % y,
    }
    op_func = op_map[op.type]
    ipt0 = g.get_node(op.input("X")[0])
    ipt1 = g.get_node(op.input("Y")[0])
    ipt0_shape = block.var(op.input("X")[0]).shape
    ipt1_shape = block.var(op.input("Y")[0]).shape
    axis = op.attr("axis")
    if len(ipt0_shape) != len(ipt1_shape):
        if axis < 0:
            axis = axis + len(ipt0_shape)
        if axis != len(ipt0_shape) - 1:
            # Pad Y with trailing singleton dims so that its data lines up
            # with X's dimensions starting at `axis`.
            ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))
    out = op_func(ipt0, ipt1)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_equal(g, op, block):
    """Operator converter for equal (element-wise comparison)."""

    lhs = g.get_node(op.input("X")[0])
    rhs = g.get_node(op.input("Y")[0])
    g.add_node(op.output("Out")[0], _op.equal(lhs, rhs))
+
+
def convert_activation(g, op, block):
    """Operator converter for the simple one-input activations."""

    # Paddle op type -> relay unary op.
    dispatch = {
        "exp": _op.exp,
        "relu": _op.nn.relu,
        "tanh": _op.tanh,
        "sqrt": _op.sqrt,
        "erf": _op.erf,
        "abs": _op.abs,
    }
    x = g.get_node(op.input("X")[0])
    g.add_node(op.output("Out")[0], dispatch[op.type](x))
+
+
def convert_feed(g, op, block):
    """Converter for a model input node.

    Called either with a Paddle ``feed`` op (``block`` is not None) or
    directly with an InputSpec from a TranslatedLayer (``block`` is None).
    User-supplied ``shape_dict`` entries override the recorded shape.
    """

    if block is not None:
        # Static-graph path: read name/shape/dtype from the block's variable.
        ipt_name = op.output("Out")[0]
        ipt_shape = block.var(ipt_name).shape
        ipt_dtype = block.var(ipt_name).dtype
        ipt_dtype = str(ipt_dtype).strip().split(".")[1]
    else:
        # TranslatedLayer path: `op` is actually an InputSpec object.
        ipt_shape = op.shape
        ipt_dtype = str(op.dtype).strip().split(".")[1]
        ipt_name = op.name
    if g.shape_dict is not None:
        ipt_shape = g.shape_dict[ipt_name]
    out = new_var(ipt_name, shape=ipt_shape, dtype=ipt_dtype)
    g.add_node(ipt_name, out)
+
+
def convert_fill_any_like(g, op, block):
    """Operator converter for fill_any_like.

    Produces a tensor of the same shape as X filled with ``value``; folds to
    a constant when the input shape is static.
    """

    out_name = op.output("Out")[0]
    out_dtype = block.var(out_name).dtype
    out_dtype = str(out_dtype).strip().split(".")[1]
    x = g.get_node(op.input("X")[0])
    ipt_type = infer_type(x).checked_type
    value = op.attr("value")
    if not _ty.is_dynamic(ipt_type):
        # Static shape: bake the filled tensor directly into the graph.
        shape = infer_shape(x)
        const = np.ones(shape) * value
        out = _expr.const(const.astype(out_dtype))
    else:
        # Dynamic shape: defer to a runtime full_like.
        out = _op.transform.full_like(x, value).astype(out_dtype)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_fill_constant(g, op, block):
    """Operator converter for fill_constant.

    Materializes a constant tensor of the requested shape/dtype filled with
    ``value``; tensor-valued shape inputs are constant-folded via
    ``infer_value``.
    """

    value = op.attr("value")
    shape = block.var(op.output("Out")[0]).shape
    dtype = block.var(op.output("Out")[0]).dtype
    dtype = str(dtype).strip().split(".")[1]
    if op.input("ValueTensor"):
        # NOTE(review): this assigns the ValueTensor input to `shape`, while
        # the name suggests it should supply `value` — confirm against
        # Paddle's fill_constant op definition.
        shape = g.get_node(op.input("ValueTensor")[0])
        shape = infer_value(shape, g.get_params()).numpy()
    if op.input("ShapeTensor"):
        shape = g.get_node(op.input("ShapeTensor")[0])
        shape = infer_value(shape, g.get_params()).numpy()
    value = np.full(shape, value, dtype)
    out = _expr.const(value.astype(dtype)).astype(dtype)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_gelu(g, op, block):
    """Operator converter for gelu.

    Emits the exact (erf-based) formulation:
    ``x * (0.5 + 0.5 * erf(x / sqrt(2)))``.
    """

    x = g.get_node(op.input("X")[0])
    half = _expr.const(0.5, dtype="float32")
    inv_sqrt2 = _expr.const(0.5 ** 0.5, dtype="float32")
    cdf = half + _op.erf(x * inv_sqrt2) * half
    g.add_node(op.output("Out")[0], x * cdf)
+
+
def convert_hard_sigmoid(g, op, block):
    """Operator converter for hard_sigmoid: clip(slope * x + 0.5, 0, 1)."""

    slope = op.attr("slope")
    x = g.get_node(op.input("X")[0])
    linear = x * _expr.const(slope) + _expr.const(0.5)
    g.add_node(op.output("Out")[0], _op.clip(linear, 0, 1))
+
+
def convert_hard_swish(g, op, block):
    """Operator converter for hard_swish.

    Only the canonical parameterization (offset=3, scale=6, threshold=6) is
    supported. The emitted expression
    ``x * (clip(x, -3, 3) / 6 + 0.5)`` equals the standard
    ``x * clip(x + 3, 0, 6) / 6``.
    """

    offset = op.attr("offset")
    scale = op.attr("scale")
    threshold = op.attr("threshold")
    assert np.isclose(offset, 3.0), "Only support offset==3.0 for PaddlePaddle's hard_swish"
    assert np.isclose(scale, 6.0), "Only support scale==6.0 for PaddlePaddle's hard_swish"
    assert np.isclose(threshold, 6.0), "Only support threshold==6.0 for PaddlePaddle's hard_swish"
    x = g.get_node(op.input("X")[0])
    out = _op.clip(x, -1 * offset, offset)
    out = out / _expr.const(threshold) + _expr.const(0.5)
    out = x * out
    g.add_node(op.output("Out")[0], out)
+
+
def convert_layer_norm(g, op, block):
    """Operator converter for layer_norm.

    Only normalization over the last dimension is supported; missing
    Scale/Bias inputs are replaced with ones/zeros constants.
    """

    begin_norm_axis = op.attr("begin_norm_axis")
    epsilon = op.attr("epsilon")
    x = g.get_node(op.input("X")[0])
    bias_input = op.input("Bias")
    scale_input = op.input("Scale")

    x_shape = infer_shape(x)
    assert begin_norm_axis in (
        len(x_shape) - 1,
        -1,
    ), "Support only normalization over last one dimension."

    # Paddle marks Scale/Bias optional; synthesize identity params when absent.
    if bias_input:
        bias = g.get_node(bias_input[0])
    else:
        bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))

    if scale_input:
        scale = g.get_node(scale_input[0])
    else:
        scale = _expr.const(np.ones(x_shape[begin_norm_axis]))

    out = _op.nn.layer_norm(
        x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True
    )
    g.add_node(op.output("Y")[0], out)
+
+
def convert_leaky_relu(g, op, block):
    """Operator converter for leaky_relu."""

    negative_slope = op.attr("alpha")
    x = g.get_node(op.input("X")[0])
    g.add_node(op.output("Out")[0], _op.nn.leaky_relu(x, alpha=negative_slope))
+
+
def convert_lookup_table(g, op, block):
    """Operator converter for lookup_table_v2 (embedding lookup).

    When ``padding_idx`` is set, the corresponding row of the weight matrix
    is zeroed before the lookup so that padded ids contribute nothing.
    """

    indices = g.get_node(op.input("Ids")[0])
    padding_idx = op.attr("padding_idx")
    weight_name = op.input("W")[0]
    if padding_idx != -1:
        # BUG FIX: `g.get_params` is a method — the original subscripted it
        # (`g.get_params[...]`), which raises TypeError at runtime. Call it
        # with the weight name to fetch the numpy array, then zero the row.
        g.get_params(weight_name)[padding_idx] = 0.0
        g.add_node(weight_name, _expr.const(g.params[weight_name]))
    weights = g.get_node(weight_name)
    out = _op.take(weights, indices.astype("int32"), axis=0)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_matmul(g, op, block):
    """Operator converter for matmul and matmul_v2.

    Handles transpose flags, N-dimensional batch matmul (via flattening to
    rank-3), and matmul's optional ``alpha`` scaling factor.
    """

    inputs = [g.get_node(op.input("X")[0]), g.get_node(op.input("Y")[0])]
    a_shape = infer_shape(inputs[0])
    b_shape = infer_shape(inputs[1])
    # matmul_v2 uses trans_x/trans_y; the legacy matmul op uses
    # transpose_X/transpose_Y.
    if op.has_attr("trans_x"):
        # for matmul_v2
        trans_x = op.attr("trans_x")
        trans_y = op.attr("trans_y")
    else:
        # for matmul
        trans_x = op.attr("transpose_X")
        trans_y = op.attr("transpose_Y")
    # Apply the requested transposes by swapping the last two axes.
    if trans_x:
        perm = list(range(len(a_shape)))
        perm[-2] = len(a_shape) - 1
        perm[-1] = len(a_shape) - 2
        inputs[0] = _op.transpose(inputs[0], axes=perm)
    if trans_y:
        perm = list(range(len(b_shape)))
        perm[-2] = len(b_shape) - 1
        perm[-1] = len(b_shape) - 2
        inputs[1] = _op.transpose(inputs[1], axes=perm)

    # This implementation closely follows the ONNX frontend's MatMul
    # conversion. Shapes are recomputed symbolically because batch matmul
    # must be supported.
    a_shape = shape_of(inputs[0])
    a_rank = infer_shape(a_shape)[0]
    b_shape = shape_of(inputs[1])
    b_rank = infer_shape(b_shape)[0]
    # When performing a batch matmul, we need to properly handle N-dim shapes.
    if a_rank > 2 or b_rank > 2:

        def flatten_to_nd(x, x_shape, nd=3):
            # Collapse all leading (batch) dims into one so x becomes rank nd.
            ndims = infer_shape(x_shape)[0]
            if ndims == nd:
                return x
            newshape = _op.concatenate(
                [
                    _expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
                    _op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),
                ],
                0,
            )
            out = _op.reshape(x, fold_constant(newshape))
            return out

        b_type = infer_type(inputs[1])
        # Convert to dense if the second matrix is 2d and non-dynamic
        if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
            a = flatten_to_nd(inputs[0], a_shape, 2)
            b = _op.transpose(inputs[1])
            output = _op.nn.dense(a, b)
        else:
            # Convert a and b into 3 dimensional tensors.
            a = flatten_to_nd(inputs[0], a_shape, 3)
            b = flatten_to_nd(inputs[1], b_shape, 3)
            # Transpose matrix dimensions of b, since nn.batch_matmul expects
            # the second operand in transposed form.
            b = _op.transpose(b, [0, 2, 1])
            # Perform a batch matmul.
            output = _op.nn.batch_matmul(a, b)
        # Determine the output batch dimension.
        if a_rank > b_rank:
            out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])
        elif a_rank < b_rank:
            out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])
        # If its unclear how broadcasting should be applied, the output
        # shape is determined by choosing the maximum value from each input.
        else:
            out_batch = _op.concatenate(
                [
                    _op.maximum(
                        _op.strided_slice(a_shape, [i], [i + 1]),
                        _op.strided_slice(b_shape, [i], [i + 1]),
                    )
                    for i in range(a_rank - 2)
                ],
                0,
            )
        # Reshape output to original dimensions: batch dims + (M, N).
        final_shape = _op.concatenate(
            [
                out_batch,
                _op.strided_slice(
                    a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
                ),
                _op.strided_slice(
                    b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
                ),
            ],
            0,
        )
        out = _op.reshape(output, fold_constant(final_shape))
    else:
        # Rank <= 2 on both sides: a plain dense op suffices. A rank-1 b is
        # promoted to a column vector and the extra dim squeezed afterwards.
        if b_rank == 1:
            inputs[1] = _op.expand_dims(inputs[1], 1, 1)
        # Otherwise a simple dense op will get the job done.
        input_1_t = _op.transpose(inputs[1], axes=(1, 0))
        out = _op.nn.dense(inputs[0], input_1_t)
        if b_rank == 1:
            out = _op.squeeze(out, axis=[-1])
    # Legacy matmul supports a scalar alpha multiplier.
    if op.has_attr("alpha"):
        alpha = op.attr("alpha")
        if not np.isclose(alpha, 1.0):
            out = out * _expr.const(alpha).astype("float32")
    g.add_node(op.output("Out")[0], out)
+
+
def convert_mul(g, op, block):
    """Operator converter for mul.

    Paddle's mul flattens X to 2-D at ``x_num_col_dims`` and Y at
    ``y_num_col_dims``, multiplies the resulting matrices, then reshapes the
    product back to the combined leading/trailing dims of X and Y.
    """

    x = g.get_node(op.input("X")[0])
    y = g.get_node(op.input("Y")[0])
    x_num_col_dims = op.attr("x_num_col_dims")
    y_num_col_dims = op.attr("y_num_col_dims")
    x_shape = shape_of(x)
    y_shape = shape_of(y)
    x_dim = infer_shape(x_shape)[0]
    y_dim = infer_shape(y_shape)[0]
    # Normalize negative split points to positive axis indices.
    if x_num_col_dims < 0:
        x_num_col_dims += x_dim
    if y_num_col_dims < 0:
        y_num_col_dims += y_dim
    if x_num_col_dims == 1:
        x = _op.nn.batch_flatten(x)
    else:
        # Collapse dims [0, x_num_col_dims) and [x_num_col_dims, x_dim) into
        # a single row/column extent each, computed symbolically.
        pre_shape = _op.prod(_op.strided_slice(x_shape, [0], [x_num_col_dims], [1]), keepdims=True)
        post_shape = _op.prod(
            _op.strided_slice(x_shape, [x_num_col_dims], [x_dim], [1]), keepdims=True
        )
        new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
        new_shape = fold_constant(new_shape)
        x = _op.reshape(x, new_shape)
    if y_num_col_dims == 1:
        y = _op.nn.batch_flatten(y)
    else:
        pre_shape = _op.prod(_op.strided_slice(y_shape, [0], [y_num_col_dims], [1]), keepdims=True)
        post_shape = _op.prod(
            _op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1]), keepdims=True
        )
        new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
        new_shape = fold_constant(new_shape)
        y = _op.reshape(y, new_shape)
    # nn.dense computes x @ y^T, so transpose y to get a plain matmul.
    y = _op.transpose(y)
    out = _op.nn.dense(x, y)
    # Restore the full output shape: X's leading dims + Y's trailing dims.
    out_pre_shape = _op.strided_slice(x_shape, [0], [x_num_col_dims], [1])
    out_post_shape = _op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1])
    out_shape = _op.concatenate([out_pre_shape, out_post_shape], axis=0)
    out_shape = fold_constant(out_shape)
    out = _op.reshape(out, out_shape)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_pool2d(g, op, block):
    """Operator converter for pool2d.

    Supports avg/max pooling, global pooling (lowered to adaptive pooling
    with output size 1x1), adaptive pooling, and the three padding modes
    (VALID / SAME / EXPLICIT).
    """

    adaptive = op.attr("adaptive")
    ceil_mode = op.attr("ceil_mode")
    global_pooling = op.attr("global_pooling")
    ksize = op.attr("ksize")
    paddings = op.attr("paddings")
    padding_algorithm = op.attr("padding_algorithm")
    pooling_type = op.attr("pooling_type")
    if global_pooling:
        # Global pooling == adaptive pooling with a 1x1 output.
        adaptive = True
        ksize = [1, 1]

    input_x = g.get_node(op.input("X")[0])
    # Requires a static NCHW input shape for the SAME-padding computation.
    in_h, in_w = infer_shape(input_x)[2:]

    op_map = {
        "avg": "avg_pool2d",
        "max": "max_pool2d",
    }
    strides = op.attr("strides")
    # Scalar attributes are broadcast to both spatial dims.
    if isinstance(strides, int):
        strides = [strides, strides]
    if isinstance(ksize, int):
        ksize = [ksize, ksize]
    if isinstance(paddings, int):
        paddings = [paddings] * 2

    if padding_algorithm == "VALID":
        paddings = [0, 0]
    elif padding_algorithm == "SAME":
        pad_h = _get_pad_size(in_h, ksize[0], strides[0])
        pad_w = _get_pad_size(in_w, ksize[1], strides[1])
        paddings = [pad_h[0], pad_w[0], pad_h[1], pad_w[1]]
    elif padding_algorithm == "EXPLICIT":
        if len(paddings) == 2:
            paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
        if len(paddings) == 4:
            # Reorder into relay's (top, left, bottom, right) — NOTE(review):
            # assumes Paddle supplies [top, bottom, left, right]; confirm.
            paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
    else:
        msg = 'Value {} in attribute "padding" of operator Pool2d is not "valid."'
        raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))

    if not adaptive:
        out = getattr(_op.nn, op_map[pooling_type])(
            input_x, pool_size=ksize, strides=strides, padding=paddings, ceil_mode=ceil_mode
        )
    else:
        # Adaptive pooling ignores strides/padding; ksize is the output size.
        out = getattr(_op.nn, "adaptive_" + op_map[pooling_type])(input_x, output_size=ksize)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_reshape(g, op, block):
    """Operator converter for reshape2.

    The target shape may come from a single ``Shape`` tensor, from a list of
    ``ShapeTensor`` scalars/tensors (concatenated at runtime), or from the
    static ``shape`` attribute, in that order of precedence.
    """

    input_shape = op.input("Shape")
    input_shape_tensor = op.input("ShapeTensor")
    data = g.get_node(op.input("X")[0])
    if input_shape:
        new_shape = g.get_node(input_shape[0])
    elif input_shape_tensor:
        tmp_shape = []
        for shape_name in input_shape_tensor:
            shape = g.get_node(shape_name)
            # Promote 0-d scalars to 1-d so they can be concatenated.
            if len(infer_shape(shape)) == 0:
                shape = _op.reshape(shape, [-1])
            # The original had separate, identical branches for
            # _expr.Constant and _expr.Expr; Constant is an Expr subclass,
            # so one check covers both. Non-Expr values (defensive path)
            # are wrapped into int64 constants.
            if isinstance(shape, _expr.Expr):
                tmp_shape.append(shape)
            else:
                tmp_shape.append(_expr.const(np.array(shape).astype("int64")))
        new_shape = _op.concatenate(tmp_shape, axis=0)
    else:
        new_shape = op.attr("shape")
    out = _op.reshape(data, new_shape)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_scale(g, op, block):
    """Operator converter for scale.

    Computes ``scale * x + bias`` (or ``scale * (x + bias)`` when
    ``bias_after_scale`` is false), short-circuiting trivial cases.
    """

    scale = op.attr("scale")
    bias = op.attr("bias")
    bias_after_scale = op.attr("bias_after_scale")
    x = g.get_node(op.input("X")[0])

    scale_is_one = np.isclose(scale, 1.0)
    bias_is_zero = np.isclose(bias, 0.0)
    if scale_is_one and bias_is_zero:
        # Pure identity.
        out = _op.copy(x)
    elif bias_is_zero:
        out = x * _expr.const(np.array(scale).astype("float32"))
    elif scale_is_one:
        out = x + _expr.const(np.array(bias).astype("float32"))
    elif bias_after_scale:
        out = x * _expr.const(np.array(scale).astype("float32")) + _expr.const(
            np.array(bias).astype("float32")
        )
    else:
        out = (x + _expr.const(np.array(bias).astype("float32"))) * _expr.const(
            np.array(scale).astype("float32")
        )
    g.add_node(op.output("Out")[0], out)
+
+
def convert_shape(g, op, block):
    """Operator converter for shape."""

    data = g.get_node(op.input("Input")[0])
    g.add_node(op.output("Out")[0], shape_of(data))
+
+
def convert_slice(g, op, block):
    """Operator converter for slice.

    Paddle specifies starts/ends only for the axes being sliced; the helper
    expands them into dense per-axis lists covering axes 0..max(axes), so a
    single strided_slice can be emitted. ``decrease_axis`` dims are squeezed
    from the result.
    """

    def parameter_process(starts, ends, axes, dshape):
        # Expand sparse (starts, ends, axes) into dense per-axis lists.
        # Axes not mentioned get the full range [0, dshape[i]).
        new_axes = []
        new_starts = []
        new_ends = []
        pop_index = 0
        for i in range(max(axes) + 1):
            new_axes.append(i)
            if i in axes:
                new_starts.append(starts[pop_index])
                new_ends.append(ends[pop_index])
                pop_index += 1
            else:
                new_starts.append(0)
                new_ends.append(dshape[i])
        return new_starts, new_ends, new_axes

    data = g.get_node(op.input("Input")[0])
    dshape = infer_shape(data)
    starts = op.attr("starts")
    ends = op.attr("ends")
    axes = op.attr("axes")
    decrease_axis = op.attr("decrease_axis")
    # Normalize scalar attributes to lists.
    if isinstance(starts, int):
        starts = [starts]
    if isinstance(ends, int):
        ends = [ends]
    if isinstance(axes, int):
        axes = [axes]
    if isinstance(decrease_axis, int):
        decrease_axis = [decrease_axis]
    starts, ends, axes = parameter_process(starts, ends, axes, dshape)
    out = _op.strided_slice(data, begin=starts, end=ends)
    if decrease_axis:
        out = _op.squeeze(out, axis=decrease_axis)
    g.add_node(op.output("Out")[0], out)
+
+
def convert_softmax(g, op, block):
    """Operator converter for softmax.

    Emits the numerically stable form: subtract the per-axis max before
    exponentiation, then normalize by the sum.
    """

    axis = op.attr("axis")
    x_name = op.input("X")[0]
    if axis < 0:
        axis = len(block.var(x_name).shape) + axis
    x = g.get_node(x_name)
    shifted = _op.exp(x - _op.max(x, axis, keepdims=True))
    result = shifted / _op.sum(shifted, axis, keepdims=True)
    g.add_node(op.output("Out")[0], result)
+
+
def convert_unsqueeze(g, op, block):
    """Operator converter for unsqueeze2.

    Axes are applied in ascending order so earlier insertions do not shift
    the meaning of later ones.
    """

    out = g.get_node(op.input("X")[0])
    for ax in sorted(op.attr("axes")):
        out = _op.expand_dims(out, axis=ax, num_newaxis=1)
    g.add_node(op.output("Out")[0], out)
+
+
# Dispatch table: Paddle op type -> converter function.
# CONSISTENCY FIX: convert_elementwise_op already implements
# "elementwise_mod" in its internal op_map, but the op was never registered
# here, so such models were rejected by check_unsupported_ops; register it.
_convert_map = {
    "arg_max": convert_arg_max,
    "assign": convert_assign,
    "batch_norm": convert_batch_norm,
    "cast": convert_cast,
    "concat": convert_concat,
    "conv2d": convert_conv2d,
    "cumsum": convert_cumsum,
    "depthwise_conv2d": convert_conv2d,
    "dropout": convert_dropout,
    "elementwise_add": convert_elementwise_op,
    "elementwise_div": convert_elementwise_op,
    "elementwise_mod": convert_elementwise_op,
    "elementwise_mul": convert_elementwise_op,
    "elementwise_sub": convert_elementwise_op,
    "equal": convert_equal,
    "exp": convert_activation,
    "feed": convert_feed,
    "fill_any_like": convert_fill_any_like,
    "fill_constant": convert_fill_constant,
    "gelu": convert_gelu,
    "hard_sigmoid": convert_hard_sigmoid,
    "hard_swish": convert_hard_swish,
    "layer_norm": convert_layer_norm,
    "leaky_relu": convert_leaky_relu,
    "lookup_table_v2": convert_lookup_table,
    "matmul": convert_matmul,
    "matmul_v2": convert_matmul,
    "mul": convert_mul,
    "pool2d": convert_pool2d,
    "relu": convert_activation,
    "reshape2": convert_reshape,
    "scale": convert_scale,
    "shape": convert_shape,
    "slice": convert_slice,
    "softmax": convert_softmax,
    "tanh": convert_activation,
    "unsqueeze2": convert_unsqueeze,
}
+
+
class GraphProto:
    """A helper class for handling relay functions from PaddlePaddle model."""

    def __init__(self):
        # Maps value name -> relay expression produced so far.
        self.nodes = {}
        # Maps weight name -> numpy array extracted from the Paddle program.
        self.params = {}
        # Optional user overrides for input shapes (name -> shape).
        self.shape_dict = None

    def get_node(self, name):
        """Return the relay expression registered under *name*."""

        assert name in self.nodes
        return self.nodes[name]

    def add_node(self, name, node):
        """Register *node* under *name*, constant-folding it eagerly."""

        self.nodes[name] = fold_constant(node)

    def get_params(self, name=None):
        """Return all extracted weights, or the single weight named *name*."""

        if name is None:
            return self.params
        assert name in self.params
        return self.params[name]

    def extract_parameters(self, program, scope=None):
        """Extract all the weights from PaddlePaddle program.

        *scope* may be either a Paddle Scope or a plain dict of name ->
        numpy array (used on the TranslatedLayer path). Each weight is also
        registered in ``self.nodes`` as a relay constant.
        """

        self.params = {}
        variables = program.global_block().vars
        for name in variables:
            var = program.global_block().var(name)
            # Skip the synthetic feed/fetch variables and non-weight vars.
            if name.endswith("feed") or name.endswith("fetch"):
                continue
            if not var.persistable:
                continue
            if isinstance(scope, dict):
                self.params[name] = scope[name]
            else:
                self.params[name] = np.array(scope.var(name).get_tensor())
            self.nodes[name] = _expr.const(self.params[name])

    def check_input_shape(self, op, block):
        """Check the shape information of model's inputs, fixed shape is recommended."""

        ipt_name = op.input(op.input_names[0])
        ipt_shape = block.var(ipt_name).shape
        # A negative extent marks an unknown (dynamic) dimension in Paddle.
        for i in ipt_shape:
            if i < 0:
                warning_msg = "Input {}(shape={}) has unkown dimension shapes. \
                               Specifying static values may improve performance".format(
                    ipt_name, ipt_shape
                )
                warnings.warn(warning_msg)

    def check_unsupported_ops(self, program):
        """Check whether all the operators are supported.

        Raises OpNotImplemented listing every op type that has no entry in
        ``_convert_map`` (fetch ops are exempt).
        """

        unsupported_ops = set()
        for block in program.blocks:
            for op in block.ops:
                if op.type == "fetch":
                    continue
                if op.type not in _convert_map:
                    unsupported_ops.add(op.type)
        if len(unsupported_ops) > 0:
            msg = "The following operators are not supported for frontend Paddle: "
            msg += ", ".join(unsupported_ops)
            raise tvm.error.OpNotImplemented(msg)

    def ops_to_relay(self, program, input_specs=None):
        """Convert PaddlePaddle operators to TVM relay functions.

        When *input_specs* is given (TranslatedLayer path) the model inputs
        are created from the specs before the program ops are walked.
        """

        if input_specs is not None:
            for input_spec in input_specs:
                convert_feed(self, input_spec, None)
        for block in program.blocks:
            for op in block.ops:
                if op.type == "fetch":
                    continue
                convert_func = _convert_map[op.type]
                convert_func(self, op, block)

    def from_program(self, program, shape_dict, scope):
        """Construct the TVM relay expression from PaddlePaddle program.

        Returns an (IRModule, params dict) pair. Fetch ops' inputs define
        the graph outputs.
        """

        self.shape_dict = shape_dict
        if scope is None:
            import paddle

            scope = paddle.fluid.global_scope()
        self.check_unsupported_ops(program)
        self.extract_parameters(program, scope)
        self.ops_to_relay(program)

        output_names = list()
        for block in program.blocks:
            for op in block.ops:
                if op.type == "fetch":
                    output_names.append(op.input("X")[0])

        outputs = [self.nodes[name] for name in output_names]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)

        free_vars = analysis.free_vars(outputs)
        func = _function.Function(free_vars, outputs)
        mod = IRModule.from_expr(func)
        return mod, self.params

    def from_translated_layer(self, layer, shape_dict):
        """Construct the TVM relay expression from PaddlePaddle TranslatedLayer.

        Weights come from the layer's parameters; inputs/outputs from its
        recorded input/output specs. Returns (IRModule, params dict).
        """

        self.shape_dict = shape_dict
        program = layer.program()
        parameters = dict()
        for param in layer.parameters():
            parameters[param.name] = np.array(param.value().get_tensor())
        self.check_unsupported_ops(program)
        self.extract_parameters(program, parameters)

        input_specs = layer._input_spec()
        self.ops_to_relay(program, input_specs)

        output_names = [x.name for x in layer._output_spec()]

        outputs = [self.nodes[name] for name in output_names]
        outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)

        free_vars = analysis.free_vars(outputs)
        func = _function.Function(free_vars, outputs)
        mod = IRModule.from_expr(func)
        return mod, self.params
+
+
def from_paddle(program_or_layer, shape_dict=None, scope=None):
    """Convert a PaddlePaddle model into an equivalent Relay Function.

    PaddlePaddle Program/TranslatedLayer represent the computation graph of PaddlePaddle model,
    and PaddlePaddle scope stores all the weights of PaddlePaddle model.

    Parameters
    ----------
    program_or_layer : paddle.static.Program or paddle.jit.TranslatedLayer
        The model to convert.
    shape_dict : dict of str to shape, optional
        Overrides for the model input shapes.
    scope : paddle scope, optional
        Where the weights live; only used for the Program path (defaults to
        the global scope).

    Returns
    -------
    (tvm.IRModule, dict)
        The relay module and the extracted parameters.
    """

    import paddle

    g = GraphProto()
    if isinstance(program_or_layer, paddle.jit.TranslatedLayer):
        # model is loaded by `paddle.jit.load`
        mod, params = g.from_translated_layer(program_or_layer, shape_dict)
    elif isinstance(program_or_layer, paddle.static.Program):
        # model is loaded by `paddle.static.load_inference_model`
        mod, params = g.from_program(program_or_layer, shape_dict, scope)
    else:
        raise Exception("Only PaddlePaddle's Program and TranslatedLayer are supported.")
    return mod, params
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py 
b/tests/python/frontend/paddlepaddle/test_forward.py
new file mode 100644
index 0000000..db07e07
--- /dev/null
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -0,0 +1,661 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+import os
+from pathlib import Path
+import shutil
+
+import numpy as np
+import tvm
+import tvm.testing
+import tvm.topi.testing
+from tvm import relay
+from tvm.contrib import graph_executor
+
+import paddle
+import paddle.nn as nn
+
# Scratch directory used to round-trip models through paddle.jit.save/load;
# created eagerly at import time and removed again by get_paddle_model().
PADDLE_TEST_DATA_ROOT_PATH = Path(Path("~").expanduser(), ".tvm_test_data", "paddle")
PADDLE_TEST_DATA_ROOT_PATH.mkdir(parents=True, exist_ok=True)
+
+
def assert_shapes_match(tru, est):
    """Raise AssertionError unless the two outputs have identical shapes."""
    if tru.shape != est.shape:
        raise AssertionError("Output shapes {} and {} don't match".format(tru.shape, est.shape))
+
+
def get_paddle_model(func, input_spec):
    """Round-trip *func* through ``paddle.jit.save``/``paddle.jit.load``.

    Saving and reloading produces a TranslatedLayer, the form consumed by the
    PaddlePaddle frontend.  The serialized artifacts are deleted before
    returning; only the in-memory layer is needed.
    """
    # NOTE: the original declared `global PADDLE_TEST_DATA_ROOT_PATH`, which is
    # unnecessary for a name that is only read, never assigned.
    model_path = Path(PADDLE_TEST_DATA_ROOT_PATH, "model")

    paddle.jit.save(func, str(model_path), input_spec=input_spec)
    baseline_model = paddle.jit.load(str(model_path))

    shutil.rmtree(str(PADDLE_TEST_DATA_ROOT_PATH))
    return baseline_model
+
+
def verify_model(func, input_data, rtol=1e-5, atol=1e-5):
    """Run *func* both natively in Paddle and through the TVM frontend and
    compare the outputs element-wise within (rtol, atol).

    ``input_data`` may be a single tensor/ndarray or a list/tuple of them;
    each becomes a model input named ``input0``, ``input1``, ...
    """
    if not (isinstance(input_data, (tuple, list))):
        input_data = [input_data]

    input_spec = []
    input_names = []
    input_shape_dict = {}
    compiled_input = {}
    for idx, data in enumerate(input_data):
        input_name = "input{}".format(idx)
        input_spec.append(
            paddle.static.InputSpec(dtype=data.dtype, shape=data.shape, name=input_name)
        )
        input_names.append(input_name)
        input_shape_dict[input_name] = data.shape
        # The TVM graph executor consumes numpy arrays; convert Paddle tensors.
        if isinstance(data, np.ndarray):
            compiled_input[input_name] = data
        else:
            compiled_input[input_name] = data.numpy()

    baseline_model = get_paddle_model(func, input_spec)
    # `input[:]` passes a copy/view so the originals are not mutated in place.
    baseline_outputs = baseline_model(*[input[:] for input in input_data])

    # get paddle outputs
    if isinstance(baseline_outputs, (tuple, list)):
        baseline_outputs = tuple(out.numpy() for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.numpy(),)

    mod, params = relay.frontend.from_paddle(baseline_model, input_shape_dict)
    # The Relay function may have fewer free parameters than supplied inputs
    # (e.g. inputs folded into constants); only bind the ones that survived.
    parms_num = min(len(input_names), len(mod["main"].params))
    compiled_names = []
    for arg in mod["main"].params[:parms_num]:
        assert arg.name_hint in input_names
        compiled_names.append(arg.name_hint)

    with tvm.transform.PassContext(opt_level=3):
        for target, dev in tvm.testing.enabled_targets():
            lib = relay.build(mod, target=target, params=params)
            gmod = graph_executor.GraphModule(lib["default"](dev))
            for name in compiled_names:
                gmod.set_input(name, compiled_input[name])
            gmod.run()

            for i, baseline_output in enumerate(baseline_outputs):
                compiled_output = gmod.get_output(i).numpy()

                assert_shapes_match(baseline_output, compiled_output)
                tvm.testing.assert_allclose(baseline_output, compiled_output, rtol=rtol, atol=atol)
+
+
@tvm.testing.uses_gpu
def test_forward_add_subtract():
    """Element-wise add/subtract: tensor-tensor, tensor-scalar, and two-input."""
    input_shape = [10]

    @paddle.jit.to_static
    def add_subtract(inputs):
        return paddle.subtract(paddle.add(inputs, inputs), inputs)

    @paddle.jit.to_static
    def add_subtract2(inputs):
        # Python-scalar operands exercise scalar broadcasting.
        return inputs + 1 - 2

    @paddle.jit.to_static
    def add_subtract3(inputs1, inputs2):
        ones = paddle.ones([10], dtype="float32")
        return inputs1 + ones - inputs2

    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(add_subtract, input_data)
    verify_model(add_subtract2, input_data)
    input_data2 = paddle.rand(input_shape, dtype="float32")
    verify_model(add_subtract3, [input_data, input_data2])
+
+
@tvm.testing.uses_gpu
def test_forward_argmax():
    """argmax: flattened (no axis), explicit axis, keepdim True/False."""
    input_shape = [1, 3, 10, 10]

    class ArgMax(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return paddle.argmax(inputs)

    class ArgMax1(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return inputs.argmax(axis=1)

    class ArgMax2(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return inputs.argmax(axis=1, keepdim=False)

    class ArgMax3(nn.Layer):
        @paddle.jit.to_static
        def forward(self, inputs):
            return inputs.argmax(axis=2, keepdim=True)

    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(ArgMax(), input_data=input_data)
    verify_model(ArgMax1(), input_data=input_data)
    verify_model(ArgMax2(), input_data=input_data)
    verify_model(ArgMax3(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_assign():
    """assign (identity copy) for both float tensors and integer ndarrays."""

    @paddle.jit.to_static
    def assign(inputs):
        return paddle.assign(inputs)

    input_shape = [2, 3]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(
        assign,
        [
            input_data,
        ],
    )
    # A raw numpy array input exercises the np.ndarray path in verify_model.
    input_data2 = np.random.randint(100, size=input_shape)
    verify_model(
        assign,
        [
            input_data2,
        ],
    )
+
+
@tvm.testing.uses_gpu
def test_forward_batch_norm():
    """BatchNorm over 1D, 2D and 3D spatial inputs (2 channels each)."""

    class BatchNorm1D(nn.Layer):
        def __init__(self):
            super(BatchNorm1D, self).__init__()
            self.batch_norm = nn.BatchNorm1D(2)

        @paddle.jit.to_static
        def forward(self, input_data):
            return self.batch_norm(input_data)

    class BatchNorm2D(nn.Layer):
        def __init__(self):
            super(BatchNorm2D, self).__init__()
            self.batch_norm = nn.BatchNorm2D(2)

        @paddle.jit.to_static
        def forward(self, input_data):
            return self.batch_norm(input_data)

    class BatchNorm3D(nn.Layer):
        def __init__(self):
            super(BatchNorm3D, self).__init__()
            self.batch_norm = nn.BatchNorm3D(2)

        @paddle.jit.to_static
        def forward(self, input_data):
            return self.batch_norm(input_data)

    input_data = paddle.rand((2, 2, 3), dtype="float32")
    verify_model(BatchNorm1D(), input_data=input_data)
    input_data = paddle.rand((2, 2, 2, 3), dtype="float32")
    verify_model(BatchNorm2D(), input_data=input_data)
    input_data = paddle.rand((2, 2, 2, 2, 3), dtype="float32")
    verify_model(BatchNorm3D(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_cast():
    """cast via both the functional API and the tensor method (uint8, int64)."""

    @paddle.jit.to_static
    def cast1(inputs, dtype="uint8"):
        return paddle.cast(inputs, dtype)

    @paddle.jit.to_static
    def cast2(inputs, dtype="int64"):
        return inputs.cast(dtype)

    input_shape = [2, 3]
    # Scale up so casting to integer types yields non-trivial values.
    input_data = paddle.rand(input_shape, dtype="float32") * 100
    verify_model(
        cast1,
        [
            input_data,
        ],
    )
    verify_model(
        cast2,
        [
            input_data,
        ],
    )
+
+
@tvm.testing.uses_gpu
def test_forward_concat_unsqueeze():
    """concat of unsqueezed slices, via tensor method and functional API."""

    @paddle.jit.to_static
    def concat_unsqueeze1(inputs):
        return paddle.concat([inputs[:, 0].unsqueeze(1), inputs[:, 1].unsqueeze(1)], axis=1)

    @paddle.jit.to_static
    def concat_unsqueeze2(inputs):
        a = (inputs[:, :, 0] + 2) * 7
        b = (inputs[:, :, 1] + 3) * 11
        c = (inputs[:, :, 2] + 5) * 13
        return paddle.concat([paddle.unsqueeze(t, axis=2) for t in [a, b, c]], axis=2)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(concat_unsqueeze1, input_data=input_data)
    verify_model(concat_unsqueeze2, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_cumsum():
    """cumsum over the flattened tensor and over each axis, int32 and int64."""

    @paddle.jit.to_static
    def cusum1(inputs):
        return paddle.cumsum(inputs)

    @paddle.jit.to_static
    def cusum2(inputs):
        return paddle.cumsum(inputs, axis=0)

    @paddle.jit.to_static
    def cusum3(inputs):
        return paddle.cumsum(inputs, axis=1)

    input_data = paddle.randint(0, 100, (10, 10), dtype=paddle.int32)
    verify_model(cusum1, [input_data])
    verify_model(cusum1, [input_data.astype(paddle.int64)])
    verify_model(
        cusum2,
        [
            input_data,
        ],
    )
    verify_model(
        cusum3,
        [
            input_data,
        ],
    )
+
+
@tvm.testing.uses_gpu
def test_forward_conv():
    """Conv2D with bias, and grouped Conv2D without bias, each + Softmax."""
    conv2d_input_shape = [1, 3, 10, 10]

    class Conv2D1(nn.Layer):
        def __init__(self):
            super(Conv2D1, self).__init__()
            self.conv = nn.Conv2D(3, 6, 7, bias_attr=True)
            self.softmax = nn.Softmax()

        @paddle.jit.to_static
        def forward(self, inputs):
            return self.softmax(self.conv(inputs))

    class Conv2D2(nn.Layer):
        def __init__(self):
            super(Conv2D2, self).__init__()
            # groups=3 makes this a depthwise-style grouped convolution.
            self.conv = nn.Conv2D(3, 6, 7, groups=3, bias_attr=False)
            self.softmax = nn.Softmax()

        @paddle.jit.to_static
        def forward(self, inputs):
            return self.softmax(self.conv(inputs))

    conv2d_input_data = paddle.rand(conv2d_input_shape, dtype="float32")
    verify_model(Conv2D1(), input_data=conv2d_input_data)
    verify_model(Conv2D2(), input_data=conv2d_input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_dropout():
    """dropout in inference mode on a 2D slice and on the full 4D tensor."""

    @paddle.jit.to_static
    def dropout(inputs):
        return nn.functional.dropout(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(dropout, input_data=input_data[0, 0])
    verify_model(dropout, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_shape_full():
    """full() with a runtime shape taken from paddle.shape, default and
    explicit dtype."""

    @paddle.jit.to_static
    def full1(inputs):
        return paddle.full(paddle.shape(inputs), 3.14)

    @paddle.jit.to_static
    def full2(inputs):
        return paddle.full(paddle.shape(inputs), 1.0, dtype=inputs.dtype)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(full1, input_data=[input_data])
    verify_model(full2, input_data=[input_data])
+
+
@tvm.testing.uses_gpu
def test_forward_ones_like():
    """ones_like with inherited dtype and with an explicit int32 dtype."""

    @paddle.jit.to_static
    def ones_like1(inputs):
        return paddle.ones_like(inputs)

    @paddle.jit.to_static
    def ones_like2(inputs):
        return paddle.ones_like(inputs, dtype="int32")

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(ones_like1, input_data=input_data)
    verify_model(ones_like2, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_gelu():
    """GELU activation."""

    @paddle.jit.to_static
    def gelu(inputs):
        return nn.functional.gelu(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(gelu, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_hard_sigmoid():
    """Hard-sigmoid activation."""

    @paddle.jit.to_static
    def hard_sigmoid(inputs):
        return nn.functional.hardsigmoid(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(hard_sigmoid, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_hard_swish():
    """Hard-swish activation."""

    @paddle.jit.to_static
    def hard_swish(inputs):
        return nn.functional.hardswish(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(hard_swish, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_layer_norm():
    """layer_norm via the functional API (explicit weight/bias inputs) and via
    the nn.LayerNorm layer (learned parameters)."""

    @paddle.jit.to_static
    def layer_norm(inputs, weight, bias):
        return nn.functional.layer_norm(inputs, inputs.shape[-1], weight=weight, bias=bias)

    class LayerNorm(nn.Layer):
        def __init__(self):
            super(LayerNorm, self).__init__()
            data_shape = [10]
            self.layer_norm = nn.LayerNorm(data_shape)

        @paddle.jit.to_static
        def forward(self, inputs):
            return self.layer_norm(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    weight = paddle.rand([10], dtype="float32")
    bias = paddle.rand([10], dtype="float32")
    verify_model(layer_norm, input_data=[input_data, weight, bias])
    verify_model(LayerNorm(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_leaky_relu():
    """Leaky-ReLU activation with its default negative slope."""

    @paddle.jit.to_static
    def leaky_relu(inputs):
        return nn.functional.leaky_relu(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(leaky_relu, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_look_up():
    """Embedding lookup: functional form with an explicit weight tensor, and
    the nn.Embedding layer (sparse=True)."""

    @paddle.jit.to_static
    def look_up(inputs, weight):
        return nn.functional.embedding(inputs, weight)

    class LookUp(nn.Layer):
        def __init__(self):
            super(LookUp, self).__init__()
            self.embedding = paddle.nn.Embedding(10, 4, sparse=True)

        @paddle.jit.to_static
        def forward(self, inputs):
            return self.embedding(inputs)

    input_shape = [1, 3, 10, 10]
    # Indices must lie within the embedding table's vocabulary of 10.
    input_data = paddle.randint(0, 10, input_shape, dtype="int32")
    weight = paddle.rand([10, 4], dtype="float32")
    verify_model(look_up, input_data=[input_data, weight])
    verify_model(LookUp(), input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_multiply():
    """Element-wise multiply/divide: tensor-tensor, scalar, and two-input."""

    @paddle.jit.to_static
    def multiply1(inputs):
        return inputs * inputs

    @paddle.jit.to_static
    def multiply2(inputs):
        return inputs * 1.0 / 2.0

    @paddle.jit.to_static
    def multiply3(inputs, inputs2):
        ones = paddle.ones([10], dtype="float32")
        return inputs * ones / inputs2

    input_shape = [10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(multiply1, input_data=input_data)
    verify_model(multiply2, input_data=input_data)
    input_data2 = paddle.rand(input_shape, dtype="float32")
    verify_model(multiply3, input_data=[input_data, input_data2])
+
+
@tvm.testing.uses_gpu
def test_forward_matmul():
    """matmul over the main operand-rank combinations."""

    class MatMul1(nn.Layer):
        def forward(self, input1, input2):
            return paddle.matmul(input1, input2)

    # matrix x vector
    input_data1 = paddle.randn((3, 4), dtype="float32")
    input_data2 = paddle.randn((4,), dtype="float32")
    verify_model(MatMul1(), input_data=[input_data1, input_data2])

    # matrix x matrix
    input_data1 = paddle.randn((5, 4), dtype="float32")
    input_data2 = paddle.randn((4, 5), dtype="float32")
    verify_model(MatMul1(), input_data=[input_data1, input_data2])

    # batched matrix x batched matrix
    input_data1 = paddle.randn((10, 3, 4), dtype="float32")
    input_data2 = paddle.randn((10, 4, 5), dtype="float32")
    verify_model(MatMul1(), input_data=[input_data1, input_data2])

    # batched matrix x broadcasted matrix
    input_data1 = paddle.randn((10, 3, 4), dtype="float32")
    input_data2 = paddle.randn((4, 5), dtype="float32")
    verify_model(MatMul1(), input_data=[input_data1, input_data2])
+
+
@tvm.testing.uses_gpu
def test_forward_pool2d():
    """avg_pool2d and adaptive_avg_pool2d (max_pool2d with return_mask is
    defined but currently disabled below)."""

    @paddle.jit.to_static
    def pool2d1(inputs):
        return nn.functional.avg_pool2d(inputs, kernel_size=2, stride=2, padding=0)

    @paddle.jit.to_static
    def pool2d2(inputs):
        return nn.functional.adaptive_avg_pool2d(inputs, output_size=[3, 3])

    @paddle.jit.to_static
    def pool2d3(inputs):
        return nn.functional.max_pool2d(
            inputs, kernel_size=2, stride=2, padding=0, return_mask=True
        )

    input_data = paddle.uniform(shape=[1, 2, 32, 32], dtype="float32", min=-1, max=1)
    verify_model(pool2d1, input_data=input_data)
    verify_model(pool2d2, input_data=input_data)
    # NOTE(review): return_mask=True yields a second output; presumably not yet
    # handled by the frontend, hence disabled — confirm before enabling.
    # verify_model(pool2d3, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_relu():
    """ReLU activation."""

    @paddle.jit.to_static
    def relu(inputs):
        return nn.functional.relu(inputs)

    input_shape = [10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(relu, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_reshape():
    """reshape with a runtime shape tensor, -1 inference, static shapes, and a
    mix of runtime dims with literals."""

    @paddle.jit.to_static
    def reshape1(inputs, x):
        # Target shape comes from another tensor at runtime.
        new_shape = paddle.shape(x)
        return paddle.reshape(inputs, new_shape)

    @paddle.jit.to_static
    def reshape2(inputs):
        return inputs.reshape([-1])

    @paddle.jit.to_static
    def reshape3(inputs):
        data_shape = inputs.shape
        return inputs.reshape([data_shape[0] * data_shape[1], data_shape[2]])

    @paddle.jit.to_static
    def reshape4(inputs, x):
        new_shape = paddle.shape(x)
        return paddle.reshape(inputs, [new_shape[2], 2, -1])

    input_shape = [2, 1, 10, 1, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    input_data2 = paddle.randn([2, 1, 10, 10])
    verify_model(reshape1, input_data=[input_data, input_data2])
    verify_model(reshape2, input_data=input_data)
    verify_model(reshape3, input_data=paddle.randn((2, 3, 4)))
    verify_model(reshape4, input_data=[input_data, input_data2])
+
+
@tvm.testing.uses_gpu
def test_forward_scale():
    """scale with float scale/bias, and with a fused activation (gelu)."""

    @paddle.jit.to_static
    def scale1(inputs):
        return paddle.scale(inputs, scale=2.0, bias=1.0)

    @paddle.jit.to_static
    def scale2(inputs):
        return paddle.scale(inputs, scale=3, bias=2.1, act="gelu")

    input_data = paddle.randn(shape=[2, 3], dtype="float32")
    verify_model(
        scale1,
        input_data=[
            input_data,
        ],
    )
    verify_model(scale2, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_slice():
    """Tensor slicing: basic ranges and negative stops; strided and
    tensor-bounded slices are defined but disabled pending missing ops."""

    @paddle.jit.to_static
    def slice1(inputs):
        return inputs[:, :, :, :3]

    @paddle.jit.to_static
    def slice2(inputs):
        return inputs[0, :, :-3, :]

    @paddle.jit.to_static
    def slice3(inputs):
        return inputs[0::2, 0::2] + inputs[1::2, 1::2]

    @paddle.jit.to_static
    def slice4(inputs):
        # Slice bounds computed from tensors at runtime.
        x0 = paddle.to_tensor([2]) - paddle.to_tensor([1])
        x1 = paddle.to_tensor([3]) + paddle.to_tensor([1])
        return inputs[:, x0:, 1:x1, :]

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(
        slice1,
        input_data=[
            input_data,
        ],
    )
    verify_model(slice2, input_data=input_data)
    # need op "strided_slice"
    # verify_model(slice3, input_data=paddle.randn((4, 4)))
    # need op "assign_value"
    # verify_model(slice4, input_data=input_data)
+
+
@tvm.testing.uses_gpu
def test_forward_tanh():
    """tanh activation."""

    @paddle.jit.to_static
    def tanh(inputs):
        return paddle.tanh(inputs)

    input_shape = [1, 3, 10, 10]
    input_data = paddle.rand(input_shape, dtype="float32")
    verify_model(tanh, input_data=input_data)
+
+
if __name__ == "__main__":
    # Run every test when invoked directly (i.e. outside of pytest).
    test_forward_add_subtract()
    test_forward_argmax()
    test_forward_assign()
    test_forward_batch_norm()
    test_forward_cast()
    test_forward_concat_unsqueeze()
    test_forward_cumsum()
    test_forward_conv()
    test_forward_dropout()
    test_forward_shape_full()
    test_forward_ones_like()
    test_forward_gelu()
    test_forward_hard_sigmoid()
    test_forward_hard_swish()
    test_forward_layer_norm()
    test_forward_leaky_relu()
    test_forward_look_up()
    test_forward_multiply()
    test_forward_matmul()
    test_forward_pool2d()
    test_forward_relu()
    test_forward_reshape()
    test_forward_scale()
    test_forward_slice()
    test_forward_tanh()
diff --git a/tests/scripts/task_python_frontend.sh 
b/tests/scripts/task_python_frontend.sh
index 62a0fa1..a2f6d70 100755
--- a/tests/scripts/task_python_frontend.sh
+++ b/tests/scripts/task_python_frontend.sh
@@ -51,3 +51,6 @@ run_pytest cython python-frontend-darknet 
tests/python/frontend/darknet
 
 echo "Running relay PyTorch frontend test..."
 run_pytest cython python-frontend-pytorch tests/python/frontend/pytorch
+
+echo "Running relay PaddlePaddle frontend test..."
+run_pytest cython python-frontend-paddlepaddle 
tests/python/frontend/paddlepaddle

Reply via email to