This is an automated email from the ASF dual-hosted git repository.

kevinthesun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
     new 81ff061  Move Ops in relay.op.contrib.* (#4942)
81ff061 is described below

commit 81ff0613e15eb5bc4e2d02dc74c14c5b6403f46d
Author: Cody Yu <comaniac0...@gmail.com>
AuthorDate: Thu Feb 27 12:38:54 2020 -0800

    Move Ops in relay.op.contrib.* (#4942)
    
    * move contrib
    
    * lint
    
    * address comment
    
    * address comment
---
 python/tvm/relay/frontend/mxnet.py      |   2 +-
 python/tvm/relay/frontend/pytorch.py    |   4 +-
 python/tvm/relay/op/__init__.py         |   1 -
 python/tvm/relay/op/_tensor.py          |   2 +
 python/tvm/relay/op/contrib/__init__.py |   2 -
 python/tvm/relay/op/contrib/_contrib.py |  36 ----------
 python/tvm/relay/op/contrib/_make.py    |  20 ------
 python/tvm/relay/op/contrib/contrib.py  | 112 --------------------------------
 python/tvm/relay/op/nn/_nn.py           |  10 +++
 python/tvm/relay/op/nn/nn.py            |  94 +++++++++++++++++++++++++++
 python/tvm/relay/op/tensor.py           |  19 ++++++
 src/relay/op/nn/pooling.cc              |  16 ++---
 src/relay/op/tensor/unary.cc            |   6 +-
 tests/python/relay/test_op_level10.py   |   4 +-
 14 files changed, 141 insertions(+), 187 deletions(-)
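
For downstream code the visible effect is a rename of the Python entry
points; op semantics are unchanged. A minimal before/after sketch
(entry points taken from the hunks below):

    from tvm import relay

    x = relay.var("x", shape=(1, 3, 224, 224))

    # before this commit:
    #   y = relay.op.contrib.adaptive_avg_pool2d(x, output_size=(7, 7))
    #   n = relay.op.contrib.ndarray_size(x)

    # after this commit:
    y = relay.nn.adaptive_avg_pool2d(x, output_size=(7, 7))
    n = relay.op.ndarray_size(x)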

diff --git a/python/tvm/relay/frontend/mxnet.py b/python/tvm/relay/frontend/mxnet.py
index 8510adb..0020a63 100644
--- a/python/tvm/relay/frontend/mxnet.py
+++ b/python/tvm/relay/frontend/mxnet.py
@@ -313,7 +313,7 @@ def _mx_pooling(inputs, attrs):
 
 def _mx_adaptive_avg_pooling(inputs, attrs):
     output_size = attrs.get_int_tuple("output_size", [])
-    return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
+    return _op.nn.adaptive_avg_pool2d(inputs[0], output_size)
 
 
 def _mx_dropout(inputs, attrs):
diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py
index 0b766a1..edd6ad8 100644
--- a/python/tvm/relay/frontend/pytorch.py
+++ b/python/tvm/relay/frontend/pytorch.py
@@ -151,7 +151,7 @@ def _adaptive_avg_2d():
         data = inputs[0]
         output_size = _infer_shape(inputs[1])
 
-        return _op.contrib.contrib.adaptive_avg_pool2d(
+        return _op.nn.adaptive_avg_pool2d(
             data,
             output_size=output_size)
     return _impl
@@ -161,7 +161,7 @@ def _adaptive_max_2d():
         data = inputs[0]
         output_size = _infer_shape(inputs[1])
 
-        return _op.contrib.contrib.adaptive_max_pool2d(
+        return _op.nn.adaptive_max_pool2d(
             data,
             output_size=output_size)
     return _impl
diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py
index 7427c63..4a4823d 100644
--- a/python/tvm/relay/op/__init__.py
+++ b/python/tvm/relay/op/__init__.py
@@ -32,7 +32,6 @@ from . import annotation
 from . import memory
 from . import image
 from . import vision
-from . import contrib
 from . import op_attrs
 
 
diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py
index 0fbbaef..9f0906b 100644
--- a/python/tvm/relay/op/_tensor.py
+++ b/python/tvm/relay/op/_tensor.py
@@ -70,6 +70,8 @@ register_injective_schedule("minimum")
 register_injective_schedule("right_shift")
 register_injective_schedule("left_shift")
 register_injective_schedule("shape_of")
+register_injective_schedule("ndarray_size")
+
 
 # zeros
 @register_compute("zeros")
diff --git a/python/tvm/relay/op/contrib/__init__.py b/python/tvm/relay/op/contrib/__init__.py
index 3159006..c6e086a 100644
--- a/python/tvm/relay/op/contrib/__init__.py
+++ b/python/tvm/relay/op/contrib/__init__.py
@@ -17,5 +17,3 @@
 # pylint: disable=wildcard-import
 """Neural network related operators."""
 from __future__ import absolute_import as _abs
-from .contrib import *
-from . import _contrib
diff --git a/python/tvm/relay/op/contrib/_contrib.py b/python/tvm/relay/op/contrib/_contrib.py
deleted file mode 100644
index 3927cef..0000000
--- a/python/tvm/relay/op/contrib/_contrib.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-argument
-"""Backend compiler related feature registration"""
-from __future__ import absolute_import
-
-from .. import op as reg
-from .. import strategy
-from ..op import OpPattern
-
-
-# adaptive_max_pool2d
-reg.register_schedule("contrib.adaptive_max_pool2d", 
strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_max_pool2d", 
OpPattern.OUT_ELEMWISE_FUSABLE)
-
-
-# adaptive_avg_pool2d
-reg.register_schedule("contrib.adaptive_avg_pool2d", 
strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_avg_pool2d", 
OpPattern.OUT_ELEMWISE_FUSABLE)
-
-# relay.contrib.ndarray_size
-reg.register_injective_schedule("contrib.ndarray_size")
diff --git a/python/tvm/relay/op/contrib/_make.py b/python/tvm/relay/op/contrib/_make.py
deleted file mode 100644
index 9d3369e..0000000
--- a/python/tvm/relay/op/contrib/_make.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Constructor APIs"""
-import tvm._ffi
-
-tvm._ffi._init_api("relay.op.contrib._make", __name__)
diff --git a/python/tvm/relay/op/contrib/contrib.py b/python/tvm/relay/op/contrib/contrib.py
index 7114b7e..cb7e5d4 100644
--- a/python/tvm/relay/op/contrib/contrib.py
+++ b/python/tvm/relay/op/contrib/contrib.py
@@ -17,115 +17,3 @@
 #pylint: disable=invalid-name, too-many-lines
 """Contrib operations."""
 from __future__ import absolute_import as _abs
-from . import _make
-
-
-def adaptive_max_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive max pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D max value calculation
-    across each window represented by WxH.
-
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_max_pool2d(data, output_size, layout)
-
-def adaptive_avg_pool2d(data,
-                        output_size=None,
-                        layout="NCHW"):
-    r"""2D adaptive average pooling operator. This operator is experimental.
-
-    This operator takes data as input and does 2D average value calculation
-    across each window represented by WxH.
-
-
-    In the default case, where the data_layout is `NCHW`
-    a data Tensor with shape `(batch_size, in_channels, height, width)`,
-    to produce an output Tensor with shape
-    (batch_size, in_channels, output_height, output_width).
-
-    The pooling kernel and stride sizes are automatically chosen for
-    desired output sizes.
-
-    For output_size:
-        If this argument is not provided, input height and width will be used
-        as output height and width.
-
-        If a single integer is provided for output_size, the output size is
-        (N x C x output_size x output_size) for any input (NCHW).
-
-        If a tuple of integers (height, width) are provided for output_size,
-        the output size is (N x C x height x width) for any input (NCHW).
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input data to the operator.
-
-    output_size : tuple of int. optional
-        Output height and width.
-
-    layout : str, optional
-        Layout of the input.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The computed result.
-    """
-    output_size = [] or output_size
-    return _make.adaptive_avg_pool2d(data, output_size, layout)
-
-def ndarray_size(data, dtype="int32"):
-    """Get number of elements of input tensor.
-
-    Parameters
-    ----------
-    data : tvm.relay.Expr
-        The input tensor.
-
-    dtype : str, optional
-        The target data type.
-
-    Returns
-    -------
-    result : tvm.relay.Expr
-        The number of elements of input tensor.
-    """
-    return _make.ndarray_size(data, dtype)
diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py
index a4fde28..c522ef9 100644
--- a/python/tvm/relay/op/nn/_nn.py
+++ b/python/tvm/relay/op/nn/_nn.py
@@ -247,6 +247,16 @@ reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
 reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
 
 
+# adaptive_max_pool2d
+reg.register_schedule("nn.adaptive_max_pool2d", 
strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
+# adaptive_avg_pool2d
+reg.register_schedule("nn.adaptive_avg_pool2d", 
strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
 # leaky_relu
 reg.register_broadcast_schedule("nn.leaky_relu")
 reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
diff --git a/python/tvm/relay/op/nn/nn.py b/python/tvm/relay/op/nn/nn.py
index 9ecb5af..30918a4 100644
--- a/python/tvm/relay/op/nn/nn.py
+++ b/python/tvm/relay/op/nn/nn.py
@@ -2277,3 +2277,97 @@ def space_to_depth(data, block_size, layout='NCHW'):
                            in_height / block_size, in_width / block_size]
     """
     return _make.space_to_depth(data, block_size, layout)
+
+
+def adaptive_max_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive max pooling operator. This operator is experimental.
+
+    This operator takes data as input and performs a 2D max-value
+    calculation over each WxH pooling window.
+
+    In the default case, where the data_layout is `NCHW`, a data
+    Tensor with shape `(batch_size, in_channels, height, width)`
+    produces an output Tensor with shape
+    `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for
+    desired output sizes.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = output_size or []  # treat None as [] (use input size)
+    return _make.adaptive_max_pool2d(data, output_size, layout)
+
+
+def adaptive_avg_pool2d(data,
+                        output_size=None,
+                        layout="NCHW"):
+    r"""2D adaptive average pooling operator. This operator is experimental.
+
+    This operator takes data as input and performs a 2D average-value
+    calculation over each WxH pooling window.
+
+    In the default case, where the data_layout is `NCHW`, a data
+    Tensor with shape `(batch_size, in_channels, height, width)`
+    produces an output Tensor with shape
+    `(batch_size, in_channels, output_height, output_width)`.
+
+    The pooling kernel and stride sizes are automatically chosen for
+    desired output sizes.
+
+    For output_size:
+        If this argument is not provided, input height and width will be used
+        as output height and width.
+
+        If a single integer is provided for output_size, the output size is
+        (N x C x output_size x output_size) for any input (NCHW).
+
+        If a tuple of integers (height, width) is provided for output_size,
+        the output size is (N x C x height x width) for any input (NCHW).
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input data to the operator.
+
+    output_size : tuple of int, optional
+        Output height and width.
+
+    layout : str, optional
+        Layout of the input.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The computed result.
+    """
+    output_size = output_size or []  # treat None as [] (use input size)
+    return _make.adaptive_avg_pool2d(data, output_size, layout)
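
As a quick sanity check of the relocated pooling ops, a small sketch
(assumes a working TVM build of this era; tvm.IRModule is used only to
pretty-print the inferred types):

    import tvm
    from tvm import relay

    x = relay.var("x", relay.TensorType((1, 16, 32, 32), "float32"))
    y = relay.nn.adaptive_avg_pool2d(x, output_size=(8, 8))
    mod = tvm.IRModule.from_expr(relay.Function([x], y))
    print(mod)  # output type should be Tensor[(1, 16, 8, 8), float32]
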
diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py
index 898038d..ada1f5e 100644
--- a/python/tvm/relay/op/tensor.py
+++ b/python/tvm/relay/op/tensor.py
@@ -974,3 +974,22 @@ def shape_of(data, dtype="int32"):
         The shape tensor.
     """
     return _make.shape_of(data, dtype)
+
+
+def ndarray_size(data, dtype="int32"):
+    """Get number of elements of input tensor.
+
+    Parameters
+    ----------
+    data : tvm.relay.Expr
+        The input tensor.
+
+    dtype : str, optional
+        The target data type.
+
+    Returns
+    -------
+    result : tvm.relay.Expr
+        The number of elements of the input tensor.
+    """
+    return _make.ndarray_size(data, dtype)
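
And a matching sketch for the relocated ndarray_size; the interpreter
call follows the relay.create_executor API of this era:

    import numpy as np
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(2, 3, 4), dtype="float32")
    func = relay.Function([x], relay.op.ndarray_size(x, dtype="int64"))
    data = np.random.uniform(size=(2, 3, 4)).astype("float32")
    out = relay.create_executor().evaluate(func)(data)
    # out holds 24 (= 2 * 3 * 4)
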
diff --git a/src/relay/op/nn/pooling.cc b/src/relay/op/nn/pooling.cc
index 77baae5..f174882 100644
--- a/src/relay/op/nn/pooling.cc
+++ b/src/relay/op/nn/pooling.cc
@@ -499,21 +499,21 @@ Array<te::Tensor> AdaptivePool2DCompute(const Attrs& 
attrs,
                             mode, layout.name()) };
 }
 
-// relay.contrib.adaptive_avg_pool2d
+// relay.nn.adaptive_avg_pool2d
 Expr MakeAdaptiveAvgPool2D(Expr data,
                            Array<IndexExpr> output_size,
                            std::string layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
-  static const Op& op = Op::Get("contrib.adaptive_avg_pool2d");
+  static const Op& op = Op::Get("nn.adaptive_avg_pool2d");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }
 
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_avg_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_avg_pool2d")
 .set_body_typed(MakeAdaptiveAvgPool2D);
 
-RELAY_REGISTER_OP("contrib.adaptive_avg_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_avg_pool2d")
   .describe(R"code(Adaptive average pooling operation for 2D data.
 
 - **data**: This depends on the `layout` parameter. Input is 4D array of shape
@@ -538,21 +538,21 @@ RELAY_REGISTER_OP("contrib.adaptive_avg_pool2d")
 .set_attr<FTVMCompute>("FTVMCompute", 
AdaptivePool2DCompute<topi::nn::kAvgPool>);
 
 
-// relay.contrib.adaptive_max_pool2d
+// relay.nn.adaptive_max_pool2d
 Expr MakeAdaptiveMaxPool2D(Expr data,
                            Array<IndexExpr> output_size,
                            std::string layout) {
   auto attrs = make_object<AdaptivePool2DAttrs>();
   attrs->output_size = std::move(output_size);
   attrs->layout = std::move(layout);
-  static const Op& op = Op::Get("contrib.adaptive_max_pool2d");
+  static const Op& op = Op::Get("nn.adaptive_max_pool2d");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }
 
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_max_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_max_pool2d")
 .set_body_typed(MakeAdaptiveMaxPool2D);
 
-RELAY_REGISTER_OP("contrib.adaptive_max_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_max_pool2d")
   .describe(R"code(Adaptive max pooling operation for 2D data.
 
 - **data**: This depends on the `layout` parameter. Input is 4D array of shape
diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc
index caa6451..2c73458 100644
--- a/src/relay/op/tensor/unary.cc
+++ b/src/relay/op/tensor/unary.cc
@@ -359,15 +359,15 @@ Array<te::Tensor> NdarraySizeCompute(const Attrs& attrs,
   return Array<te::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
 }
 
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size")
+TVM_REGISTER_GLOBAL("relay.op._make.ndarray_size")
 .set_body_typed([](Expr data, DataType dtype) {
   auto attrs = make_object<NdarraySizeAttrs>();
   attrs->dtype = dtype;
-  static const Op& op = Op::Get("contrib.ndarray_size");
+  static const Op& op = Op::Get("ndarray_size");
   return CallNode::make(op, {data}, Attrs(attrs), {});
 });
 
-RELAY_REGISTER_OP("contrib.ndarray_size")
+RELAY_REGISTER_OP("ndarray_size")
 .describe(R"code(Returns a tensor representing the number of elements of input 
tensor.
 
 )code" TVM_ADD_FILELINE)
diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py
index 1e4be74..2e6ed62 100644
--- a/tests/python/relay/test_op_level10.py
+++ b/tests/python/relay/test_op_level10.py
@@ -335,7 +335,7 @@ def test_shape_of():
 def test_ndarray_size():
     def verify_ndarray_size(shape):
         x = relay.var("x", shape=shape)
-        func = relay.Function([x], relay.op.contrib.ndarray_size(x))
+        func = relay.Function([x], relay.op.ndarray_size(x))
         func = run_infer_type(func)
 
         x_data = np.random.uniform(size=shape).astype("float32")
@@ -374,7 +374,7 @@ def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="fl
                     l_sl = slice(l_start, l_end)
                     np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])
 
-    opfunc = relay.contrib.adaptive_avg_pool2d if pool_type == "avg" else relay.contrib.adaptive_max_pool2d
+    opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
     x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
     y = opfunc(x, out_size, layout)
     func = relay.Function([x], y)
