This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/master by this push:
     new fd6d783  [DOCS] Fix sphinx warnings (#4917)
fd6d783 is described below

commit fd6d7837ed05661e81358045adb902772a4f82c3
Author: Cody Yu <comaniac0...@gmail.com>
AuthorDate: Thu Feb 20 09:24:37 2020 -0800

    [DOCS] Fix sphinx warnings (#4917)
    
    * Fix Python docstrings
    
    * More fixes
    
    * Fix lint
---
 docs/api/python/contrib.rst           |  2 +-
 docs/api/python/relay/base.rst        |  6 ----
 docs/api/python/relay/expr.rst        |  6 ----
 docs/api/python/tensor.rst            | 44 ------------------------
 python/tvm/autotvm/task/dispatcher.py |  6 ++--
 python/tvm/autotvm/task/space.py      | 54 ++++++++++++++++-------------
 python/tvm/contrib/cblas.py           | 29 ++++++++--------
 python/tvm/ir/base.py                 | 10 +++---
 python/tvm/ir/transform.py            |  3 +-
 python/tvm/relay/op/reduce.py         | 60 ++++++++++++++++----------------
 python/tvm/relay/op/transform.py      | 64 +++++++++++++++++------------------
 python/tvm/relay/op/vision/yolo.py    | 31 ++++++++++-------
 python/tvm/relay/transform.py         | 19 ++++++-----
 python/tvm/te/tensor.py               |  2 +-
 python/tvm/tir/expr.py                |  2 +-
 topi/python/topi/sparse/csrmv.py      |  1 -
 16 files changed, 148 insertions(+), 191 deletions(-)

diff --git a/docs/api/python/contrib.rst b/docs/api/python/contrib.rst
index 9546511..b482d30 100644
--- a/docs/api/python/contrib.rst
+++ b/docs/api/python/contrib.rst
@@ -21,7 +21,7 @@ tvm.contrib
 
 tvm.contrib.cblas
 ~~~~~~~~~~~~~~~~~
-.. automodule:: tvm.contrib.cc
+.. automodule:: tvm.contrib.cblas
     :members:
 
 
diff --git a/docs/api/python/relay/base.rst b/docs/api/python/relay/base.rst
index a3c5248..dc9dac0 100644
--- a/docs/api/python/relay/base.rst
+++ b/docs/api/python/relay/base.rst
@@ -26,11 +26,5 @@ tvm.relay.base
 .. autoclass:: tvm.relay.base.RelayNode
     :members:
 
-.. autoclass:: tvm.relay.base.Span
-    :members:
-
-.. autoclass:: tvm.relay.base.SourceName
-    :members:
-
 .. autoclass:: tvm.relay.base.Id
     :members:
diff --git a/docs/api/python/relay/expr.rst b/docs/api/python/relay/expr.rst
index f17fc24..57a4a25 100644
--- a/docs/api/python/relay/expr.rst
+++ b/docs/api/python/relay/expr.rst
@@ -35,12 +35,6 @@ tvm.relay.expr
 .. autoclass:: tvm.relay.expr.Tuple
     :members:
 
-.. autoclass:: tvm.relay.expr.Var
-    :members:
-
-.. autoclass:: tvm.relay.expr.GlobalVar
-    :members:
-
 .. autoclass:: tvm.relay.expr.Function
     :members:
 
diff --git a/docs/api/python/tensor.rst b/docs/api/python/tensor.rst
deleted file mode 100644
index 032de39..0000000
--- a/docs/api/python/tensor.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-..  Licensed to the Apache Software Foundation (ASF) under one
-    or more contributor license agreements.  See the NOTICE file
-    distributed with this work for additional information
-    regarding copyright ownership.  The ASF licenses this file
-    to you under the Apache License, Version 2.0 (the
-    "License"); you may not use this file except in compliance
-    with the License.  You may obtain a copy of the License at
-
-..    http://www.apache.org/licenses/LICENSE-2.0
-
-..  Unless required by applicable law or agreed to in writing,
-    software distributed under the License is distributed on an
-    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-    KIND, either express or implied.  See the License for the
-    specific language governing permissions and limitations
-    under the License.
-
-tvm.tensor
-----------
-.. automodule:: tvm.tensor
-
-.. autoclass:: tvm.tensor.Tensor
-    :members:
-    :inherited-members:
-
-.. autoclass:: tvm.tensor.Operation
-    :members:
-    :inherited-members:
-
-.. autoclass:: tvm.tensor.ComputeOp
-    :members:
-    :show-inheritance:
-
-.. autoclass:: tvm.tensor.PlaceholderOp
-    :members:
-    :show-inheritance:
-
-.. autoclass:: tvm.tensor.ScanOp
-    :members:
-    :show-inheritance:
-
-.. autoclass:: tvm.tensor.ExternOp
-    :members:
-    :show-inheritance:
diff --git a/python/tvm/autotvm/task/dispatcher.py b/python/tvm/autotvm/task/dispatcher.py
index 28a9fbb..e7022fa 100644
--- a/python/tvm/autotvm/task/dispatcher.py
+++ b/python/tvm/autotvm/task/dispatcher.py
@@ -258,8 +258,7 @@ class ApplyHistoryBest(DispatchContext):
     records : str or iterator of (MeasureInput, MeasureResult)
         Collection of tuning records.
         If is str, then it should be the filename of a records log file.
-                   Each row of this file is an encoded record pair.
-        Otherwise, it is an iterator.
+        Each row of this file is an encoded record pair. Otherwise, it is an iterator.
     """
     def __init__(self, records):
         super(ApplyHistoryBest, self).__init__()
@@ -279,8 +278,7 @@ class ApplyHistoryBest(DispatchContext):
         records : str or iterator of (MeasureInput, MeasureResult)
             Collection of tuning records.
             If is str, then it should be the filename of a records log file.
-                       Each row of this file is an encoded record pair.
-            Otherwise, it is an iterator.
+            Each row of this file is an encoded record pair. Otherwise, it is an iterator.
         """
         # pylint: disable=import-outside-toplevel
         from pathlib import Path
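
For reference, both record forms documented above are normally exercised through
the autotvm.apply_history_best helper. A minimal sketch, assuming a tuning log
named "tuning.log" and an existing relay module `mod` with `params` (all three
are hypothetical):

    import tvm
    from tvm import autotvm, relay

    # Form 1: the filename of a records log file; each row is an encoded record pair.
    with autotvm.apply_history_best("tuning.log"):
        graph, lib, params = relay.build(mod, target="llvm", params=params)

    # Form 2: an iterator of (MeasureInput, MeasureResult) pairs.
    records = autotvm.record.load_from_file("tuning.log")
    with autotvm.apply_history_best(records):
        graph, lib, params = relay.build(mod, target="llvm", params=params)
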
diff --git a/python/tvm/autotvm/task/space.py b/python/tvm/autotvm/task/space.py
index d83a248..fbdd34e 100644
--- a/python/tvm/autotvm/task/space.py
+++ b/python/tvm/autotvm/task/space.py
@@ -54,13 +54,13 @@ class TransformSpace(object):
     """Base class for transform space
     TransformSpace is the node in the computation graph of axes
 
-    Note
-    ----
-    We can regard our schedule code as a transformation graph of axes.
-    Starting from raw axes in the definition of tvm.compute, we can transform these axes
-    by some operators. The operator includes 'split', 'reorder' and 'annotate'.
-    Each operator has some tunable parameters (e.g. the split factor).
-    Then the tuning process is just to find good parameters of these op.
+    .. note::
+
+        We can regard our schedule code as a transformation graph of axes.
+        Starting from raw axes in the definition of tvm.compute, we can transform these axes
+        by some operators. The operator includes 'split', 'reorder' and 'annotate'.
+        Each operator has some tunable parameters (e.g. the split factor).
+        Then the tuning process is just to find good parameters of these ops.
 
     So the all the combinations of the parameters of these op forms our search space.
 
@@ -109,7 +109,8 @@ class VirtualAxis(TransformSpace):
     var: int or tvm.schedule.IterVar
         If is int, return a virtual axis whose length is the provided argument.
         If is IterVar, return a virtual axis whose length is extracted from
-                       the IterVar's extent domain.
+        the IterVar's extent domain.
+
     name: str
     """
     name_ct = 0
@@ -253,9 +254,9 @@ class SplitEntity(object):
     Parameters
     ----------
     size: Array of int
-        the size of every axis after split
+        the size of every axis after split.
         e.g. an axis of extent 128, we split it into 3 axes, a possible
-             size is [4, 4, 8] (4x4x8 = 128)
+        size is [4, 4, 8] (4x4x8 = 128).
     """
     def __init__(self, size):
         self.size = size
@@ -626,7 +627,7 @@ class ConfigSpace(object):
         var: int or tvm.schedule.IterVar
             If is int, return an axis whose length is the provided argument.
             If is IterVar, return an axis whose length is extracted from the
-                           IterVar's extent domain.
+            IterVar's extent domain.
         """
         return VirtualAxis(var)
 
@@ -647,18 +648,19 @@ class ConfigSpace(object):
             If is 'power2', the tuner will try power-of-two factors less or equal to the length.
             If is 'verbose', the tuner will try all candidates in above two policies.
             If is 'candidate', try given candidates.
-        kwargs: dict
+        **kwargs:
             extra arguments for policy
-            max_factor: int
-                the maximum split factor.
-            filter: function(int) -> bool
-                see examples below for how to use filter.
-            num_outputs: int
-                the total number of axis after split.
-            no_tail: bool
-                should we only include divisible numbers as split factors.
-            candidate: list
-                (policy=candidate) manual candidate list.
+
+            ``max_factor``:
+                the maximum split factor (`int`).
+            ``filter``:
+                see examples below for how to use filter (`Callable[[int], bool]`).
+            ``num_outputs``:
+                the total number of axis after split (`int`).
+            ``no_tail``:
+                should we only include divisible numbers as split factors (`bool`).
+            ``candidate``:
+                (policy=candidate) manual candidate list (`List`).
 
         Examples
         --------
@@ -668,6 +670,7 @@ class ConfigSpace(object):
         >>> # use a filter that only accepts the split scheme whose inner most tile is less then 4
         >>> cfg.define_split('tile_y', y, policy='factors', filter=lambda x: x.size[-1] <= 4)
         """
+
         axes = [axis]
         return self._add_new_transform(SplitSpace, name, axes, policy, **kwargs)
 
@@ -749,8 +752,11 @@ class ConfigSpace(object):
 
     def valid(self):
         """Check whether the config meets all the constraints
-        Note: This check should be called after instantiation of task,
-              because the ConfigEntity/ConfigSpace collects errors during instantiation
+
+        .. note::
+
+            This check should be called after instantiation of task,
+            because the ConfigEntity/ConfigSpace collects errors during instantiation
 
         Returns
         -------
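
For reference, define_split is typically called inside an AutoTVM template and the
resulting SplitEntity is applied back onto the schedule. A minimal sketch against
the te/autotvm API of this TVM version; the task name "demo/matmul" is hypothetical:

    from tvm import autotvm, te

    @autotvm.template("demo/matmul")  # hypothetical task name
    def matmul(N, L, M):
        A = te.placeholder((N, L), name="A")
        B = te.placeholder((L, M), name="B")
        k = te.reduce_axis((0, L), name="k")
        C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")
        s = te.create_schedule(C.op)
        y, x = s[C].op.axis

        cfg = autotvm.get_config()
        # A tunable split using the kwargs documented above.
        cfg.define_split("tile_y", y, num_outputs=2, filter=lambda e: e.size[-1] <= 4)
        yo, yi = cfg["tile_y"].apply(s, C, y)  # cfg["tile_y"] is a SplitEntity
        return s, [A, B, C]
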
diff --git a/python/tvm/contrib/cblas.py b/python/tvm/contrib/cblas.py
index cdd4ce2..2337f84 100644
--- a/python/tvm/contrib/cblas.py
+++ b/python/tvm/contrib/cblas.py
@@ -21,23 +21,22 @@ from .. import api as _api
 
 def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
     """Create an extern op that compute matrix mult of A and rhs with CrhsLAS
-
     This function serves as an example on how to call external libraries.
 
     Parameters
     ----------
-    lhs : Tensor
+    lhs: Tensor
         The left matrix operand
-    rhs : Tensor
+    rhs: Tensor
         The right matrix operand
-    transa : bool
+    transa: bool
         Whether transpose lhs
-    transb : bool
+    transb: bool
         Whether transpose rhs
 
     Returns
     -------
-    C : Tensor
+    C: Tensor
         The result tensor.
     """
     n = lhs.shape[1] if transa else lhs.shape[0]
@@ -55,20 +54,22 @@ def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
 
 def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
     """Create an extern op that compute batched matrix mult of A and rhs with CBLAS
-     This function serves as an example on how to call external libraries.
-     Parameters
+    This function serves as an example on how to call external libraries.
+
+    Parameters
     ----------
-    lhs : Tensor
+    lhs: Tensor
         The left matrix operand
-    rhs : Tensor
+    rhs: Tensor
         The right matrix operand
-    transa : bool
+    transa: bool
         Whether transpose lhs
-    transb : bool
+    transb: bool
         Whether transpose rhs
-     Returns
+
+    Returns
     -------
-    C : Tensor
+    C: Tensor
         The result tensor.
     """
     b = lhs.shape[0]
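
For context, both wrappers are used by declaring placeholders and building the
extern op. A minimal sketch for matmul, assuming TVM was built with a BLAS
library enabled (USE_BLAS in config.cmake):

    import tvm
    from tvm import te
    from tvm.contrib import cblas

    n, l, m = 64, 32, 64
    A = te.placeholder((n, l), name="A")
    B = te.placeholder((l, m), name="B")
    C = cblas.matmul(A, B)  # extern op computing A x B through CBLAS
    s = te.create_schedule(C.op)
    f = tvm.build(s, [A, B, C], target="llvm")  # requires a BLAS-enabled build
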
diff --git a/python/tvm/ir/base.py b/python/tvm/ir/base.py
index 07ed8e8..661a64a 100644
--- a/python/tvm/ir/base.py
+++ b/python/tvm/ir/base.py
@@ -39,11 +39,11 @@ class Node(Object):
             Optionally annotate function to provide additional
             information in the comment block.
 
-        Note
-        ----
-        The meta data section is necessary to fully parse the text format.
-        However, it can contain dumps that are big (e.g constant weights),
-        so it can be helpful to skip printing the meta data section.
+        .. note::
+
+            The meta data section is necessary to fully parse the text format.
+            However, it can contain dumps that are big (e.g constant weights),
+            so it can be helpful to skip printing the meta data section.
 
         Returns
         -------
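
To make the flag concrete, a short sketch that prints a module without its meta
data section (the module here is a minimal stand-in):

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(2, 2))
    func = relay.Function([x], x + relay.const(1.0))
    mod = tvm.IRModule.from_expr(func)
    # Skipping the meta data section keeps large dumps (e.g. constant weights)
    # out of the output, at the cost of the text no longer being fully parseable.
    print(mod.astext(show_meta_data=False))
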
diff --git a/python/tvm/ir/transform.py b/python/tvm/ir/transform.py
index 6192504..4519fb6 100644
--- a/python/tvm/ir/transform.py
+++ b/python/tvm/ir/transform.py
@@ -160,7 +160,8 @@ class Sequential(Pass):
     Some typical usage of the sequential pass are:
     1. Users provide a list of passes for optimization.
     2. Only an optimization level is provided so that the backend system has
-       to glob all passes at this level and below to perform the optimizations.
+    to glob all passes at this level and below to perform the optimizations.
+
     Note that users can also provide a series of passes that they don't want to
     apply when running a sequential pass. Pass dependency will be resolved in
     the backend as well.
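
A usage sketch covering both cases above, assuming an existing IRModule `mod`:

    import tvm
    from tvm import relay

    # Case 1: an explicit list of passes; case 2 is covered by opt_level, which
    # globs in all passes at that level and below.
    seq = tvm.ir.transform.Sequential(
        [relay.transform.SimplifyInference(),
         relay.transform.FoldConstant()],
        opt_level=2)
    with tvm.ir.transform.PassContext(opt_level=2):
        mod = seq(mod)
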
diff --git a/python/tvm/relay/op/reduce.py b/python/tvm/relay/op/reduce.py
index baf896e..d322601 100644
--- a/python/tvm/relay/op/reduce.py
+++ b/python/tvm/relay/op/reduce.py
@@ -145,21 +145,21 @@ def all(data, axis=None, keepdims=False, exclude=False):
     --------
     .. code-block:: python
 
-    data = relay.Constant(tvm.nd.array([[[ True,  True,  True],
-                                         [ True,  True,  True],
-                                         [False,  True, False]],
-                                        [[ True, False, False],
-                                         [ True,  True, False],
-                                         [False,  True,  True]]]))
-
-    relay.all(data, axis=1)
-    # [[False,  True, False],
-    # [False, False, False]]
-
-    relay.all(data, axis=0)
-    # [[ True, False, False],
-    # [ True,  True, False],
-    # [False,  True, False]]
+        data = relay.Constant(tvm.nd.array([[[ True,  True,  True],
+                                           [ True,  True,  True],
+                                           [False,  True, False]],
+                                          [[ True, False, False],
+                                           [ True,  True, False],
+                                           [False,  True,  True]]]))
+
+        relay.all(data, axis=1)
+        # [[False,  True, False],
+        # [False, False, False]]
+
+        relay.all(data, axis=0)
+        # [[ True, False, False],
+        # [ True,  True, False],
+        # [False,  True, False]]
 
     """
     axis = [axis] if isinstance(axis, int) else axis
@@ -197,21 +197,21 @@ def any(data, axis=None, keepdims=False, exclude=False):
     --------
     .. code-block:: python
 
-    data = relay.Constant(tvm.nd.array([[[ True,  True,  True],
-                                         [ True,  True,  True],
-                                         [False,  True, False]],
-                                        [[ True, False, False],
-                                         [ True,  True, False],
-                                         [False,  True,  True]]]))
-
-    relay.any(data, axis=1)
-    # [[True, True, True],
-    # [True,  True, True]]
-
-    relay.any(data, axis=0)
-    # [[ True, True, True],
-    # [ True,  True, True],
-    # [False,  True, True]]
+        data = relay.Constant(tvm.nd.array([[[ True,  True,  True],
+                                            [ True,  True,  True],
+                                            [False,  True, False]],
+                                            [[ True, False, False],
+                                            [ True,  True, False],
+                                            [False,  True,  True]]]))
+
+        relay.any(data, axis=1)
+        # [[True, True, True],
+        # [True,  True, True]]
+
+        relay.any(data, axis=0)
+        # [[ True, True, True],
+        # [ True,  True, True],
+        # [False,  True, True]]
 
     """
     axis = [axis] if isinstance(axis, int) else axis
diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py
index 710d203..15c48df 100644
--- a/python/tvm/relay/op/transform.py
+++ b/python/tvm/relay/op/transform.py
@@ -147,56 +147,54 @@ def squeeze(data, axis=None):
     return _make.squeeze(data, axis)
 
 def reshape(data, newshape):
-    """Reshapes the input array.
-
-    Example::
+    """Reshape the input array.
 
     To give user more convenience in without doing manual shape inference,
    some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}.
     The significance of each is explained below:
 
-    - ``0``  copy this dimension from the input to the output shape.
+    ``0`` copy this dimension from the input to the output shape.
 
-    Example::
+        .. code-block:: python
 
-    - data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2)
-    - data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4)
+            data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2)
+            data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4)
 
-    - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions
-    keeping the size of the new array same as that of the input array.
+    ``-1`` infers the dimension of the output shape by using the remainder of
+    the input dimensions keeping the size of the new array same as that of the input array.
     At most one dimension of shape can be -1.
 
-    Example::
+        .. code-block:: python
 
-    - data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4)
-    - data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8)
-    - data.shape = (2,3,4), newshape = (-1,), result.shape = (24,)
+            data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4)
+            data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8)
+            data.shape = (2,3,4), newshape = (-1,), result.shape = (24,)
 
-    - ``-2`` copy all/remainder of the input dimensions to the output shape.
+    ``-2`` copy all/remainder of the input dimensions to the output shape.
 
-    Example::
+        .. code-block:: python
 
-    - data.shape = (2,3,4), newshape = (-2,), result.shape = (2,3,4)
-    - data.shape = (2,3,4), newshape = (2,-2), result.shape = (2,3,4)
-    - data.shape = (2,3,4), newshape = (-2,1,1), result.shape = (2,3,4,1,1)
+            data.shape = (2,3,4), newshape = (-2,), result.shape = (2,3,4)
+            data.shape = (2,3,4), newshape = (2,-2), result.shape = (2,3,4)
+            data.shape = (2,3,4), newshape = (-2,1,1), result.shape = (2,3,4,1,1)
 
-    - ``-3`` use the product of two consecutive dimensions of the input shape
+    ``-3`` use the product of two consecutive dimensions of the input shape
     as the output dimension.
 
-    Example::
+        .. code-block:: python
 
-    - data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4)
-    - data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
-    - data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
-    - data.shape = (2,3,4), newshape = (-3,-2), result.shape = (6,4)
+            data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4)
+            data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
+            data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
+            data.shape = (2,3,4), newshape = (-3,-2), result.shape = (6,4)
 
-    - ``-4`` split one dimension of the input into two dimensions passed subsequent
+    ``-4`` split one dimension of the input into two dimensions passed subsequent
     to -4 in shape (can contain -1).
 
-    Example::
+        .. code-block:: python
 
-    - data.shape = (2,3,4), newshape = (-4,1,2,-2), result.shape = (1,2,3,4)
-    - data.shape = (2,3,4), newshape = (2,-4,-1,3,-2), result.shape = (2,1,3,4)
+            data.shape = (2,3,4), newshape = (-4,1,2,-2), result.shape = (1,2,3,4)
+            data.shape = (2,3,4), newshape = (2,-4,-1,3,-2), result.shape = (2,1,3,4)
 
     Parameters
     ----------
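
A short sketch of the special values described above, mirroring the docstring's
own shape examples:

    from tvm import relay

    x = relay.var("x", shape=(2, 3, 4))
    a = relay.reshape(x, newshape=(4, 0, 2))       # 0 copies an input dim -> (4, 3, 2)
    b = relay.reshape(x, newshape=(6, 1, -1))      # -1 is inferred        -> (6, 1, 4)
    c = relay.reshape(x, newshape=(-3, 4))         # -3 fuses two dims     -> (6, 4)
    d = relay.reshape(x, newshape=(-4, 1, 2, -2))  # -4 splits one dim     -> (1, 2, 3, 4)
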
@@ -715,14 +713,14 @@ def reverse_reshape(data, newshape):
     """Reshapes the input array where the special values are inferred from
     right to left.
 
-    Example::
-
    The special values have the same semantics as :py:class:`tvm.relay.reshape`.
     The difference is that special values are inferred from right to left. It
-    can be explained in the example below::
+    can be explained in the example below.
+
+    .. code-block:: python
 
-    - data.shape = (10,5,4), newshape = (-1,0), reshape results in (40,5)
-    - data.shape = (10,5,4), newshape = (-1,0), reverse_reshape results in (40,5)
+        data.shape = (10,5,4), newshape = (-1,0), reshape results in (40,5)
+        data.shape = (10,5,4), newshape = (-1,0), reverse_reshape results in (40,5)
 
     Parameters
     ----------
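
The contrast with reshape, as a sketch:

    from tvm import relay

    x = relay.var("x", shape=(10, 5, 4))
    a = relay.reshape(x, newshape=(-1, 0))          # special values resolved left to right
    b = relay.reverse_reshape(x, newshape=(-1, 0))  # special values resolved right to left
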
diff --git a/python/tvm/relay/op/vision/yolo.py b/python/tvm/relay/op/vision/yolo.py
index 7ecf64c..90dc3b8 100644
--- a/python/tvm/relay/op/vision/yolo.py
+++ b/python/tvm/relay/op/vision/yolo.py
@@ -21,18 +21,25 @@ def yolo_reorg(data, stride):
     """Yolo reorg operation used in darknet models.
     This layer shuffles the input tensor values based on the stride value.
     Along with the shuffling, it does the shape transform.
-    If '(n, c, h, w)' is the data shape and 's' is stride, output shape is '(n, c*s*s, h/s, w/s)'
-    Example: data(1, 4, 2, 2) = [[[[ 0  1] [ 2  3]]
-                                  [[ 4  5] [ 6  7]]
-                                  [[ 8  9] [10 11]]
-                                  [[12 13] [14 15]]]]
-             stride = 2
-             ret(1, 16, 1, 1) = [[[[ 0]]  [[ 2]]  [[ 8]]  [[10]]
-                                  [[ 1]]  [[ 3]]  [[ 9]]  [[11]]
-                                  [[ 4]]  [[ 6]]  [[12]]  [[14]]
-                                  [[ 5]]  [[ 7]]  [[13]]  [[15]]]]
-
-    Note: stride=1 has no significance for reorg operation.
+    If '(n, c, h, w)' is the data shape and 's' is stride, output shape is '(n, c*s*s, h/s, w/s)'.
+
+    Example:
+
+    .. code-block:: python
+
+        data(1, 4, 2, 2) = [[[[ 0  1] [ 2  3]]
+                            [[ 4  5] [ 6  7]]
+                            [[ 8  9] [10 11]]
+                            [[12 13] [14 15]]]]
+        stride = 2
+        ret(1, 16, 1, 1) = [[[[ 0]]  [[ 2]]  [[ 8]]  [[10]]
+                            [[ 1]]  [[ 3]]  [[ 9]]  [[11]]
+                            [[ 4]]  [[ 6]]  [[12]]  [[14]]
+                            [[ 5]]  [[ 7]]  [[13]]  [[15]]]]
+
+    .. note::
+
+        stride=1 has no significance for reorg operation.
 
     Parameters
     ----------
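
The example above corresponds to the following relay-level call (a sketch with a
symbolic input):

    from tvm import relay

    data = relay.var("data", shape=(1, 4, 2, 2))
    # stride=2 turns (n, c, h, w) into (n, c*2*2, h/2, w/2), here (1, 16, 1, 1).
    out = relay.vision.yolo_reorg(data, stride=2)
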
diff --git a/python/tvm/relay/transform.py b/python/tvm/relay/transform.py
index 4c2bf87..08b41b2 100644
--- a/python/tvm/relay/transform.py
+++ b/python/tvm/relay/transform.py
@@ -256,17 +256,20 @@ def CombineParallelConv2D(min_num_branches=3):
 def CombineParallelDense(min_num_branches=3):
     """Combine multiple dense operators into one. For example:
 
-                data
-          /              \
-     dense (2,2)         dense (2,2)
-         |                 |
-    elemwise/bcast (2,2)  elemwise/bcast (2,2)
+    .. code-block::
+                    data
+            /              \
+        dense (2,2)         dense (2,2)
+            |                 |
+        elemwise/bcast (2,2)  elemwise/bcast (2,2)
 
     Would become:
 
-             data
-              |
-        batch_matmul+elemwise/bcast (2,2,2)
+    .. code-block::
+
+                data
+                |
+            batch_matmul+elemwise/bcast (2,2,2)
 
     Parameters
     ----------
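
A minimal sketch of applying the pass, assuming an existing IRModule `mod` that
contains parallel dense branches:

    from tvm import relay

    combine = relay.transform.CombineParallelDense(min_num_branches=3)
    mod = combine(mod)  # qualifying parallel dense ops fuse into one batch_matmul
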
diff --git a/python/tvm/te/tensor.py b/python/tvm/te/tensor.py
index fcbb68f..739268a 100644
--- a/python/tvm/te/tensor.py
+++ b/python/tvm/te/tensor.py
@@ -102,7 +102,7 @@ class Tensor(Object, _expr.ExprOp):
 
     @property
     def op(self):
-        """The corressponding :any:`Operation`."""
+        """The corressponding :py:class:`Operation`."""
         return self.__getattr__("op")
 
     @property
diff --git a/python/tvm/tir/expr.py b/python/tvm/tir/expr.py
index aeda603..ca11ffc 100644
--- a/python/tvm/tir/expr.py
+++ b/python/tvm/tir/expr.py
@@ -812,7 +812,7 @@ class Select(PrimExprWithOp):
     Note
     ----
     Select may compute both true_value and false_value.
-    Use :any:`tvm.if_then_else` instead if you want to
+    Use :py:class:`tvm.if_then_else` instead if you want to
     get a conditional expression that only evaluates
     the correct branch.
 
diff --git a/topi/python/topi/sparse/csrmv.py b/topi/python/topi/sparse/csrmv.py
index fb9f10b..8a21f0d 100644
--- a/topi/python/topi/sparse/csrmv.py
+++ b/topi/python/topi/sparse/csrmv.py
@@ -87,7 +87,6 @@ def csrmv(a, x, y=None):
    where `x` and `y` are vectors, `A` is an m-by-k sparse matrix in the CSR format.
 
     Parameters
-
     ----------
     a : tvm.contrib.sparse.CSRNDArray
         2-D sparse matrix with shape [m, k]
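
For context, a sketch of invoking csrmv, assuming the tvm.contrib.sparse
placeholder API; the shapes and nonzeros count are illustrative:

    from tvm import te
    from tvm.contrib import sparse
    import topi

    a = sparse.placeholder((128, 64), nonzeros=256, dtype="float32", name="a")  # CSR matrix
    x = te.placeholder((64, 1), name="x")  # dense vector
    y = topi.sparse.csrmv(a, x)  # y = a * x
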
