ashutosh-arm commented on code in PR #11672:
URL: https://github.com/apache/tvm/pull/11672#discussion_r905907601


##########
tests/python/integration/test_ewise.py:
##########
@@ -14,26 +14,29 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+"""Test ewise integration."""

Review Comment:
   Nit: ewise --> elementwise



##########
tests/python/integration/test_dot.py:
##########
@@ -14,31 +14,46 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+"""Test scheduling and running a dot product."""
+import numpy as np
+
 import tvm
 import tvm.testing
 from tvm import te
-import numpy as np
 
 
 @tvm.testing.requires_llvm
 def test_dot():
-    nn = 12
-    n = tvm.runtime.convert(nn)
-    A = te.placeholder((n,), name="A")
-    B = te.placeholder((n,), name="B")
-    k = te.reduce_axis((0, n), "k")
-    C = te.compute((), lambda: te.sum(A[k] * B[k], axis=k), name="C")
-    s = te.create_schedule(C.op)
+    """Test dot product."""
+    arr_length = 12
+    arr_lenght_tvm = tvm.runtime.convert(arr_length)

Review Comment:
   Nit: arr_lenght_tvm --> arr_length_tvm. It will probably require change in 
the code that follows.



##########
tests/python/integration/test_ewise_fpga.py:
##########
@@ -14,11 +14,14 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+"""Test ewise_fpga integration."""

Review Comment:
   Nit: ewise --> elementwise



##########
tests/python/integration/test_lower.py:
##########
@@ -15,41 +15,52 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, too-many-locals, too-many-statements, 
unused-argument
-"""Test workload for lowering and build"""
+"""Test workload for lowering and build."""
+import numpy as np
+
 import tvm
-from tvm import tir
-from tvm.script import tir as T
 import tvm.testing
-import numpy as np
+from tvm.script import tir as T

Review Comment:
   Surprised that it does not complain about using T :) If it does not, I am 
totally fine with it.



##########
tests/python/integration/test_ewise.py:
##########
@@ -265,25 +306,26 @@ def check_device(device):
 
 @tvm.testing.requires_gpu
 def try_warp_memory():
-    """skip this in default test because it require higher arch"""
-    m = 128
-    A = te.placeholder((m,), name="A")
-    B = te.compute((m,), lambda i: A[i] + 3, name="B")
+    """Test using warp memory
+    skip this in default test because it require higher arch"""
+    arr_size = 128
+    placeholder_a = te.placeholder((arr_size,), name="A")
+    result_b = te.compute((arr_size,), lambda i: placeholder_a[i] + 3, 
name="B")
     warp_size = 32
-    s = te.create_schedule(B.op)
-    AA = s.cache_read(A, "warp", [B])
-    xo, xi = s[B].split(B.op.axis[0], warp_size * 2)
-    xi0, xi1 = s[B].split(xi, factor=warp_size)
-    tx = te.thread_axis("threadIdx.x")
-    s[B].bind(xi1, tx)
-    s[B].bind(xo, te.thread_axis("blockIdx.x"))
-    s[AA].compute_at(s[B], xo)
-    xo, xi = s[AA].split(s[AA].op.axis[0], warp_size)
-    s[AA].bind(xi, tx)
+    schedule = te.create_schedule(result_b.op)
+    cache_read_aa = schedule.cache_read(placeholder_a, "warp", [result_b])
+    axis_x0, axis_xi = schedule[result_b].split(result_b.op.axis[0], warp_size 
* 2)
+    _, axis_xi1 = schedule[result_b].split(axis_xi, factor=warp_size)
+    thread_axis_tx = te.thread_axis("threadIdx.x")
+    schedule[result_b].bind(axis_xi1, thread_axis_tx)
+    schedule[result_b].bind(axis_x0, te.thread_axis("blockIdx.x"))
+    schedule[cache_read_aa].compute_at(schedule[result_b], axis_x0)
+    axis_x0, axis_xi = 
schedule[cache_read_aa].split(schedule[cache_read_aa].op.axis[0], warp_size)
+    schedule[cache_read_aa].bind(axis_xi, thread_axis_tx)
 
     @tvm.register_func("tvm_callback_cuda_compile", override=True)
-    def tvm_callback_cuda_compile(code):
-        ptx = nvcc.compile_cuda(code, target_format="ptx")
+    def tvm_callback_cuda_compile(code):  # pylint: disable=unused-variable

Review Comment:
   It's strange that it complains about this! I can see it being used below.



##########
tests/python/integration/test_lower.py:
##########
@@ -15,41 +15,52 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, too-many-locals, too-many-statements, 
unused-argument
-"""Test workload for lowering and build"""
+"""Test workload for lowering and build."""
+import numpy as np
+
 import tvm
-from tvm import tir
-from tvm.script import tir as T
 import tvm.testing
-import numpy as np
+from tvm.script import tir as T
 
 
 @T.prim_func
-def tensorcore_gemm(a: T.handle, b: T.handle, c: T.handle) -> None:
+def tensorcore_gemm(handle_a: T.handle, handle_b: T.handle, handle_c: 
T.handle) -> None:
+    # pylint: disable=missing-function-docstring

Review Comment:
   Are we not able to get rid of it?



##########
tests/python/integration/test_lower.py:
##########
@@ -15,41 +15,52 @@
 # specific language governing permissions and limitations
 # under the License.
 # pylint: disable=invalid-name, too-many-locals, too-many-statements, 
unused-argument

Review Comment:
   Is it possible to get rid of some of these now that we are cleaning up the tests?



##########
tests/python/integration/test_tuning.py:
##########
@@ -34,100 +33,138 @@
 from tvm.ir.instrument import pass_instrument
 from tvm.ir.transform import PassContext
 from tvm.target import Target
+from tvm.tir.analysis import _ffi_api as _analysis_ffi_api
 
 
 def setup_module():
+    """Setup the module used for testing."""
+
     @autotvm.template("testing/conv2d_no_batching")
-    def conv2d_no_batching(N, H, W, CI, CO, KH, KW):
+    def conv2d_no_batching(  # pylint: disable=unused-variable

Review Comment:
   Not able to remove the pylint disable?



##########
tests/python/integration/test_tuning.py:
##########
@@ -274,10 +312,13 @@ def __init__(
             do_fork=False,
             runtime=None,
         ):
+            # pylint: disable=too-many-function-args
             super().__init__(timeout, n_parallel, build_kwargs, build_func, 
do_fork, runtime)
+
+            # pylint: disable=too-many-function-args

Review Comment:
   Out of curiosity: here the number of args is not > 5, so why does it complain? Any 
clue?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to