This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch refactor-s1
in repository https://gitbox.apache.org/repos/asf/tvm.git

commit 6b21dcd97397ca4482d174c7d3f3978d193b44a5
Author: tqchen <[email protected]>
AuthorDate: Wed Apr 16 19:34:44 2025 -0400

    pylint
---
 python/tvm/tir/op.py                               | 45 ++++++----------------
 .../contrib/test_hexagon/test_dma_builtin.py       | 21 ++++++++--
 .../test_meta_schedule_trace_apply.py              |  8 +++-
 3 files changed, 35 insertions(+), 39 deletions(-)

diff --git a/python/tvm/tir/op.py b/python/tvm/tir/op.py
index 2e0d2a64c9..46c634eeb4 100644
--- a/python/tvm/tir/op.py
+++ b/python/tvm/tir/op.py
@@ -32,11 +32,7 @@ from .expr import Call, CommReducer, IntImm, PrimExprWithOp, Var
 def _pack_buffer(buf, span=None):
     """Build intrinsics that packs the buffer."""
     shape = Call("handle", "tir.tvm_stack_make_shape", buf.shape, span)
-    strides = (
-        Call("handle", "tir.tvm_stack_make_shape", buf.strides, span)
-        if buf.strides
-        else 0
-    )
+    strides = Call("handle", "tir.tvm_stack_make_shape", buf.strides, span) if buf.strides else 0
     pack_args = [
         buf.data,
         shape,
@@ -338,9 +334,7 @@ def tvm_check_return(expected, return_unexpected, nested_call):
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        "int32", "tir.tvm_check_return", expected, return_unexpected, nested_call
-    )
+    return call_intrin("int32", "tir.tvm_check_return", expected, return_unexpected, nested_call)
 
 
 def tvm_stack_alloca(dtype_str, num):
@@ -685,9 +679,7 @@ def tvm_warp_shuffle(mask, value, warp_id, width, warp_size):
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        value.dtype, "tir.tvm_warp_shuffle", mask, value, warp_id, width, warp_size
-    )
+    return call_intrin(value.dtype, "tir.tvm_warp_shuffle", mask, value, warp_id, width, warp_size)
 
 
 def tvm_warp_shuffle_up(mask, value, offset, width, warp_size):
@@ -796,9 +788,7 @@ def tvm_access_ptr(ptype, data, offset, extent, rw_mask):
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        "handle", "tir.tvm_access_ptr", ptype, data, offset, extent, rw_mask
-    )
+    return call_intrin("handle", "tir.tvm_access_ptr", ptype, data, offset, extent, rw_mask)
 
 
 def tvm_throw_last_error():
@@ -1334,9 +1324,7 @@ def mma_fill(dtype, local_size, local_ptr, offset):
     )
 
 
-def ptx_ldmatrix(
-    dtype, trans, num, type, local_ptr, local_offset, smem_ptr, smem_offset
-):
+def ptx_ldmatrix(dtype, trans, num, type, local_ptr, local_offset, smem_ptr, smem_offset):
     """TVM intrinsic for ptx load matrix from shared memory
     
https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix
 
@@ -1533,9 +1521,7 @@ def ptx_init_barrier_thread_count(barrier_id, thread_count):
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        "", "tir.ptx_init_barrier_thread_count", barrier_id, thread_count
-    )
+    return call_intrin("", "tir.ptx_init_barrier_thread_count", barrier_id, thread_count)
 
 
 def ptx_arrive_barrier(barrier_id):
@@ -1641,9 +1627,7 @@ def make_filled_simdgroup_matrix(
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        "handle", "tir.make_filled_simdgroup_matrix", d, index, value, col, row
-    )
+    return call_intrin("handle", "tir.make_filled_simdgroup_matrix", d, index, value, col, row)
 
 
 def simdgroup_load(
@@ -3404,13 +3388,10 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
         if where is None:
             where = tir.convert(True)
         if init is None:
-            outputs = tuple(
-                tvm.tir.Reduce(combiner, expr, axis, where, i, []) for i in range(size)
-            )
+            outputs = tuple(tvm.tir.Reduce(combiner, expr, axis, where, i, []) for i in range(size))
         else:
             outputs = tuple(
-                tvm.tir.Reduce(combiner, expr, axis, where, i, init)
-                for i in range(size)
+                tvm.tir.Reduce(combiner, expr, axis, where, i, init) for i in range(size)
             )
         return outputs[0] if size == 1 else outputs
 
@@ -3466,9 +3447,7 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
     return reducer
 
 
-def TVMBackendAllocWorkspace(
-    device_type, device_id, nbytes, dtype_code_hint, dtype_bits_hint
-):
+def TVMBackendAllocWorkspace(device_type, device_id, nbytes, dtype_code_hint, dtype_bits_hint):
     """Backend function to allocate temporal workspace
 
     Parameters
@@ -3523,9 +3502,7 @@ def TVMBackendFreeWorkspace(device_type, device_id, ptr):
     call : PrimExpr
         The call expression.
     """
-    return call_intrin(
-        "int32", "tir.TVMBackendFreeWorkspace", device_type, device_id, ptr
-    )
+    return call_intrin("int32", "tir.TVMBackendFreeWorkspace", device_type, device_id, ptr)
 
 
 def anylist_getitem(list_handle, index):
diff --git a/tests/python/contrib/test_hexagon/test_dma_builtin.py b/tests/python/contrib/test_hexagon/test_dma_builtin.py
index 479b680065..1e818dd00a 100644
--- a/tests/python/contrib/test_hexagon/test_dma_builtin.py
+++ b/tests/python/contrib/test_hexagon/test_dma_builtin.py
@@ -65,7 +65,12 @@ class Module_1D:
             dtype=data_type,
             storage_scope="global.vtcm",
         )
-        a: R.Tensor([12800,], dtype=data_type,) = R.vm.alloc_tensor(
+        a: R.Tensor(
+            [
+                12800,
+            ],
+            dtype=data_type,
+        ) = R.vm.alloc_tensor(
             vtcm_obj,
             offset=0,
             shape=R.shape(
@@ -80,7 +85,12 @@ class Module_1D:
             [x, a, 0, True],
             sinfo_args=[],
         )
-        b: R.Tensor([12800,], dtype=data_type,) = R.vm.alloc_tensor(
+        b: R.Tensor(
+            [
+                12800,
+            ],
+            dtype=data_type,
+        ) = R.vm.alloc_tensor(
             vtcm_obj,
             offset=12800 * 4,
             shape=R.shape(
@@ -95,7 +105,12 @@ class Module_1D:
             [y, b, 1, True],
             sinfo_args=[],
         )
-        c: R.Tensor([12800,], dtype=data_type,) = R.vm.alloc_tensor(
+        c: R.Tensor(
+            [
+                12800,
+            ],
+            dtype=data_type,
+        ) = R.vm.alloc_tensor(
             vtcm_obj,
             offset=2 * 12800 * 4,
             shape=R.shape(
diff --git a/tests/python/meta_schedule/test_meta_schedule_trace_apply.py b/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
index bbe086c34e..3f6f2e1a65 100644
--- a/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
+++ b/tests/python/meta_schedule/test_meta_schedule_trace_apply.py
@@ -1864,7 +1864,9 @@ def test_dense_add_cpu():
             ),
             pad_value=None,
         )
-        sch.annotate(block_or_loop=b59, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=True)
+        sch.annotate(
+            block_or_loop=b59, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=True
+        )
 
     verify(Dense, apply_anchor_trace, DenseAdd, "llvm", DenseAdd_scheduled_cpu)
 
@@ -1930,7 +1932,9 @@ def test_dense_add_cpu_no_write_cache():
             ),
             pad_value=None,
         )
-        sch.annotate(block_or_loop=b50, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=True)
+        sch.annotate(
+            block_or_loop=b50, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=True
+        )
 
     verify(Dense, apply_trace, DenseAdd, "llvm", DenseAdd_cpu_no_write_cache)
 

Reply via email to