This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git


The following commit(s) were added to refs/heads/main by this push:
     new 8b46833  chore(lint): Update pre-commit hooks; Enable docstring formatter (#339)
8b46833 is described below

commit 8b4683385fd60b5b89f23cc8d4ab53d3a7c1aebb
Author: Junru Shao <[email protected]>
AuthorDate: Fri Dec 12 11:24:54 2025 -0800

    chore(lint): Update pre-commit hooks; Enable docstring formatter (#339)
---
 .pre-commit-config.yaml               | 18 +++++++++---------
 pyproject.toml                        |  4 ++--
 python/tvm_ffi/_convert.py            |  1 +
 python/tvm_ffi/_dtype.py              |  4 +++-
 python/tvm_ffi/_tensor.py             |  1 +
 python/tvm_ffi/cpp/extension.py       | 16 ++++++++--------
 python/tvm_ffi/dataclasses/c_class.py |  2 ++
 python/tvm_ffi/dataclasses/field.py   |  1 +
 python/tvm_ffi/error.py               |  2 ++
 python/tvm_ffi/module.py              |  7 ++++++-
 python/tvm_ffi/registry.py            |  7 +++++++
 python/tvm_ffi/utils/embed_cubin.py   | 29 +++++++++++++++++++----------
 12 files changed, 61 insertions(+), 31 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3c2c43d..99fa44e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -50,7 +50,7 @@ repos:
         pass_filenames: false
         verbose: false
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: check-added-large-files
       - id: check-case-conflict
@@ -63,7 +63,7 @@ repos:
       - id: check-yaml
       - id: check-toml
   - repo: https://github.com/adrienverge/yamllint
-    rev: v1.35.1
+    rev: v1.37.1
     hooks:
       - id: yamllint
         args:
@@ -74,7 +74,7 @@ repos:
     hooks:
       - id: taplo-format
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.12.3
+    rev: v0.14.9
     hooks:
       - id: ruff-check
         types_or: [python, pyi, jupyter]
@@ -82,11 +82,11 @@ repos:
       - id: ruff-format
         types_or: [python, pyi, jupyter]
   - repo: https://github.com/pre-commit/mirrors-clang-format
-    rev: "v20.1.8"
+    rev: "v21.1.7"
     hooks:
       - id: clang-format
   - repo: https://github.com/MarcoGorelli/cython-lint
-    rev: v0.16.7
+    rev: v0.18.1
     hooks:
       - id: cython-lint
         args: [--max-line-length=120]
@@ -97,15 +97,15 @@ repos:
       - id: shfmt
         args: [--indent=2]
   - repo: https://github.com/shellcheck-py/shellcheck-py
-    rev: v0.10.0.1
+    rev: v0.11.0.1
     hooks:
       - id: shellcheck
   - repo: https://github.com/DavidAnson/markdownlint-cli2
-    rev: v0.14.0
+    rev: v0.20.0
     hooks:
       - id: markdownlint-cli2
   - repo: https://github.com/rstcheck/rstcheck
-    rev: v6.2.4
+    rev: v6.2.5
     hooks:
       - id: rstcheck
         additional_dependencies:
@@ -114,7 +114,7 @@ repos:
           - --config
           - docs/.rstcheck.cfg
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.18.2"
+    rev: "v1.19.0"
     hooks:
       - id: mypy
         name: mypy
diff --git a/pyproject.toml b/pyproject.toml
index 95c4f44..13ef672 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -227,8 +227,8 @@ quote-style = "double"
 indent-style = "space"
 skip-magic-trailing-comma = false
 line-ending = "auto"
-docstring-code-format = false
-docstring-code-line-length = "dynamic"
+docstring-code-format = true
+docstring-code-line-length = 80
 
 [tool.cibuildwheel]
 build-verbosity = 1
diff --git a/python/tvm_ffi/_convert.py b/python/tvm_ffi/_convert.py
index 59783f6..d35ad90 100644
--- a/python/tvm_ffi/_convert.py
+++ b/python/tvm_ffi/_convert.py
@@ -81,6 +81,7 @@ def convert(value: Any) -> Any:  # noqa: PLR0911,PLR0912
 
         # Array libraries that support DLPack export can be converted to Tensor
         import numpy as np
+
         x = tvm_ffi.convert(np.arange(4, dtype="int32"))
         assert isinstance(x, tvm_ffi.Tensor)
 
diff --git a/python/tvm_ffi/_dtype.py b/python/tvm_ffi/_dtype.py
index 65ecad2..dcc4d07 100644
--- a/python/tvm_ffi/_dtype.py
+++ b/python/tvm_ffi/_dtype.py
@@ -226,7 +226,9 @@ class dtype(str):
             # The type code is an integer following DLPack conventions
             assert isinstance(f32.type_code, int)
             # Consistent with constructing from an explicit (code, bits, lanes)
-            assert f32.type_code == tvm_ffi.dtype.from_dlpack_data_type((2, 
32, 1)).type_code
+            assert (
+                f32.type_code == tvm_ffi.dtype.from_dlpack_data_type((2, 32, 
1)).type_code
+            )
 
         See Also
         --------
diff --git a/python/tvm_ffi/_tensor.py b/python/tvm_ffi/_tensor.py
index 5005bc6..bb60882 100644
--- a/python/tvm_ffi/_tensor.py
+++ b/python/tvm_ffi/_tensor.py
@@ -96,6 +96,7 @@ def device(device_type: str | int | DLDeviceType, index: int 
| None = None) -> D
     .. code-block:: python
 
       import tvm_ffi
+
       assert tvm_ffi.device("cuda:0") == tvm_ffi.device("cuda", 0)
       assert tvm_ffi.device("cpu:0") == tvm_ffi.device("cpu", 0)
 
diff --git a/python/tvm_ffi/cpp/extension.py b/python/tvm_ffi/cpp/extension.py
index 761f0dd..e988a77 100644
--- a/python/tvm_ffi/cpp/extension.py
+++ b/python/tvm_ffi/cpp/extension.py
@@ -661,9 +661,9 @@ def build_inline(
 
         # compile the cpp source code and load the module
         lib_path: str = tvm_ffi.cpp.build_inline(
-            name='hello',
+            name="hello",
             cpp_sources=cpp_source,
-            functions='add_one_cpu'
+            functions="add_one_cpu",
         )
 
         # load the module
@@ -876,9 +876,9 @@ def load_inline(  # noqa: PLR0913
 
         # compile the cpp source code and load the module
         mod: Module = tvm_ffi.cpp.load_inline(
-            name='hello',
+            name="hello",
             cpp_sources=cpp_source,
-            functions='add_one_cpu'
+            functions="add_one_cpu",
         )
 
         # use the function from the loaded module to perform
@@ -1017,8 +1017,8 @@ def build(
 
         # compile the cpp source file and get the library path
         lib_path: str = tvm_ffi.cpp.build(
-            name='my_ops',
-            cpp_files='my_ops.cpp'
+            name="my_ops",
+            cpp_files="my_ops.cpp",
         )
 
         # load the module
@@ -1165,8 +1165,8 @@ def load(
 
         # compile the cpp source file and load the module
         mod: Module = tvm_ffi.cpp.load(
-            name='my_ops',
-            cpp_files='my_ops.cpp'
+            name="my_ops",
+            cpp_files="my_ops.cpp",
         )
 
         # use the function from the loaded module
diff --git a/python/tvm_ffi/dataclasses/c_class.py 
b/python/tvm_ffi/dataclasses/c_class.py
index 0bc273c..8c2706b 100644
--- a/python/tvm_ffi/dataclasses/c_class.py
+++ b/python/tvm_ffi/dataclasses/c_class.py
@@ -103,6 +103,7 @@ def c_class(
 
         from tvm_ffi.dataclasses import c_class, field
 
+
         @c_class("example.MyClass")
         class MyClass:
             v_i64: int
@@ -110,6 +111,7 @@ def c_class(
             v_f64: float = field(default=0.0)
             v_f32: float = field(default_factory=lambda: 1.0)
 
+
         obj = MyClass(v_i64=4, v_i32=8)
         obj.v_f64 = 3.14  # transparently forwards to the underlying C++ object
 
diff --git a/python/tvm_ffi/dataclasses/field.py 
b/python/tvm_ffi/dataclasses/field.py
index b1d7d78..95618cd 100644
--- a/python/tvm_ffi/dataclasses/field.py
+++ b/python/tvm_ffi/dataclasses/field.py
@@ -114,6 +114,7 @@ def field(
             v_i64: int
             v_i32: int = field(default=16)
 
+
         obj = PyBase(v_i64=4)
         obj.v_i32  # -> 16
 
diff --git a/python/tvm_ffi/error.py b/python/tvm_ffi/error.py
index 9a0a23a..3c4eaf0 100644
--- a/python/tvm_ffi/error.py
+++ b/python/tvm_ffi/error.py
@@ -227,11 +227,13 @@ def register_error(
 
         import tvm_ffi
 
+
         # Register a custom Python exception so tvm_ffi.Error maps to it
         @tvm_ffi.error.register_error
         class MyError(RuntimeError):
             pass
 
+
         # Convert a Python exception to an FFI Error and back
         ffi_err = tvm_ffi.convert(MyError("boom"))
         py_err = ffi_err.py_error()
diff --git a/python/tvm_ffi/module.py b/python/tvm_ffi/module.py
index 0c04d21..d863ae9 100644
--- a/python/tvm_ffi/module.py
+++ b/python/tvm_ffi/module.py
@@ -92,12 +92,15 @@ class Module(core.Object):
             # ... do something with the tensor
             tensor = mod.func_create_and_return_tensor(x)
 
+
         def good_pattern(x):
             # Good: `tensor` is freed before `mod` goes out of scope
             mod = tvm_ffi.load_module("path/to/library.so")
+
             def run_some_tests():
                 tensor = mod.func_create_and_return_tensor(x)
                 # ... do something with the tensor
+
             run_some_tests()
 
     """
@@ -419,7 +422,9 @@ def system_lib(symbol_prefix: str = "") -> Module:
 
         import tvm_ffi
 
-        mod: tvm_ffi.Module = tvm_ffi.system_lib("testing.")  # symbols 
prefixed with `__tvm_ffi_testing.`
+        mod: tvm_ffi.Module = tvm_ffi.system_lib(
+            "testing."
+        )  # symbols prefixed with `__tvm_ffi_testing.`
         func: tvm_ffi.Function = mod["add_one"]  # looks up 
`__tvm_ffi_testing.add_one`
         assert func(10) == 11
 
diff --git a/python/tvm_ffi/registry.py b/python/tvm_ffi/registry.py
index 74f56cb..5b484cd 100644
--- a/python/tvm_ffi/registry.py
+++ b/python/tvm_ffi/registry.py
@@ -112,10 +112,13 @@ def register_global_func(
 
         import tvm_ffi
 
+
         # we can use decorator to register a function
         @tvm_ffi.register_global_func("mytest.echo")
         def echo(x):
             return x
+
+
         # After registering, we can get the function by its name
         f = tvm_ffi.get_global_func("mytest.echo")
         assert f(1) == 1
@@ -177,10 +180,12 @@ def get_global_func(name: str, allow_missing: bool = 
False) -> core.Function | N
 
         import tvm_ffi
 
+
         @tvm_ffi.register_global_func("demo.echo")
         def echo(x):
             return x
 
+
         f = tvm_ffi.get_global_func("demo.echo")
         assert f(123) == 123
 
@@ -220,10 +225,12 @@ def remove_global_func(name: str) -> None:
 
         import tvm_ffi
 
+
         @tvm_ffi.register_global_func("my.temp")
         def temp():
             return 42
 
+
         assert tvm_ffi.get_global_func("my.temp", allow_missing=True) is not 
None
         tvm_ffi.remove_global_func("my.temp")
         assert tvm_ffi.get_global_func("my.temp", allow_missing=True) is None
diff --git a/python/tvm_ffi/utils/embed_cubin.py 
b/python/tvm_ffi/utils/embed_cubin.py
index de70d2a..904b3a2 100644
--- a/python/tvm_ffi/utils/embed_cubin.py
+++ b/python/tvm_ffi/utils/embed_cubin.py
@@ -92,16 +92,25 @@ def embed_cubin(  # noqa: PLR0912, PLR0915
 
     Examples
     --------
-    >>> embed_cubin(
-    ...     Path("kernel.cubin"),
-    ...     Path("old.o"),
-    ...     Path("new.o"),
-    ...     "my_kernels"
-    ... )
-
-    Then in C++ code (in the source that was compiled to old.o):
-    >>> TVM_FFI_EMBED_CUBIN(my_kernels);
-    >>> auto kernel = TVM_FFI_EMBED_CUBIN_GET_KERNEL(my_kernels, 
"kernel_name");
+    In Python,
+
+    ```python
+    embed_cubin(
+        Path("kernel.cubin"),
+        Path("old.o"),
+        Path("new.o"),
+        "my_kernels",
+    )
+    ```
+
+    Then in C++ code,
+
+    ```C++
+    TVM_FFI_EMBED_CUBIN(my_kernels);
+    auto kernel = TVM_FFI_EMBED_CUBIN_GET_KERNEL(my_kernels, "kernel_name");
+    ```
+
+    (in the source that was compiled to old.o).
 
     """
     if not cubin_path.exists():

Reply via email to