This is an automated email from the ASF dual-hosted git repository.
tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git
The following commit(s) were added to refs/heads/main by this push:
new 4628f06 chore(cython): Specify `--module-name` when building Cython (#248)
4628f06 is described below
commit 4628f06baaeb9f880c3522388ab33cc8b0736304
Author: Junru Shao <[email protected]>
AuthorDate: Sun Nov 9 08:52:27 2025 -0800
chore(cython): Specify `--module-name` when building Cython (#248)
PR #177 introduced some unnecessary logic in
`python/tvm_ffi/__init__.py` that updates the `__module__` of
Cython-generated classes, which unfortunately is not good practice and
does not take effect on older Python/Cython versions. This PR removes
that logic and replaces it with `--module-name` at build time.
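
As a quick illustration of the intent (a sketch only; the interpreter
output and the bare name 'core' below are assumptions, not taken from
this patch): with `--module-name "tvm_ffi.core"` passed at transpile
time, Cython-generated classes report the fully qualified module path
directly, so no import-time patching of `__module__` is needed.

    >>> import tvm_ffi
    >>> # Cython extension assumed built with --module-name "tvm_ffi.core"
    >>> tvm_ffi.core.Object.__module__
    'tvm_ffi.core'
    >>> # Without --module-name, the class could report a bare name such as
    >>> # 'core', which the removed _update_module() hook rewrote at import time.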
---
CMakeLists.txt                   |  3 ++-
docs/conf.py                     |  2 +-
pyproject.toml                   |  4 ++--
python/tvm_ffi/__init__.py       | 14 --------------
python/tvm_ffi/_dtype.py         |  6 +++---
python/tvm_ffi/access_path.py    |  2 +-
python/tvm_ffi/container.py      |  2 +-
python/tvm_ffi/cpp/extension.py  |  2 +-
python/tvm_ffi/cython/object.pxi |  6 +++---
python/tvm_ffi/cython/tensor.pxi | 15 ++++++++-------
10 files changed, 22 insertions(+), 34 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e0b584a..60ff04a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -239,7 +239,8 @@ if (TVM_FFI_BUILD_PYTHON_MODULE)
add_custom_command(
OUTPUT ${_core_cpp}
- COMMAND ${Python_EXECUTABLE} -m cython --cplus ${_core_pyx} -o ${_core_cpp}
+ COMMAND ${Python_EXECUTABLE} -m cython --cplus ${_core_pyx} -o ${_core_cpp} --module-name
+ "tvm_ffi.core"
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Transpiling ${_core_pyx} to ${_core_cpp}"
DEPENDS ${_cython_sources}
diff --git a/docs/conf.py b/docs/conf.py
index ef0f399..3aa1969 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -278,7 +278,7 @@ def _link_inherited_members(app, what, name, obj, options, lines) -> None:  # no
# If it comes from builtins we already hide it; no link needed
if base in _py_native_classes or getattr(base, "__module__", "") == "builtins":
return
- owner_fq = f"{base.__module__}.{base.__qualname__}"
+ owner_fq = f"{base.__module__}.{base.__qualname__}".replace("tvm_ffi.core.", "tvm_ffi.")
role = "attr" if what in {"attribute", "property"} else "meth"
lines.clear()
lines.append(
diff --git a/pyproject.toml b/pyproject.toml
index 43b09ba..3630002 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -64,7 +64,7 @@ dev = [
"pytest",
"numpy",
"ml_dtypes",
- "cython",
+ "cython>=3.0",
"cmake",
"scikit-build-core",
"tomli",
@@ -103,7 +103,7 @@ tvm-ffi-config = "tvm_ffi.config:__main__"
tvm-ffi-stubgen = "tvm_ffi.stub.stubgen:__main__"
[build-system]
-requires = ["scikit-build-core>=0.10.0", "cython", "setuptools-scm"]
+requires = ["scikit-build-core>=0.10.0", "cython>=3.0", "setuptools-scm"]
build-backend = "scikit_build_core.build"
[tool.scikit-build]
diff --git a/python/tvm_ffi/__init__.py b/python/tvm_ffi/__init__.py
index 79f81a1..22bdd52 100644
--- a/python/tvm_ffi/__init__.py
+++ b/python/tvm_ffi/__init__.py
@@ -102,17 +102,3 @@ __all__ = [
"use_raw_stream",
"use_torch_stream",
]
-
-
-def _update_module() -> None:
- for name in __all__:
- obj = globals()[name]
- if not getattr(obj, "__module__", "tvm_ffi").startswith("tvm_ffi"):
- try:
- obj.__module__ = "tvm_ffi"
- except (AttributeError, TypeError):
- # some types don't allow setting __module__
- pass
-
-
-_update_module()
diff --git a/python/tvm_ffi/_dtype.py b/python/tvm_ffi/_dtype.py
index 7fa78d5..8e36bb4 100644
--- a/python/tvm_ffi/_dtype.py
+++ b/python/tvm_ffi/_dtype.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-"""Lightweight ``dtype`` wrapper for TVM FFI."""
+"""Lightweight dtype wrapper for TVM FFI."""
# pylint: disable=invalid-name
from __future__ import annotations
@@ -47,9 +47,9 @@ class DataTypeCode(IntEnum):
class dtype(str):
- """Lightweight ``dtype`` in TVM FFI.
+ """Lightweight data type in TVM FFI.
- ``dtype`` behaves like a Python ``str`` but also carries an internal FFI
+ It behaves like a Python :class:`str` but also carries an internal FFI
representation. You can construct it from strings, NumPy/ML dtypes, or
via :py:meth:`from_dlpack_data_type`.
diff --git a/python/tvm_ffi/access_path.py b/python/tvm_ffi/access_path.py
index 6b9db9c..b69e05d 100644
--- a/python/tvm_ffi/access_path.py
+++ b/python/tvm_ffi/access_path.py
@@ -55,7 +55,7 @@ class AccessStep(Object):
class AccessPath(Object):
"""Access path container.
- An ``AccessPath`` describes how to reach a nested attribute or item
+ It describes how to reach a nested attribute or item
inside a complex FFI object by recording a sequence of steps
(attribute, array index, or map key). It is primarily used by
diagnostics to pinpoint structural mismatches.
diff --git a/python/tvm_ffi/container.py b/python/tvm_ffi/container.py
index 90a84a2..06fb92e 100644
--- a/python/tvm_ffi/container.py
+++ b/python/tvm_ffi/container.py
@@ -103,7 +103,7 @@ def getitem_helper(
Returns
-------
result
- The element for integer indices or a ``list`` for slices.
+ The element for integer indices or a :class:`list` for slices.
"""
if isinstance(idx, slice):
diff --git a/python/tvm_ffi/cpp/extension.py b/python/tvm_ffi/cpp/extension.py
index 7d84476..ff4daff 100644
--- a/python/tvm_ffi/cpp/extension.py
+++ b/python/tvm_ffi/cpp/extension.py
@@ -941,7 +941,7 @@ def load(
library. It's possible to only provide cpp_files or cuda_files.
Note that this function does not automatically export functions to the tvm ffi module. You need to
- manually use the TVM FFI export macros (e.g., ``TVM_FFI_DLL_EXPORT_TYPED_FUNC``) in your source files to export
+ manually use the TVM FFI export macros (e.g., :c:macro:`TVM_FFI_DLL_EXPORT_TYPED_FUNC`) in your source files to export
functions. This gives you more control over which functions are exported and how they are exported.
Extra compiler and linker flags can be provided via the ``extra_cflags``, ``extra_cuda_cflags``, and ``extra_ldflags``
diff --git a/python/tvm_ffi/cython/object.pxi b/python/tvm_ffi/cython/object.pxi
index c8e1508..c7a9a47 100644
--- a/python/tvm_ffi/cython/object.pxi
+++ b/python/tvm_ffi/cython/object.pxi
@@ -83,16 +83,16 @@ cdef class Object:
This is the root Python type for objects backed by the TVM FFI
runtime. Each instance references a handle to a C++ runtime
object. Python subclasses typically correspond to C++ runtime
- types and are registered via ``tvm_ffi.register_object``.
+ types and are registered via :py:meth:`tvm_ffi.register_object`.
Notes
-----
- - Equality of two ``Object`` instances uses underlying handle
+ - Equality of two :py:class:`Object` instances uses underlying handle
identity unless an overridden implementation is provided on the
concrete type. Use :py:meth:`same_as` to check whether two
references point to the same underlying object.
- Most users interact with subclasses (e.g. :class:`Tensor`,
- :class:`Function`) rather than ``Object`` directly.
+ :class:`Function`) rather than :py:class:`Object` directly.
Examples
--------
diff --git a/python/tvm_ffi/cython/tensor.pxi b/python/tvm_ffi/cython/tensor.pxi
index e6fba34..6a9cf9e 100644
--- a/python/tvm_ffi/cython/tensor.pxi
+++ b/python/tvm_ffi/cython/tensor.pxi
@@ -158,13 +158,14 @@ def from_dlpack(
Parameters
----------
ext_tensor : object
- An object supporting ``__dlpack__`` and ``__dlpack_device__``.
+ An object supporting `__dlpack__ <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack__.html#array_api.array.__dlpack__>`_
+ and `__dlpack_device__ <https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack_device__.html#array_api.array.__dlpack_device__>`_.
require_alignment : int, optional
If greater than zero, require the underlying data pointer to be
aligned to this many bytes. Misaligned inputs raise
:class:`ValueError`.
require_contiguous : bool, optional
- When ``True``, require the layout to be contiguous. Non-contiguous
+ When True, require the layout to be contiguous. Non-contiguous
inputs raise :class:`ValueError`.
Returns
@@ -184,7 +185,7 @@ def from_dlpack(
y_np = np.from_dlpack(x)
assert np.shares_memory(x_np, y_np)
- """
+ """ # noqa: E501
cdef TVMFFIObjectHandle chandle
_from_dlpack_universal(ext_tensor, require_alignment, require_contiguous, &chandle)
return make_tensor_from_chandle(chandle)
@@ -211,7 +212,7 @@ def _make_strides_from_shape(tuple shape: tuple[int, ...]) -> tuple[int, ...]:
cdef class Tensor(Object):
"""Managed n-dimensional array compatible with DLPack.
- ``Tensor`` provides zero-copy interoperability with array libraries
+ It provides zero-copy interoperability with array libraries
through the DLPack protocol. Instances are typically created with
:func:`from_dlpack` or returned from FFI functions.
@@ -275,7 +276,7 @@ cdef class Tensor(Object):
dltensor, _c_str_dltensor_versioned,
<PyCapsule_Destructor>_c_dlpack_versioned_deleter)
def __dlpack_device__(self) -> tuple[int, int]:
- """Implement the standard ``__dlpack_device__`` protocol."""
+ """Implement the standard `__dlpack_device__
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack_device__.html#array_api.array.__dlpack_device__>`_
protocol.""" # noqa: E501
cdef int device_type = self.cdltensor.device.device_type
cdef int device_id = self.cdltensor.device.device_id
return (device_type, device_id)
@@ -288,7 +289,7 @@ cdef class Tensor(Object):
dl_device: tuple[int, int] | None = None,
copy: bool | None = None,
) -> object:
- """Implement the standard ``__dlpack__`` protocol.
+ """Implement the standard `__dlpack__
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack__.html#array_api.array.__dlpack__>`_
protocol.
Parameters
----------
@@ -306,7 +307,7 @@ cdef class Tensor(Object):
------
BufferError
If the requested behavior cannot be satisfied.
- """
+ """ # noqa: E501
if max_version is None:
# Keep and use the DLPack 0.X implementation
# Note: from March 2025 onwards (but ideally as late as