This is an automated email from the ASF dual-hosted git repository.

ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git


The following commit(s) were added to refs/heads/main by this push:
     new 4fc83d7  [Fix] Enhanced check for CUDA availability (#258)
4fc83d7 is described below

commit 4fc83d789052be1a30ca4678cd8cfb0250413e1d
Author: Ruihang Lai <[email protected]>
AuthorDate: Tue Nov 11 10:51:58 2025 -0500

    [Fix] Enhanced check for CUDA availability (#258)
    
    This commit fixes a bug where only `torch.version.cuda` was used to check
    whether CUDA is available. That check alone is insufficient, because
    `torch.version.cuda` reports the CUDA version the PyTorch wheel was built
    with even when no GPU is present; an additional `torch.cuda.is_available()`
    check is needed to ensure that CUDA can actually be found in the
    environment.
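
    For illustration only (not part of the commit), the corrected detection
    order boils down to the standalone sketch below. The helper name
    `detect_gpu_flag` is hypothetical; the flag strings mirror the ones used
    in build_backend.py:

        import torch

        def detect_gpu_flag() -> str | None:
            # torch.cuda.is_available() confirms a usable GPU runtime exists;
            # torch.version.cuda / torch.version.hip only reflect how the
            # wheel was built and can be non-None on CPU-only machines.
            if not torch.cuda.is_available():
                return None  # CPU-only environment, no extra build flag
            if torch.version.cuda is not None:
                return "--build-with-cuda"
            if torch.version.hip is not None:
                return "--build-with-rocm"
            raise ValueError("Cannot determine whether to build with CUDA or ROCm.")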
---
 addons/torch_c_dlpack_ext/build_backend.py   | 13 +++++++++----
 include/tvm/ffi/c_api.h                      |  2 +-
 python/tvm_ffi/_optional_torch_c_dlpack.py   | 15 ++++++++++-----
 tests/python/test_optional_torch_c_dlpack.py | 13 +++++++++----
 tests/python/test_tensor.py                  |  3 ++-
 5 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/addons/torch_c_dlpack_ext/build_backend.py b/addons/torch_c_dlpack_ext/build_backend.py
index 9489c5b..17bc2c4 100644
--- a/addons/torch_c_dlpack_ext/build_backend.py
+++ b/addons/torch_c_dlpack_ext/build_backend.py
@@ -75,10 +75,15 @@ def build_wheel(
             )
         else:
             extra_args = []
-            if torch.version.cuda is not None:
-                extra_args.append("--build-with-cuda")
-            elif torch.version.hip is not None:
-                extra_args.append("--build-with-rocm")
+            # First use "torch.cuda.is_available()" to check whether a GPU
+            # environment is available. Then determine the GPU type.
+            if torch.cuda.is_available():
+                if torch.version.cuda is not None:
+                    extra_args.append("--build-with-cuda")
+                elif torch.version.hip is not None:
+                    extra_args.append("--build-with-rocm")
+                else:
+                    raise ValueError("Cannot determine whether to build with CUDA or ROCm.")
             subprocess.run(
                 [
                     sys.executable,
diff --git a/include/tvm/ffi/c_api.h b/include/tvm/ffi/c_api.h
index e773208..152046a 100644
--- a/include/tvm/ffi/c_api.h
+++ b/include/tvm/ffi/c_api.h
@@ -62,7 +62,7 @@
 /*! \brief TVM FFI minor version. */
 #define TVM_FFI_VERSION_MINOR 1
 /*! \brief TVM FFI patch version. */
-#define TVM_FFI_VERSION_PATCH 2
+#define TVM_FFI_VERSION_PATCH 3
 // NOLINTEND(modernize-macro-to-enum)
 
 #ifdef __cplusplus
diff --git a/python/tvm_ffi/_optional_torch_c_dlpack.py b/python/tvm_ffi/_optional_torch_c_dlpack.py
index e05fc34..7f259d1 100644
--- a/python/tvm_ffi/_optional_torch_c_dlpack.py
+++ b/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -41,7 +41,7 @@ from pathlib import Path
 from typing import Any
 
 
-def load_torch_c_dlpack_extension() -> Any:
+def load_torch_c_dlpack_extension() -> Any:  # noqa: PLR0912
     try:
         import torch  # noqa: PLC0415
 
@@ -67,10 +67,15 @@ def load_torch_c_dlpack_extension() -> Any:
         cache_dir = Path(os.environ.get("TVM_FFI_CACHE_DIR", "~/.cache/tvm-ffi")).expanduser()
         addon_output_dir = cache_dir
         major, minor = torch.__version__.split(".")[:2]
-        if torch.version.cuda is not None:
-            device = "cuda"
-        elif torch.version.hip is not None:
-            device = "rocm"
+        # First use "torch.cuda.is_available()" to check whether a GPU
+        # environment is available. Then determine the GPU type.
+        if torch.cuda.is_available():
+            if torch.version.cuda is not None:
+                device = "cuda"
+            elif torch.version.hip is not None:
+                device = "rocm"
+            else:
+                raise ValueError("Cannot determine whether to build with CUDA or ROCm.")
         else:
             device = "cpu"
         suffix = ".dll" if sys.platform.startswith("win") else ".so"
diff --git a/tests/python/test_optional_torch_c_dlpack.py b/tests/python/test_optional_torch_c_dlpack.py
index 8aded2b..4666d5c 100644
--- a/tests/python/test_optional_torch_c_dlpack.py
+++ b/tests/python/test_optional_torch_c_dlpack.py
@@ -44,10 +44,15 @@ def test_build_torch_c_dlpack_extension() -> None:
         "--libname",
         "libtorch_c_dlpack_addon_test.so",
     ]
-    if torch.version.cuda is not None:
-        args.append("--build-with-cuda")
-    elif torch.version.hip is not None:
-        args.append("--build-with-rocm")
+    # First use "torch.cuda.is_available()" to check whether a GPU environment
+    # is available. Then determine the GPU type.
+    if torch.cuda.is_available():
+        if torch.version.cuda is not None:
+            args.append("--build-with-cuda")
+        elif torch.version.hip is not None:
+            args.append("--build-with-rocm")
+        else:
+            raise ValueError("Cannot determine whether to build with CUDA or ROCm.")
     subprocess.run(args, check=True)
 
     lib_path = str(Path("./output-dir/libtorch_c_dlpack_addon_test.so").resolve())
diff --git a/tests/python/test_tensor.py b/tests/python/test_tensor.py
index d772ab3..7a257cd 100644
--- a/tests/python/test_tensor.py
+++ b/tests/python/test_tensor.py
@@ -115,7 +115,8 @@ def test_tvm_ffi_tensor_compatible() -> None:
 
 
 @pytest.mark.skipif(
-    torch is None or torch.version.hip is None, reason="ROCm is not enabled in PyTorch"
+    torch is None or not torch.cuda.is_available() or torch.version.hip is None,
+    reason="ROCm is not enabled in PyTorch",
 )
 def test_tensor_from_pytorch_rocm() -> None:
     assert torch is not None
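
A quick way to see why both checks are needed (hypothetical interpreter
session, not part of the commit): on a CPU-only machine running a CUDA build
of PyTorch,

    import torch
    print(torch.version.cuda)         # e.g. "12.4", the build-time CUDA version
    print(torch.cuda.is_available())  # False, since no usable GPU is present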
