This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm-ffi.git


The following commit(s) were added to refs/heads/main by this push:
     new ef54bda  [fix] Change Py_DECREF to Py_DecRef and make function static members (#138)
ef54bda is described below

commit ef54bdac61f4f22ea71a475ed8264333b8626ef9
Author: Kathryn (Jinqi) Chen <[email protected]>
AuthorDate: Wed Oct 15 14:29:21 2025 -0700

    [fix] Change Py_DECREF to Py_DecRef and make function static members (#138)
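    
    Background for the deleter change: Py_DECREF is historically a
    preprocessor macro (a static inline helper in newer CPython headers),
    so taking its address with &Py_DECREF is not guaranteed to compile or
    stay stable across CPython versions, whereas Py_DecRef is a plain
    exported C-API function with the same effect that also tolerates NULL.
    Making the callbacks private static members of TorchDLPackExchangeAPI
    keeps them out of the global namespace. A minimal sketch of the
    deleter pattern, assuming <Python.h> and a PyObject* that carries a
    new reference:
    
        #include <Python.h>
        #include <memory>
    
        // Owning wrapper around a PyObject*. Py_DecRef is a real exported
        // function (unlike the Py_DECREF macro), so its address can serve
        // as a unique_ptr deleter; it also safely ignores NULL.
        using PyObjectPtr = std::unique_ptr<PyObject, decltype(&Py_DecRef)>;
    
        PyObjectPtr make_owned(PyObject* obj) {  // obj: a new reference
          return PyObjectPtr(obj, &Py_DecRef);
        }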
---
 python/tvm_ffi/_optional_torch_c_dlpack.py | 148 +++++++++++++++--------------
 tests/python/test_dlpack_exchange_api.py   |   8 +-
 2 files changed, 82 insertions(+), 74 deletions(-)

diff --git a/python/tvm_ffi/_optional_torch_c_dlpack.py b/python/tvm_ffi/_optional_torch_c_dlpack.py
index c08f82d..ee50e5f 100644
--- a/python/tvm_ffi/_optional_torch_c_dlpack.py
+++ b/python/tvm_ffi/_optional_torch_c_dlpack.py
@@ -482,89 +482,97 @@ void toDLPackNonOwningImpl(const Tensor& tensor, DLTensor& out) {
 } // namespace
 } // namespace at
 
-int TorchDLPackDLTensorFromPyObjectNoSync(void* py_obj, DLTensor* out) {
-  try {
-    // Use handle (non-owning) to avoid unnecessary refcount operations
-    py::handle handle(static_cast<PyObject*>(py_obj));
-    at::Tensor tensor = handle.cast<at::Tensor>();
-    at::toDLPackNonOwningImpl(tensor, *out);
-    return 0;
-  } catch (const std::exception& e) {
-    PyErr_SetString(PyExc_RuntimeError, e.what());
-    return -1;
+struct TorchDLPackExchangeAPI : public DLPackExchangeAPI {
+  TorchDLPackExchangeAPI() {
+    header.version.major = DLPACK_MAJOR_VERSION;
+    header.version.minor = DLPACK_MINOR_VERSION;
+    header.prev_api = nullptr;
+    managed_tensor_allocator = ManagedTensorAllocator;
+    managed_tensor_from_py_object_no_sync = ManagedTensorFromPyObjectNoSync;
+    managed_tensor_to_py_object_no_sync = ManagedTensorToPyObjectNoSync;
+    dltensor_from_py_object_no_sync = DLTensorFromPyObjectNoSync;
+    current_work_stream = CurrentWorkStream;
   }
-}
 
-int TorchDLPackManagedTensorFromPyObjectNoSync(void* py_obj, DLManagedTensorVersioned** out) {
-  try {
-    py::handle handle(static_cast<PyObject*>(py_obj));
-    at::Tensor tensor = handle.cast<at::Tensor>();
-    *out = at::toDLPackImpl<DLManagedTensorVersioned>(tensor);
-    return 0;
-  } catch (const std::exception& e) {
-    PyErr_SetString(PyExc_RuntimeError, e.what());
-    return -1;
+  static const DLPackExchangeAPI* Global() {
+    static TorchDLPackExchangeAPI inst;
+    return &inst;
   }
-}
 
-int TorchDLPackManagedTensorToPyObjectNoSync(DLManagedTensorVersioned* src, void** py_obj_out) {
-  try {
-    at::Tensor tensor = at::fromDLPackImpl<DLManagedTensorVersioned>(src, nullptr);
-    *py_obj_out = THPVariable_Wrap(tensor);
-    return 0;
-  } catch (const std::exception& e) {
-    PyErr_SetString(PyExc_RuntimeError, e.what());
-    return -1;
+ private:
+  static int DLTensorFromPyObjectNoSync(void* py_obj, DLTensor* out) {
+    try {
+      // Use handle (non-owning) to avoid unnecessary refcount operations
+      py::handle handle(static_cast<PyObject*>(py_obj));
+      at::Tensor tensor = handle.cast<at::Tensor>();
+      at::toDLPackNonOwningImpl(tensor, *out);
+      return 0;
+    } catch (const std::exception& e) {
+      PyErr_SetString(PyExc_RuntimeError, e.what());
+      return -1;
+    }
   }
-}
 
-int TorchDLPackManagedTensorAllocator(
-    DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
-    void (*SetError)(void* error_ctx, const char* kind, const char* message)
-) {
-  try {
-    at::IntArrayRef shape(prototype->shape, prototype->shape + prototype->ndim);
-    at::TensorOptions options = at::TensorOptions()
-      .dtype(at::toScalarType(prototype->dtype))
-      .device(at::getATenDeviceForDLPackv1(prototype->device.device_type, prototype->device.device_id));
-    at::Tensor tensor = at::empty(shape, options);
-    *out = at::toDLPackImpl<DLManagedTensorVersioned>(tensor);
-    return 0;
-  } catch (const std::exception& e) {
-    SetError(error_ctx, "TorchDLPackManagedTensorAllocator", e.what());
-    return -1;
+  static int ManagedTensorFromPyObjectNoSync(void* py_obj, DLManagedTensorVersioned** out) {
+    try {
+      py::handle handle(static_cast<PyObject*>(py_obj));
+      at::Tensor tensor = handle.cast<at::Tensor>();
+      *out = at::toDLPackImpl<DLManagedTensorVersioned>(tensor);
+      return 0;
+    } catch (const std::exception& e) {
+      PyErr_SetString(PyExc_RuntimeError, e.what());
+      return -1;
+    }
   }
-}
 
-int TorchDLPackCurrentWorkStream(DLDeviceType device_type, int32_t device_id, void** out_stream) {
-  try {
-#ifdef BUILD_WITH_CUDA
-    if (device_type == kDLCUDA || device_type == kDLROCM) {
-      *out_stream = at::cuda::getCurrentCUDAStream(device_id).stream();
+  static int ManagedTensorToPyObjectNoSync(DLManagedTensorVersioned* src, void** py_obj_out) {
+    try {
+      at::Tensor tensor = at::fromDLPackImpl<DLManagedTensorVersioned>(src, nullptr);
+      *py_obj_out = THPVariable_Wrap(tensor);
+      return 0;
+    } catch (const std::exception& e) {
+      PyErr_SetString(PyExc_RuntimeError, e.what());
+      return -1;
     }
-#endif
-    return 0;
-  } catch (const std::exception& e) {
-    PyErr_SetString(PyExc_RuntimeError, e.what());
-    return -1;
   }
-}
 
-struct TorchDLPackExchangeAPI : public DLPackExchangeAPI {
-  TorchDLPackExchangeAPI() {
-    header.version.major = DLPACK_MAJOR_VERSION;
-    header.version.minor = DLPACK_MINOR_VERSION;
-    header.prev_api = nullptr;
-    managed_tensor_allocator = TorchDLPackManagedTensorAllocator;
-    managed_tensor_from_py_object_no_sync = TorchDLPackManagedTensorFromPyObjectNoSync;
-    managed_tensor_to_py_object_no_sync = TorchDLPackManagedTensorToPyObjectNoSync;
-    dltensor_from_py_object_no_sync = TorchDLPackDLTensorFromPyObjectNoSync;
-    current_work_stream = TorchDLPackCurrentWorkStream;
+  static int ManagedTensorAllocator(
+      DLTensor* prototype, DLManagedTensorVersioned** out, void* error_ctx,
+      void (*SetError)(void* error_ctx, const char* kind, const char* message)
+  ) {
+    try {
+      at::IntArrayRef shape(prototype->shape, prototype->shape + prototype->ndim);
+      at::TensorOptions options = at::TensorOptions()
+        .dtype(at::toScalarType(prototype->dtype))
+        .device(at::getATenDeviceForDLPackv1(prototype->device.device_type, prototype->device.device_id));
+      at::Tensor tensor = at::empty(shape, options);
+      *out = at::toDLPackImpl<DLManagedTensorVersioned>(tensor);
+      return 0;
+    } catch (const std::exception& e) {
+      SetError(error_ctx, "TorchDLPackManagedTensorAllocator", e.what());
+      return -1;
+    }
   }
 
-  static const DLPackExchangeAPI* Global() {
-    static TorchDLPackExchangeAPI inst;
-    return &inst;
+  // Get current CUDA/ROCm work stream
+  static int CurrentWorkStream(
+      DLDeviceType device_type,
+      int32_t device_id,
+      void** out_stream) {
+    try {
+#ifdef BUILD_WITH_CUDA
+      if (device_type == kDLCUDA || device_type == kDLROCM) {
+        *out_stream = at::cuda::getCurrentCUDAStream(device_id).stream();
+        return 0;
+      }
+#endif
+      // For CPU and other devices, return NULL (no stream concept)
+      *out_stream = nullptr;
+      return 0;
+    } catch (const std::exception& e) {
+      PyErr_SetString(PyExc_RuntimeError, e.what());
+      return -1;
+    }
   }
 };
 
diff --git a/tests/python/test_dlpack_exchange_api.py b/tests/python/test_dlpack_exchange_api.py
index f69ad7a..d5be763 100644
--- a/tests/python/test_dlpack_exchange_api.py
+++ b/tests/python/test_dlpack_exchange_api.py
@@ -108,7 +108,7 @@ def test_dlpack_exchange_api() -> None:
 
         // Test 3: managed_tensor_from_py_object_no_sync
         {
-            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj(THPVariable_Wrap(tensor), &Py_DECREF);
+            std::unique_ptr<PyObject, decltype(&Py_DecRef)> py_obj(THPVariable_Wrap(tensor), &Py_DecRef);
             TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor to PyObject");
 
             DLManagedTensorVersioned* out_tensor = nullptr;
@@ -135,7 +135,7 @@ def test_dlpack_exchange_api() -> None:
 
         // Test 4: managed_tensor_to_py_object_no_sync
         {
-            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj(THPVariable_Wrap(tensor), &Py_DECREF);
+            std::unique_ptr<PyObject, decltype(&Py_DecRef)> py_obj(THPVariable_Wrap(tensor), &Py_DecRef);
             TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor to PyObject");
 
             DLManagedTensorVersioned* managed_tensor = nullptr;
@@ -143,7 +143,7 @@ def test_dlpack_exchange_api() -> None:
             TORCH_CHECK(result == 0, "from_py_object_no_sync failed");
             TORCH_CHECK(managed_tensor != nullptr, "from_py_object_no_sync returned NULL");
 
-            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj_out(nullptr, &Py_DECREF);
+            std::unique_ptr<PyObject, decltype(&Py_DecRef)> py_obj_out(nullptr, &Py_DecRef);
             PyObject* py_obj_out_raw = nullptr;
             result = api->managed_tensor_to_py_object_no_sync(managed_tensor, reinterpret_cast<void**>(&py_obj_out_raw));
             py_obj_out.reset(py_obj_out_raw);
@@ -162,7 +162,7 @@ def test_dlpack_exchange_api() -> None:
 
         // Test 5: dltensor_from_py_object_no_sync
         {
-            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj(THPVariable_Wrap(tensor), &Py_DECREF);
+            std::unique_ptr<PyObject, decltype(&Py_DecRef)> py_obj(THPVariable_Wrap(tensor), &Py_DecRef);
             TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor to PyObject");
 
             DLTensor dltensor;

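For reference, a consumer-side sketch of how the exported function-pointer
table might be called once obtained; the header names are assumptions, and
the api pointer stands for the table the tests above query (e.g. the one
returned by TorchDLPackExchangeAPI::Global()):

    // Sketch only: assumes <Python.h> plus the DLPack headers that define
    // DLTensor and DLPackExchangeAPI are on the include path.
    int read_tensor_non_owning(const DLPackExchangeAPI* api,
                               PyObject* torch_tensor, DLTensor* out) {
      // Fills *out with a non-owning view of the tensor; returns 0 on
      // success, -1 with a Python error set on failure.
      return api->dltensor_from_py_object_no_sync(
          static_cast<void*>(torch_tensor), out);
    }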