gemini-code-assist[bot] commented on code in PR #111:
URL: https://github.com/apache/tvm-ffi/pull/111#discussion_r2427409971

##########
tests/python/test_dlpack_exchange_api.py:
##########
@@ -0,0 +1,241 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file to
+# you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+from __future__ import annotations
+
+from typing import Any
+
+import pytest
+
+try:
+    import torch  # type: ignore[no-redef]
+
+    # Import tvm_ffi to load the DLPack exchange API extension
+    # This sets torch.Tensor.__c_dlpack_exchange_api__
+    import tvm_ffi  # noqa: F401
+    from torch.utils import cpp_extension  # type: ignore
+    from tvm_ffi import libinfo
+except ImportError:
+    torch = None
+
+# Check if DLPack Exchange API is available
+_has_dlpack_api = torch is not None and hasattr(torch.Tensor, "__c_dlpack_exchange_api__")
+
+
+@pytest.fixture(scope="module")
+def dlpack_test_module() -> Any:
+    if not _has_dlpack_api:
+        pytest.skip("PyTorch DLPack Exchange API not available")
+
+    source = """
+    #include <torch/extension.h>
+    #include <dlpack/dlpack.h>
+
+    void test_api_structure(int64_t api_ptr_int, bool test_cuda) {
+        DLPackExchangeAPI* api = reinterpret_cast<DLPackExchangeAPI*>(api_ptr_int);
+
+        TORCH_CHECK(api != nullptr, "API pointer is NULL");
+
+        TORCH_CHECK(api->header.version.major == DLPACK_MAJOR_VERSION,
+            "Expected major version ", DLPACK_MAJOR_VERSION, ", got ", api->header.version.major);
+        TORCH_CHECK(api->header.version.minor == DLPACK_MINOR_VERSION,
+            "Expected minor version ", DLPACK_MINOR_VERSION, ", got ", api->header.version.minor);
+
+        TORCH_CHECK(api->managed_tensor_allocator != nullptr,
+            "managed_tensor_allocator is NULL");
+        TORCH_CHECK(api->managed_tensor_from_py_object_no_sync != nullptr,
+            "managed_tensor_from_py_object_no_sync is NULL");
+        TORCH_CHECK(api->managed_tensor_to_py_object_no_sync != nullptr,
+            "managed_tensor_to_py_object_no_sync is NULL");
+        TORCH_CHECK(api->current_work_stream != nullptr,
+            "current_work_stream is NULL");
+
+        DLTensor prototype;
+        prototype.device.device_type = kDLCPU;
+        prototype.device.device_id = 0;
+        prototype.ndim = 3;
+
+        int64_t shape[3] = {3, 4, 5};
+        prototype.shape = shape;
+        prototype.strides = nullptr;
+
+        DLDataType dtype;
+        dtype.code = kDLFloat;
+        dtype.bits = 32;
+        dtype.lanes = 1;
+        prototype.dtype = dtype;
+
+        prototype.data = nullptr;
+        prototype.byte_offset = 0;
+
+        DLManagedTensorVersioned* out_tensor = nullptr;
+        int result = api->managed_tensor_allocator(
+            &prototype,
+            &out_tensor,
+            nullptr,  // error_ctx
+            nullptr   // SetError
+        );
+
+        TORCH_CHECK(result == 0, "Allocator failed with code ", result);
+        TORCH_CHECK(out_tensor != nullptr, "Allocator returned NULL");
+        TORCH_CHECK(out_tensor->dl_tensor.ndim == 3, "Expected ndim 3, got ", out_tensor->dl_tensor.ndim);
+        TORCH_CHECK(out_tensor->dl_tensor.shape[0] == 3, "Expected shape[0] = 3, got ", out_tensor->dl_tensor.shape[0]);
+        TORCH_CHECK(out_tensor->dl_tensor.shape[1] == 4, "Expected shape[1] = 4, got ", out_tensor->dl_tensor.shape[1]);
+        TORCH_CHECK(out_tensor->dl_tensor.shape[2] == 5, "Expected shape[2] = 5, got ", out_tensor->dl_tensor.shape[2]);
+        TORCH_CHECK(out_tensor->dl_tensor.dtype.code == kDLFloat, "Expected dtype code kDLFloat, got ", out_tensor->dl_tensor.dtype.code);
+        TORCH_CHECK(out_tensor->dl_tensor.dtype.bits == 32, "Expected dtype bits 32, got ", out_tensor->dl_tensor.dtype.bits);
+        TORCH_CHECK(out_tensor->dl_tensor.device.device_type == kDLCPU, "Expected device type kDLCPU, got ", out_tensor->dl_tensor.device.device_type);
+
+        if (out_tensor->deleter) {
+            out_tensor->deleter(out_tensor);
+        }
+
+        if (test_cuda) {
+            void* stream_out = nullptr;
+            result = api->current_work_stream(kDLCUDA, 0, &stream_out);
+            TORCH_CHECK(result == 0, "current_work_stream failed with code ", result);
+        }
+    }
+
+    void test_tensor_conversions(at::Tensor tensor1, at::Tensor tensor2, at::Tensor tensor3, int64_t api_ptr_int) {
+        DLPackExchangeAPI* api = reinterpret_cast<DLPackExchangeAPI*>(api_ptr_int);
+
+        {
+            PyObject* py_obj = THPVariable_Wrap(tensor1);  // tensor1: shape (2,3,4), float32
+            TORCH_CHECK(py_obj != nullptr, "Failed to wrap tensor1 to PyObject");
+
+            DLManagedTensorVersioned* out_tensor = nullptr;
+            int result = api->managed_tensor_from_py_object_no_sync(py_obj, &out_tensor);
+
+            TORCH_CHECK(result == 0, "from_py_object_no_sync failed with code ", result);
+            TORCH_CHECK(out_tensor != nullptr, "from_py_object_no_sync returned NULL");
+
+            TORCH_CHECK(out_tensor->version.major == DLPACK_MAJOR_VERSION,
+                "Expected major version ", DLPACK_MAJOR_VERSION, ", got ", out_tensor->version.major);
+            TORCH_CHECK(out_tensor->version.minor == DLPACK_MINOR_VERSION,
+                "Expected minor version ", DLPACK_MINOR_VERSION, ", got ", out_tensor->version.minor);
+
+            TORCH_CHECK(out_tensor->dl_tensor.ndim == 3, "Expected ndim 3, got ", out_tensor->dl_tensor.ndim);
+            TORCH_CHECK(out_tensor->dl_tensor.shape[0] == 2, "Expected shape[0] = 2, got ", out_tensor->dl_tensor.shape[0]);
+            TORCH_CHECK(out_tensor->dl_tensor.shape[1] == 3, "Expected shape[1] = 3, got ", out_tensor->dl_tensor.shape[1]);
+            TORCH_CHECK(out_tensor->dl_tensor.shape[2] == 4, "Expected shape[2] = 4, got ", out_tensor->dl_tensor.shape[2]);
+
+            TORCH_CHECK(out_tensor->dl_tensor.dtype.code == kDLFloat, "Expected dtype code kDLFloat, got ", out_tensor->dl_tensor.dtype.code);
+            TORCH_CHECK(out_tensor->dl_tensor.dtype.bits == 32, "Expected dtype bits 32, got ", out_tensor->dl_tensor.dtype.bits);
+            TORCH_CHECK(out_tensor->dl_tensor.data != nullptr, "Data pointer is NULL");
+
+            if (out_tensor->deleter) {
+                out_tensor->deleter(out_tensor);
+            }
+            Py_DECREF(py_obj);

Review Comment:
   There's a potential reference leak of `py_obj` if any of the `TORCH_CHECK` macros throws an exception after `py_obj` is created but before `Py_DECREF(py_obj)` is called. Using an RAII wrapper like `std::unique_ptr` with a custom deleter will ensure `Py_DECREF` is called automatically, even in the case of an exception, making the code more robust.

```suggestion
            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj(THPVariable_Wrap(tensor1), &Py_DECREF);  // tensor1: shape (2,3,4), float32
            TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor1 to PyObject");

            DLManagedTensorVersioned* out_tensor = nullptr;
            int result = api->managed_tensor_from_py_object_no_sync(py_obj.get(), &out_tensor);

            TORCH_CHECK(result == 0, "from_py_object_no_sync failed with code ", result);
            TORCH_CHECK(out_tensor != nullptr, "from_py_object_no_sync returned NULL");

            TORCH_CHECK(out_tensor->version.major == DLPACK_MAJOR_VERSION,
                "Expected major version ", DLPACK_MAJOR_VERSION);
            TORCH_CHECK(out_tensor->version.minor == DLPACK_MINOR_VERSION,
                "Expected minor version ", DLPACK_MINOR_VERSION);

            TORCH_CHECK(out_tensor->dl_tensor.ndim == 3, "Wrong ndim");
            TORCH_CHECK(out_tensor->dl_tensor.shape[0] == 2, "Wrong shape[0]");
            TORCH_CHECK(out_tensor->dl_tensor.shape[1] == 3, "Wrong shape[1]");
            TORCH_CHECK(out_tensor->dl_tensor.shape[2] == 4, "Wrong shape[2]");

            TORCH_CHECK(out_tensor->dl_tensor.dtype.code == kDLFloat, "Wrong dtype code");
            TORCH_CHECK(out_tensor->dl_tensor.dtype.bits == 32, "Wrong dtype bits");
            TORCH_CHECK(out_tensor->dl_tensor.data != nullptr, "Data pointer is NULL");

            if (out_tensor->deleter) {
                out_tensor->deleter(out_tensor);
            }
```
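
   One caveat with `decltype(&Py_DECREF)`: in several CPython versions `Py_DECREF` is a function-like macro, so taking its address may not compile on every build. A capture-less lambda (as in a later suggestion in this review) or a tiny functor sidesteps that. A minimal sketch, where the `PyObjectDecref` and `PyObjectPtr` names are hypothetical helpers (not part of the suggestion above) that would live in the same test source:

```cpp
#include <memory>
#include <Python.h>

// Reusable owner for PyObject*: always drops the reference when the owner is
// destroyed, even if a TORCH_CHECK throws. Py_XDECREF makes a null handle safe.
struct PyObjectDecref {
    void operator()(PyObject* p) const { Py_XDECREF(p); }
};
using PyObjectPtr = std::unique_ptr<PyObject, PyObjectDecref>;

// Usage inside the test source would then look like:
//   PyObjectPtr py_obj(THPVariable_Wrap(tensor1));
//   TORCH_CHECK(py_obj != nullptr, "Failed to wrap tensor1 to PyObject");
```
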
##########
tests/python/test_dlpack_exchange_api.py:
##########
@@ -0,0 +1,241 @@
+        if (api->dltensor_from_py_object_no_sync != nullptr) {
+            PyObject* py_obj = THPVariable_Wrap(tensor3);  // tensor3: shape (4,5), float32
+            TORCH_CHECK(py_obj != nullptr, "Failed to wrap tensor3 to PyObject");
+
+            DLTensor dltensor;
+            int result = api->dltensor_from_py_object_no_sync(py_obj, &dltensor);
+
+            TORCH_CHECK(result == 0, "dltensor_from_py_object_no_sync failed with code ", result);
+            TORCH_CHECK(dltensor.ndim == 2, "Expected ndim 2, got ", dltensor.ndim);
+            TORCH_CHECK(dltensor.shape[0] == 4, "Expected shape[0] = 4, got ", dltensor.shape[0]);
+            TORCH_CHECK(dltensor.shape[1] == 5, "Expected shape[1] = 5, got ", dltensor.shape[1]);
+            TORCH_CHECK(dltensor.dtype.code == kDLFloat, "Expected dtype code kDLFloat, got ", dltensor.dtype.code);
+            TORCH_CHECK(dltensor.dtype.bits == 32, "Expected dtype bits 32, got ", dltensor.dtype.bits);
+            TORCH_CHECK(dltensor.data != nullptr, "Data pointer is NULL");
+
+            Py_DECREF(py_obj);

Review Comment:
   Similar to the previous blocks, `py_obj` could be leaked if a `TORCH_CHECK` throws an exception. Using `std::unique_ptr` with a custom deleter provides exception safety and ensures `Py_DECREF` is always called.

```suggestion
            std::unique_ptr<PyObject, decltype(&Py_DECREF)> py_obj(THPVariable_Wrap(tensor3), &Py_DECREF);  // tensor3: shape (4,5), float32
            TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor3 to PyObject");

            DLTensor dltensor;
            int result = api->dltensor_from_py_object_no_sync(py_obj.get(), &dltensor);

            TORCH_CHECK(result == 0, "dltensor_from_py_object_no_sync failed with code ", result);
            TORCH_CHECK(dltensor.ndim == 2, "Wrong ndim");
            TORCH_CHECK(dltensor.shape[0] == 4, "Wrong shape[0]");
            TORCH_CHECK(dltensor.shape[1] == 5, "Wrong shape[1]");
            TORCH_CHECK(dltensor.dtype.code == kDLFloat, "Wrong dtype code");
            TORCH_CHECK(dltensor.dtype.bits == 32, "Wrong dtype bits");
            TORCH_CHECK(dltensor.data != nullptr, "Data pointer is NULL");
```
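
   Since the same wrap-then-check pattern appears in all three blocks, the RAII wrapper could also be factored into a small helper. A rough sketch, reusing the hypothetical `PyObjectPtr` alias from above (the `wrap_tensor` name is made up here and not part of the suggestion):

```cpp
// Hypothetical helper for the test source: wrap an at::Tensor and fail the
// test immediately if wrapping returned NULL. The reference is released
// automatically when the returned owner goes out of scope.
inline PyObjectPtr wrap_tensor(const at::Tensor& t, const char* name) {
    PyObjectPtr obj(THPVariable_Wrap(t));
    TORCH_CHECK(obj != nullptr, "Failed to wrap ", name, " to PyObject");
    return obj;
}

// e.g.
//   auto py_obj = wrap_tensor(tensor3, "tensor3");
//   int result = api->dltensor_from_py_object_no_sync(py_obj.get(), &dltensor);
```
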
##########
tests/python/test_dlpack_exchange_api.py:
##########
@@ -0,0 +1,241 @@
+        {
+            PyObject* py_obj = THPVariable_Wrap(tensor2);  // tensor2: shape (3,4,1), int64
+            TORCH_CHECK(py_obj != nullptr, "Failed to wrap tensor2 to PyObject");
+
+            DLManagedTensorVersioned* managed_tensor = nullptr;
+            int result = api->managed_tensor_from_py_object_no_sync(py_obj, &managed_tensor);
+            TORCH_CHECK(result == 0, "from_py_object_no_sync failed");
+            TORCH_CHECK(managed_tensor != nullptr, "from_py_object_no_sync returned NULL");
+            Py_DECREF(py_obj);
+
+            PyObject* py_obj_out = nullptr;
+            result = api->managed_tensor_to_py_object_no_sync(
+                managed_tensor,
+                reinterpret_cast<void**>(&py_obj_out)
+            );
+
+            TORCH_CHECK(result == 0, "to_py_object_no_sync failed with code ", result);
+            TORCH_CHECK(py_obj_out != nullptr, "to_py_object_no_sync returned NULL");
+            TORCH_CHECK(THPVariable_Check(py_obj_out), "Returned PyObject is not a Tensor");
+
+            at::Tensor result_tensor = THPVariable_Unpack(py_obj_out);
+            TORCH_CHECK(result_tensor.dim() == 3, "Expected 3 dimensions, got ", result_tensor.dim());
+            TORCH_CHECK(result_tensor.size(0) == 3, "Expected size(0) = 3, got ", result_tensor.size(0));
+            TORCH_CHECK(result_tensor.size(1) == 4, "Expected size(1) = 4, got ", result_tensor.size(1));
+            TORCH_CHECK(result_tensor.size(2) == 1, "Expected size(2) = 1, got ", result_tensor.size(2));
+            TORCH_CHECK(result_tensor.scalar_type() == at::kLong, "Expected dtype kLong, got ", result_tensor.scalar_type());
+
+            Py_DECREF(py_obj_out);

Review Comment:
   This block has multiple potential resource leaks:
   1. `py_obj` can be leaked if `TORCH_CHECK` fails before `Py_DECREF`.
   2. `managed_tensor` can be leaked if `managed_tensor_to_py_object_no_sync` fails or a subsequent `TORCH_CHECK` throws.
   3. `py_obj_out` can be leaked if any `TORCH_CHECK` throws after it's created.

   Using RAII with `std::unique_ptr` for all managed resources (`PyObject*` and `DLManagedTensorVersioned*`) will make this code exception-safe and prevent leaks.

```suggestion
            auto py_obj_deleter = [](PyObject* p) { if (p) Py_DECREF(p); };
            std::unique_ptr<PyObject, decltype(py_obj_deleter)> py_obj(THPVariable_Wrap(tensor2), py_obj_deleter);  // tensor2: shape (3,4,1), int64
            TORCH_CHECK(py_obj.get() != nullptr, "Failed to wrap tensor2 to PyObject");

            DLManagedTensorVersioned* managed_tensor = nullptr;
            int result = api->managed_tensor_from_py_object_no_sync(py_obj.get(), &managed_tensor);
            TORCH_CHECK(result == 0, "from_py_object_no_sync failed");
            TORCH_CHECK(managed_tensor != nullptr, "from_py_object_no_sync returned NULL");

            std::unique_ptr<PyObject, decltype(py_obj_deleter)> py_obj_out(nullptr, py_obj_deleter);
            PyObject* py_obj_out_raw = nullptr;
            result = api->managed_tensor_to_py_object_no_sync(
                managed_tensor,
                reinterpret_cast<void**>(&py_obj_out_raw)
            );
            py_obj_out.reset(py_obj_out_raw);

            TORCH_CHECK(result == 0, "to_py_object_no_sync failed with code ", result);
            TORCH_CHECK(py_obj_out.get() != nullptr, "to_py_object_no_sync returned NULL");
            TORCH_CHECK(THPVariable_Check(py_obj_out.get()), "Returned PyObject is not a Tensor");

            at::Tensor result_tensor = THPVariable_Unpack(py_obj_out.get());
            TORCH_CHECK(result_tensor.dim() == 3, "Wrong number of dimensions");
            TORCH_CHECK(result_tensor.size(0) == 3, "Wrong size at dim 0");
            TORCH_CHECK(result_tensor.size(1) == 4, "Wrong size at dim 1");
            TORCH_CHECK(result_tensor.size(2) == 1, "Wrong size at dim 2");
            TORCH_CHECK(result_tensor.scalar_type() == at::kLong, "Wrong dtype");
```
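
   The suggestion above still leaves `managed_tensor` unguarded between the two API calls (leak 2 in the list). If the exchange API follows the usual DLPack convention where the consumer takes ownership only on success (the original code never frees `managed_tensor` itself after a successful conversion), a small guard that calls the tensor's own `deleter` could cover that window too. A sketch under that assumption, with `ManagedTensorDeleter` and `ManagedTensorPtr` being made-up names:

```cpp
// Hypothetical guard for DLManagedTensorVersioned*: invokes the tensor's own
// deleter if the guard still owns it when it goes out of scope.
struct ManagedTensorDeleter {
    void operator()(DLManagedTensorVersioned* t) const {
        if (t && t->deleter) t->deleter(t);
    }
};
using ManagedTensorPtr = std::unique_ptr<DLManagedTensorVersioned, ManagedTensorDeleter>;

// Sketch of use in the second block, assuming to_py_object_no_sync takes
// ownership of the managed tensor only when it succeeds:
//   ManagedTensorPtr guard(managed_tensor);
//   result = api->managed_tensor_to_py_object_no_sync(guard.get(),
//                reinterpret_cast<void**>(&py_obj_out_raw));
//   if (result == 0) guard.release();  // ownership moved to the Python object
```
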

##########
tests/python/test_dlpack_exchange_api.py:
##########
@@ -0,0 +1,241 @@
+@pytest.fixture(scope="module")
+def dlpack_test_module() -> Any:
+    if not _has_dlpack_api:
+        pytest.skip("PyTorch DLPack Exchange API not available")
+
+    source = """
+    #include <torch/extension.h>
+    #include <dlpack/dlpack.h>

Review Comment:
   To use `std::unique_ptr` for robust resource management (RAII), you'll need to include the `<memory>` header. This will help prevent potential resource leaks in the test functions.

```suggestion
    #include <torch/extension.h>
    #include <memory>
    #include <dlpack/dlpack.h>
```

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
