tqchen commented on code in PR #18229:
URL: https://github.com/apache/tvm/pull/18229#discussion_r2299332790
##########
tests/python/relax/test_dlpack_integration.py:
##########
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+"""
+Test DLPack integration between PyTorch and TVM.
+
+This test verifies:
+1. DLPack conversion from PyTorch to TVM
+2. DLPack conversion from TVM to PyTorch
+3. Data integrity preservation during conversion
+4. Performance characteristics of DLPack vs numpy fallback
+5. Error handling for unsupported data types
+"""
+
+import pytest
+import torch
+import tvm
+from tvm import relax, tir
+from tvm.script import relax as R, tir as T
+from tvm.relax import BasePyModule
+import numpy as np
+import time
+
+
+class TestDLPackIntegration:
+ def test_dlpack_pytorch_to_tvm_conversion(self):
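+        """Test DLPack conversion of a CPU PyTorch tensor to a TVM NDArray."""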
+        pytorch_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], dtype=torch.float32)
+
+ tvm_ndarray = tvm.nd.from_dlpack(pytorch_tensor)
+
+ assert isinstance(tvm_ndarray, tvm.nd.NDArray)
+ assert tvm_ndarray.shape == pytorch_tensor.shape
+        assert str(tvm_ndarray.dtype) == str(pytorch_tensor.dtype).replace("torch.", "")
+
+ tvm_numpy = tvm_ndarray.numpy()
+ pytorch_numpy = pytorch_tensor.numpy()
+ np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+
+ def test_dlpack_pytorch_to_tvm_conversion_gpu(self):
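+        """Test DLPack conversion of a CUDA PyTorch tensor to a TVM NDArray."""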
+ if tvm.cuda().exist:
+ pytorch_tensor = torch.tensor(
+ [1.0, 2.0, 3.0, 4.0, 5.0], dtype=torch.float32, device="cuda"
+ )
+
+ tvm_ndarray = tvm.nd.from_dlpack(pytorch_tensor)
+
+ assert isinstance(tvm_ndarray, tvm.nd.NDArray)
+ assert tvm_ndarray.shape == pytorch_tensor.shape
+            assert str(tvm_ndarray.dtype) == str(pytorch_tensor.dtype).replace("torch.", "")
+ assert str(tvm_ndarray.device) == "cuda:0"
+
+ # Move to CPU for numpy conversion
+ tvm_numpy = tvm_ndarray.numpy()
+ pytorch_numpy = pytorch_tensor.cpu().numpy()
+ np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+ else:
+ pytest.skip("CUDA not available")
+
+ def test_dlpack_tvm_to_pytorch_conversion(self):
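+        """Test DLPack conversion of a TVM NDArray to a PyTorch tensor."""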
+        data = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype="float32")
+ tvm_ndarray = tvm.nd.array(data)
+
+ pytorch_tensor = torch.from_dlpack(tvm_ndarray)
+
+ assert isinstance(pytorch_tensor, torch.Tensor)
+ assert pytorch_tensor.shape == tvm_ndarray.shape
+ assert pytorch_tensor.dtype == torch.float32
+
+ tvm_numpy = tvm_ndarray.numpy()
+ pytorch_numpy = pytorch_tensor.numpy()
+ np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+
+ def test_dlpack_tvm_to_pytorch_conversion_gpu(self):
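+        """Test DLPack conversion of a CUDA TVM NDArray to a PyTorch tensor."""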
+ if tvm.cuda().exist:
+ data = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype="float32")
+ tvm_ndarray = tvm.nd.array(data, device=tvm.cuda(0))
+
+ pytorch_tensor = torch.from_dlpack(tvm_ndarray)
+
+ assert isinstance(pytorch_tensor, torch.Tensor)
+ assert pytorch_tensor.shape == tvm_ndarray.shape
+ assert pytorch_tensor.dtype == torch.float32
+ assert pytorch_tensor.device.type == "cuda"
+
+ tvm_numpy = tvm_ndarray.numpy()
+ pytorch_numpy = pytorch_tensor.cpu().numpy()
+ np.testing.assert_allclose(tvm_numpy, pytorch_numpy, atol=1e-5)
+ else:
+ pytest.skip("CUDA not available")
+
+ def test_dlpack_roundtrip_conversion(self):
+ """Test roundtrip conversion: PyTorch -> TVM -> PyTorch."""
+ # Create PyTorch tensor
+        original_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], dtype=torch.float32)
+
+ # Convert to TVM
+ tvm_ndarray = tvm.nd.from_dlpack(original_tensor)
+
+ # Convert back to PyTorch
+ result_tensor = torch.from_dlpack(tvm_ndarray)
+
+ # Verify roundtrip integrity
+ assert torch.allclose(original_tensor, result_tensor, atol=1e-5)
+ assert original_tensor.dtype == result_tensor.dtype
+ assert original_tensor.shape == result_tensor.shape
+
+ def test_dlpack_different_data_types(self):
+ """Test DLPack conversion with different data types."""
+ test_types = [
+ (torch.float32, "float32"),
+ (torch.float64, "float64"),
+ (torch.int32, "int32"),
+ (torch.int64, "int64"),
+ ]
+
+ for torch_dtype, tvm_dtype in test_types:
+ # Create PyTorch tensor
+ pytorch_tensor = torch.tensor([1, 2, 3], dtype=torch_dtype)
+
+ # Convert to TVM
+ tvm_ndarray = tvm.nd.from_dlpack(pytorch_tensor)
+
+ # Convert back to PyTorch
+ result_tensor = torch.from_dlpack(tvm_ndarray)
+
+ # Verify conversion
+ assert torch.allclose(pytorch_tensor, result_tensor, atol=1e-5)
+ assert pytorch_tensor.dtype == result_tensor.dtype
+
+ def test_dlpack_different_shapes(self):
+ """Test DLPack conversion with different tensor shapes."""
+ test_shapes = [
+ (1,),
+ (2, 3),
+ (4, 5, 6),
+ (1, 1, 1, 1),
+ ]
+
+ for shape in test_shapes:
+ # Create PyTorch tensor
+ pytorch_tensor = torch.randn(shape, dtype=torch.float32)
+
+ # Convert to TVM
+ tvm_ndarray = tvm.nd.from_dlpack(pytorch_tensor)
+
+ # Convert back to PyTorch
+ result_tensor = torch.from_dlpack(tvm_ndarray)
+
+ # Verify conversion
+ assert torch.allclose(pytorch_tensor, result_tensor, atol=1e-5)
+ assert pytorch_tensor.shape == result_tensor.shape
+
+ def test_dlpack_performance_vs_numpy(self):
+ """Test DLPack performance compared to numpy conversion."""
+ # Create large PyTorch tensor
+ size = 1000000
+ pytorch_tensor = torch.randn(size, dtype=torch.float32)
+
+ # Time DLPack conversion
+ start_time = time.time()
+ tvm_ndarray = tvm.nd.from_dlpack(pytorch_tensor)
+ dlpack_time = time.time() - start_time
+
+ # Time numpy conversion
+ start_time = time.time()
+ numpy_array = pytorch_tensor.detach().cpu().numpy()
+ tvm_ndarray_numpy = tvm.nd.array(numpy_array)
+ numpy_time = time.time() - start_time
+
+ # Verify both methods produce same result
+ result_dlpack = torch.from_dlpack(tvm_ndarray)
+ result_numpy = torch.from_numpy(tvm_ndarray_numpy.numpy())
+ assert torch.allclose(result_dlpack, result_numpy, atol=1e-5)
+
+        # Loose sanity check: the DLPack path should not be drastically
+        # slower than the numpy copy path
+        assert dlpack_time < numpy_time * 2, "DLPack conversion unexpectedly slow"
Review Comment:
Let us avoid the timing comparison here: the perf gain is expected, but a unit test should not contain a perf benchmark.
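
A minimal sketch of what the test could look like with the timing
assertions dropped, keeping only the cross-path correctness check
(a hypothetical rewrite reusing the file's module-level imports; the
method name is illustrative, not part of this PR):

    def test_dlpack_matches_numpy_path(self):
        """Both conversion paths should yield identical data; no timing."""
        pytorch_tensor = torch.randn(1000000, dtype=torch.float32)

        # DLPack path: hand the tensor to TVM without copying
        tvm_via_dlpack = tvm.nd.from_dlpack(pytorch_tensor)

        # Numpy path: explicit copy through host memory
        tvm_via_numpy = tvm.nd.array(pytorch_tensor.detach().cpu().numpy())

        # Verify both paths produce the same values
        result_dlpack = torch.from_dlpack(tvm_via_dlpack)
        result_numpy = torch.from_numpy(tvm_via_numpy.numpy())
        assert torch.allclose(result_dlpack, result_numpy, atol=1e-5)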
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]