jorisvandenbossche commented on code in PR #41889:
URL: https://github.com/apache/arrow/pull/41889#discussion_r1635914530


##########
python/pyarrow/tests/test_io.py:
##########
@@ -669,6 +669,64 @@ def test_allocate_buffer_resizable():
     assert buf.size == 200
 
 
+def test_non_cpu_buffer(pickle_module):
+    cuda = pytest.importorskip("pyarrow.cuda")
+    ctx = cuda.Context(0)
+
+    arr = np.arange(4, dtype=np.int32)
+    cuda_buf = ctx.buffer_from_data(arr)
+
+    arr = pa.Array.from_buffers(pa.int32(), 4, [None, cuda_buf])
+    buf_on_gpu = arr.buffers()[1]
+
+    assert buf_on_gpu.size == cuda_buf.size
+    assert buf_on_gpu.address == cuda_buf.address
+    assert buf_on_gpu.is_cpu == cuda_buf.is_cpu
+    assert buf_on_gpu.is_mutable
+
+    repr1 = "<pyarrow.Buffer address="
+    repr2 = "size=16 is_cpu=False is_mutable=True>"
+    assert repr1 in repr(buf_on_gpu)
+    assert repr2 in repr(buf_on_gpu)
+
+    msg = "Implemented only for data on CPU device"
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu.equals(cuda_buf)
+
+    with pytest.raises(NotImplementedError, match=msg):
+        cuda_buf.equals(buf_on_gpu)
+
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu.hex()
+
+    with pytest.raises(NotImplementedError, match=msg):
+        cuda_buf.hex()
+
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu[1]

Review Comment:
   For `__getitem__`, could you also test with a slice in addition to an
integer index? Slicing is expected to work for both `buf_on_gpu` and `cuda_buf`.



##########
python/pyarrow/tests/test_io.py:
##########
@@ -669,6 +669,64 @@ def test_allocate_buffer_resizable():
     assert buf.size == 200
 
 
+def test_non_cpu_buffer(pickle_module):
+    cuda = pytest.importorskip("pyarrow.cuda")
+    ctx = cuda.Context(0)
+
+    arr = np.arange(4, dtype=np.int32)
+    cuda_buf = ctx.buffer_from_data(arr)
+
+    arr = pa.Array.from_buffers(pa.int32(), 4, [None, cuda_buf])
+    buf_on_gpu = arr.buffers()[1]
+
+    assert buf_on_gpu.size == cuda_buf.size
+    assert buf_on_gpu.address == cuda_buf.address
+    assert buf_on_gpu.is_cpu == cuda_buf.is_cpu
+    assert buf_on_gpu.is_mutable
+
+    repr1 = "<pyarrow.Buffer address="
+    repr2 = "size=16 is_cpu=False is_mutable=True>"
+    assert repr1 in repr(buf_on_gpu)
+    assert repr2 in repr(buf_on_gpu)
+
+    msg = "Implemented only for data on CPU device"
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu.equals(cuda_buf)
+
+    with pytest.raises(NotImplementedError, match=msg):
+        cuda_buf.equals(buf_on_gpu)
+
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu.hex()
+
+    with pytest.raises(NotImplementedError, match=msg):
+        cuda_buf.hex()
+
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu[1]
+
+    with pytest.raises(NotImplementedError, match=msg):
+        cuda_buf[1]
+
+    with pytest.raises(NotImplementedError, match=msg):
+        buf_on_gpu.to_pybytes()
+
+    with pytest.raises(NotImplementedError, match=msg):
+        pickle_module.dumps(buf_on_gpu, protocol=4)
+
+    with pytest.raises(NotImplementedError, match=msg):
+        pickle_module.dumps(cuda_buf, protocol=4)
+
+    arr = np.array([b'testing'])
+    cuda_buf = ctx.buffer_from_data(arr)
+    arr = pa.FixedSizeBinaryArray.from_buffers(pa.binary(7), 1, [None, 
cuda_buf])
+    buf_on_gpu = arr.buffers()[1]

Review Comment:
   To simplify the test a bit, you could use this definition of `cuda_buf`
and `buf_on_gpu` from the start of the test; that way you wouldn't need to
redefine the variables here just for the slicing checks.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to