kkraus14 commented on code in PR #205:
URL: https://github.com/apache/arrow-nanoarrow/pull/205#discussion_r1235564700


##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+
+#include "nanoarrow.h"
+
+#include "nanoarrow_device.h"
+
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error) {
+  const char* nanoarrow_runtime_version = ArrowNanoarrowVersion();
+  const char* nanoarrow_ipc_build_time_version = NANOARROW_VERSION;
+
+  if (strcmp(nanoarrow_runtime_version, nanoarrow_ipc_build_time_version) != 0) {
+    ArrowErrorSet(error, "Expected nanoarrow runtime version '%s' but found version '%s'",
+                  nanoarrow_ipc_build_time_version, nanoarrow_runtime_version);
+    return EINVAL;
+  }
+
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayInit(struct ArrowDeviceArray* device_array,
+                          struct ArrowDevice* device) {
+  memset(device_array, 0, sizeof(struct ArrowDeviceArray));
+  device_array->device_type = device->device_type;
+  device_array->device_id = device->device_id;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferInit(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferInit(dst);
+  dst->allocator = ArrowBufferAllocatorDefault();
+  NANOARROW_RETURN_NOT_OK(ArrowBufferAppend(
+      dst, ((uint8_t*)src.private_data) + src.offset_bytes, src.size_bytes));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferMove(struct ArrowDevice* device_src,
+                                               struct ArrowBuffer* src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferMove(src, dst);
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferCopy(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowDeviceBufferView dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+         ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+  return NANOARROW_OK;
+}
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int32_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int32_t);
+  device_buffer_view.size_bytes = sizeof(int32_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,

Review Comment:
   +1 to the int32 function here



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {

Review Comment:
   Definitely don't want to issue copies one by one like this, even asynchronously; that would be really, really bad performance-wise and put significant pressure on the system via the GPU driver.
   
   I would +1 @zeroshade's suggestion of skipping validation and generally anything that needs to introspect the data.
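   
   For illustration, a rough sketch of what a batched alternative could look like, built on the `ArrowDeviceBufferInit` / `ArrowDeviceCpu` helpers from this patch (the `ValidateOffsets32` helper and the one-shot copy of the whole offsets buffer are my own hypothetical, not code from the PR):
   
   ```c
   // Hypothetical: copy the entire offsets buffer to the CPU in one transfer,
   // then validate every offset host-side, instead of issuing one 4-byte
   // device-to-host copy per element.
   static ArrowErrorCode ValidateOffsets32(struct ArrowDevice* device,
                                           struct ArrowDeviceBufferView offsets,
                                           int64_t n_offsets, int64_t max_offset) {
     struct ArrowBuffer cpu_copy;
     NANOARROW_RETURN_NOT_OK(
         ArrowDeviceBufferInit(device, offsets, ArrowDeviceCpu(), &cpu_copy));
   
     const int32_t* values = (const int32_t*)cpu_copy.data;
     for (int64_t i = 0; i < n_offsets; i++) {
       if (values[i] > max_offset) {
         ArrowBufferReset(&cpu_copy);
         return EINVAL;
       }
     }
   
     ArrowBufferReset(&cpu_copy);
     return NANOARROW_OK;
   }
   ```
   
   That's one transfer per buffer rather than one per element, but it still has to read the data, so skipping introspection entirely remains the better default.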



##########
extensions/nanoarrow_device/CMakeLists.txt:
##########
@@ -0,0 +1,221 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+message(STATUS "Building using CMake version: ${CMAKE_VERSION}")
+cmake_minimum_required(VERSION 3.14)
+include(FetchContent)
+
+if(NOT DEFINED CMAKE_C_STANDARD)
+  set(CMAKE_C_STANDARD 11)
+endif()
+
+project(nanoarrow_device)
+
+option(NANOARROW_DEVICE_BUILD_TESTS "Build tests" OFF)
+option(NANOARROW_DEVICE_BUNDLE "Create bundled nanoarrow_device.h and nanoarrow_device.c" OFF)
+option(NANOARROW_DEVICE_WITH_METAL "Build Apple metal extension" OFF)
+option(NANOARROW_DEVICE_WITH_CUDA "Build CUDA extension" OFF)
+
+
+option(NANOARROW_DEVICE_CODE_COVERAGE "Enable coverage reporting" OFF)
+add_library(device_coverage_config INTERFACE)
+
+if (NANOARROW_DEVICE_BUILD_TESTS OR NOT NANOARROW_DEVICE_BUNDLE)
+  # Add the nanoarrow dependency. nanoarrow is not linked into the
+  # nanoarrow_device library (the caller must link this themselves);
+  # however, we need nanoarrow.h to build nanoarrow_device.c.
+  FetchContent_Declare(
+    nanoarrow
+    SOURCE_DIR ${CMAKE_CURRENT_LIST_DIR}/../..)
+
+  # Don't install nanoarrow because of this configuration
+  FetchContent_GetProperties(nanoarrow)
+  if(NOT nanoarrow_POPULATED)
+    FetchContent_Populate(nanoarrow)
+    add_subdirectory(${nanoarrow_SOURCE_DIR} ${nanoarrow_BINARY_DIR} EXCLUDE_FROM_ALL)
+  endif()
+endif()
+
+if (NANOARROW_DEVICE_BUNDLE)
+  # The CMake build step is creating nanoarrow_device.c and nanoarrow_device.h;
+  # the CMake install step is copying them to a specific location
+  file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/amalgamation)
+  file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/amalgamation/nanoarrow)
+
+  # nanoarrow_device.h is currently standalone
+  set(NANOARROW_DEVICE_H_TEMP ${CMAKE_BINARY_DIR}/amalgamation/nanoarrow/nanoarrow_device.h)
+  file(READ src/nanoarrow/nanoarrow_device.h SRC_FILE_CONTENTS)
+  file(WRITE ${NANOARROW_DEVICE_H_TEMP} "${SRC_FILE_CONTENTS}")
+
+  # nanoarrow_device.c is currently standalone
+  set(NANOARROW_DEVICE_C_TEMP ${CMAKE_BINARY_DIR}/amalgamation/nanoarrow/nanoarrow_device.c)
+  file(READ src/nanoarrow/nanoarrow_device.c SRC_FILE_CONTENTS)
+  file(WRITE ${NANOARROW_DEVICE_C_TEMP} "${SRC_FILE_CONTENTS}")
+
+  # Add a library that the tests can link against (but don't install it)
+  if(NANOARROW_DEVICE_BUILD_TESTS)
+    add_library(nanoarrow_device ${NANOARROW_DEVICE_C_TEMP})
+
+    target_include_directories(nanoarrow_device PUBLIC
+      $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/src>
+      $<BUILD_INTERFACE:${nanoarrow_SOURCE_DIR}/src/nanoarrow>
+      $<BUILD_INTERFACE:${nanoarrow_BINARY_DIR}/generated>
+      $<BUILD_INTERFACE:${NANOARROW_DEVICE_FLATCC_INCLUDE_DIR}>)
+  endif()
+
+  # Install the amalgamated header and sources
+  install(FILES
+    ${NANOARROW_DEVICE_H_TEMP}
+    ${NANOARROW_DEVICE_C_TEMP}
+    DESTINATION ".")
+else()
+  # This is a normal CMake build that builds + installs some includes and a static lib
+  if (NANOARROW_DEVICE_WITH_METAL)
+    if (NOT EXISTS "${CMAKE_BINARY_DIR}/metal-cpp")
+      message(STATUS "Fetching metal-cpp")
+      file(DOWNLOAD
+        "https://developer.apple.com/metal/cpp/files/metal-cpp_macOS12_iOS15.zip"
+        "${CMAKE_BINARY_DIR}/metal-cpp.zip")
+      file(ARCHIVE_EXTRACT INPUT ${CMAKE_BINARY_DIR}/metal-cpp.zip DESTINATION ${CMAKE_BINARY_DIR})
+    endif()
+
+    if(NOT DEFINED CMAKE_CXX_STANDARD)
+      set(CMAKE_CXX_STANDARD 17)
+    endif()
+    set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+    find_library(METAL_LIBRARY Metal REQUIRED)
+    message(STATUS "Metal framework found at '${METAL_LIBRARY}'")
+
+    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+    message(STATUS "Foundation framework found at '${FOUNDATION_LIBRARY}'")
+
+    find_library(QUARTZ_CORE_LIBRARY QuartzCore REQUIRED)
+    message(STATUS "CoreFoundation framework found at 
'${QUARTZ_CORE_LIBRARY}'")
+
+    set(NANOARROW_DEVICE_SOURCES_METAL src/nanoarrow/nanoarrow_device_metal.cc)
+    set(NANOARROW_DEVICE_INCLUDE_METAL ${CMAKE_BINARY_DIR}/metal-cpp)
+    set(NANOARROW_DEVICE_LIBS_METAL ${METAL_LIBRARY} ${FOUNDATION_LIBRARY} ${QUARTZ_CORE_LIBRARY})
+    set(NANOARROW_DEVICE_DEFS_METAL "NANOARROW_DEVICE_WITH_METAL")
+  endif()
+
+  if (NANOARROW_DEVICE_WITH_CUDA)
+    find_package(CUDAToolkit REQUIRED)
+    set(NANOARROW_DEVICE_SOURCES_CUDA src/nanoarrow/nanoarrow_device_cuda.c)
+    set(NANOARROW_DEVICE_LIBS_CUDA CUDA::cudart)

Review Comment:
   If we're going to continue using the runtime, may want to use the static library instead: `CUDA::cudart_static`.
   
   Would still recommend using the driver library, though.
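   
   For what it's worth, a minimal sketch of what the driver-API flavor of the allocation path could look like (my assumption, not code from this PR; error handling elided):
   
   ```c
   // Hypothetical driver-API (libcuda) equivalent of the cudaMalloc-based path.
   #include <errno.h>
   #include <cuda.h>
   
   static int CudaDriverAllocate(CUdeviceptr* out, size_t size_bytes) {
     // cuInit() must be called once per process before any other driver call;
     // repeated calls are harmless.
     if (cuInit(0) != CUDA_SUCCESS) return EINVAL;
     if (cuMemAlloc(out, size_bytes) != CUDA_SUCCESS) return EINVAL;
     return 0;
   }
   ```
   
   On the CMake side that would mean linking `CUDA::cuda_driver` instead of the runtime target, which also drops the libcudart version coupling.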



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }

Review Comment:
   Should we handle the situation where someone doesn't want to use the default Metal device? Intel Macs supported multiple graphics devices, and presumably we could imagine Apple silicon similarly supporting that in the future as well.
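   
   Roughly the shape I have in mind (entirely hypothetical; `ArrowDeviceMetalDevice` does not exist in this PR):
   
   ```c
   #ifdef NANOARROW_DEVICE_WITH_METAL
     if (device_type == ARROW_DEVICE_METAL) {
       struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
       if (device_id == default_device->device_id) {
         return default_device;
       }
       // Hypothetical: enumerate all devices (e.g. MTLCopyAllDevices() on the
       // metal-cpp side, matching on registryID) and return the match, or NULL.
       return ArrowDeviceMetalDevice(device_id);
     }
   #endif
   ```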



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int64_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int64_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int64_t);
+  device_buffer_view.size_bytes = sizeof(int64_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(
+    struct ArrowDevice* device, struct ArrowArrayView* array_view) {
+  // Calculate buffer sizes or child lengths that require accessing the offsets
+  // buffer. Unlike the nanoarrow core default validation, this just checks the
+  // last buffer and doesn't set a nice error message (could implement those, too
+  // later on).
+  int64_t offset_plus_length = array_view->offset + array_view->length;
+  int32_t last_offset32;
+  int64_t last_offset64;
+
+  switch (array_view->storage_type) {
+    case NANOARROW_TYPE_STRING:
+    case NANOARROW_TYPE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset32;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_STRING:
+    case NANOARROW_TYPE_LARGE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset64;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LIST:
+    case NANOARROW_TYPE_MAP:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+        if (array_view->children[0]->length < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_LIST:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+        if (array_view->children[0]->length < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+    default:
+      break;
+  }
+
+  // Recurse for children
+  for (int64_t i = 0; i < array_view->n_children; i++) {
+    NANOARROW_RETURN_NOT_OK(
+        ArrowDeviceArrayViewValidateDefault(device, array_view->children[i]));
+  }
+
+  return NANOARROW_OK;
+}
+
+ArrowErrorCode ArrowDeviceArrayViewSetArray(
+    struct ArrowDeviceArrayView* device_array_view, struct ArrowDeviceArray* device_array,
+    struct ArrowError* error) {
+  struct ArrowDevice* device =
+      ArrowDeviceResolve(device_array->device_type, device_array->device_id);
+  if (device == NULL) {
+    ArrowErrorSet(error, "Can't resolve device with type %d and identifier 
%ld",
+                  (int)device_array->device_type, 
(long)device_array->device_id);
+    return EINVAL;
+  }
+
+  // Wait on device_array to synchronize with the CPU
+  NANOARROW_RETURN_NOT_OK(device->synchronize_event(ArrowDeviceCpu(), device,
+                                                    device_array->sync_event, error));

Review Comment:
   Why do we need to synchronize on the event here? This function ultimately is just responsible for setting the pointers in the array view from the passed-in array, correct?
   
   If so, synchronizing guarantees that the data underneath the pointers is ready, but doesn't impact the pointers themselves at all.
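   
   Put differently, the ordering I'd expect is something like the sketch below, reusing names from this patch (whether `ArrowArrayViewSetArray` is invoked exactly like this at that point is my assumption, not something shown in this hunk):
   
   ```c
   // Sketch only: assign the pointers without waiting on the event, then wait
   // just before validation, which is the first point that actually reads
   // device memory through those pointers.
   NANOARROW_RETURN_NOT_OK(ArrowArrayViewSetArray(&device_array_view->array_view,
                                                  &device_array->array, error));
   
   NANOARROW_RETURN_NOT_OK(device->synchronize_event(ArrowDeviceCpu(), device,
                                                     device_array->sync_event, error));
   NANOARROW_RETURN_NOT_OK(
       ArrowDeviceArrayViewValidateDefault(device, &device_array_view->array_view));
   ```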



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,362 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowBuffer* buffer,
+                                                    int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMalloc(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+static void ArrowDeviceCudaHostAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                             uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFreeHost(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaHostAllocatorReallocate(
+    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
+    int64_t new_size) {
+  ArrowDeviceCudaHostAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaHostAllocateBuffer(struct ArrowBuffer* buffer,
+                                                        int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMallocHost(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaHostAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaHostAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+// TODO: All these buffer copiers would benefit from cudaMemcpyAsync but there is
+// no good way to incorporate that just yet
+
+static ArrowErrorCode ArrowDeviceCudaBufferInit(struct ArrowDevice* device_src,
+                                                struct ArrowBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowBuffer* dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result = cudaMemcpy(tmp.data, src.data.as_uint8, (size_t)src.size_bytes,
+                                    cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result = cudaMemcpy(tmp.data, src.data.as_uint8, (size_t)src.size_bytes,
+                                    cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    cudaError_t result = cudaMemcpy(tmp.data, src.data.as_uint8, (size_t)src.size_bytes,
+                                    cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, src.data.as_uint8, (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, src.data.as_uint8, (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    memcpy(tmp.data, src.data.as_uint8, (size_t)src.size_bytes);
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaBufferCopy(struct ArrowDevice* device_src,
+                                                struct ArrowBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowBufferView dst) {
+  // This is all just cudaMemcpy or memcpy
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes,
+                                    cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes,
+                                    cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    cudaError_t result = cudaMemcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes,
+                                    cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    memcpy(dst.data.as_uint8, src.data.as_uint8, dst.size_bytes);
+    return NANOARROW_OK;

Review Comment:
   These aren't safe because CUDA host memory is subject to the asynchronous 
behavior of GPU execution. You should still use `cudaMemcpy` with
`cudaMemcpyHostToHost`, as that handles the synchronization.
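   As a rough sketch of the suggested change (the wrapper below is
illustrative, not part of this PR), the pinned-host branches could route
through the CUDA runtime like this:

```c
#include <errno.h>
#include <cuda_runtime.h>

// Hedged sketch: copies involving CUDA pinned host memory go through the
// CUDA runtime rather than raw memcpy(); cudaMemcpyHostToHost makes the
// runtime synchronize with in-flight GPU work touching the pinned pages.
static int CopyPinnedHost(void* dst, const void* src, size_t size_bytes) {
  cudaError_t result = cudaMemcpy(dst, src, size_bytes, cudaMemcpyHostToHost);
  if (result != cudaSuccess) {
    return EINVAL;
  }
  return 0;  // i.e., NANOARROW_OK
}
```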



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int32_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int32_t);
+  device_buffer_view.size_bytes = sizeof(int32_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int64_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int64_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int64_t);
+  device_buffer_view.size_bytes = sizeof(int64_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(
+    struct ArrowDevice* device, struct ArrowArrayView* array_view) {
+  // Calculate buffer sizes or child lengths that require accessing the offsets
+  // buffer. Unlike the nanoarrow core default validation, this just checks the
+  // last buffer and doesn't set a nice error message (could implement those, too,
+  // later on).
+  int64_t offset_plus_length = array_view->offset + array_view->length;
+  int32_t last_offset32;
+  int64_t last_offset64;
+
+  switch (array_view->storage_type) {
+    case NANOARROW_TYPE_STRING:
+    case NANOARROW_TYPE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset32;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_STRING:
+    case NANOARROW_TYPE_LARGE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset64;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LIST:
+    case NANOARROW_TYPE_MAP:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+        if (array_view->children[0]->length < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_LIST:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+        if (array_view->children[0]->length < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+    default:
+      break;
+  }
+
+  // Recurse for children
+  for (int64_t i = 0; i < array_view->n_children; i++) {
+    NANOARROW_RETURN_NOT_OK(
+        ArrowDeviceArrayViewValidateDefault(device, array_view->children[i]));
+  }
+
+  return NANOARROW_OK;
+}
+
+ArrowErrorCode ArrowDeviceArrayViewSetArray(
+    struct ArrowDeviceArrayView* device_array_view, struct ArrowDeviceArray* device_array,
+    struct ArrowError* error) {
+  struct ArrowDevice* device =
+      ArrowDeviceResolve(device_array->device_type, device_array->device_id);
+  if (device == NULL) {
+    ArrowErrorSet(error, "Can't resolve device with type %d and identifier %ld",
+                  (int)device_array->device_type, (long)device_array->device_id);
+    return EINVAL;
+  }
+
+  // Wait on device_array to synchronize with the CPU
+  NANOARROW_RETURN_NOT_OK(device->synchronize_event(ArrowDeviceCpu(), device,
+                                                    device_array->sync_event, error));
+  device_array->sync_event = NULL;
+
+  // Set the device array device
+  device_array_view->device = device;
+
+  // nanoarrow's minimal validation is fine here (sets buffer sizes for non offset-buffer
+  // types and errors for invalid ones)
+  NANOARROW_RETURN_NOT_OK(ArrowArrayViewSetArrayMinimal(&device_array_view->array_view,
+                                                        &device_array->array, error));
+  // Run custom validator that copies memory to the CPU where required.
+  // The custom implementation doesn't set nice error messages yet.
+  NANOARROW_RETURN_NOT_OK_WITH_ERROR(
+      ArrowDeviceArrayViewValidateDefault(device, &device_array_view->array_view), error);
+
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewCopyInternal(struct ArrowDevice* device_src,
+                                                       struct ArrowArrayView* src,
+                                                       struct ArrowDevice* device_dst,
+                                                       struct ArrowArray* dst) {
+  // Currently no attempt to minimize the amount of memory copied (i.e.,
+  // by applying offset + length and copying potentially fewer bytes)
+  dst->length = src->length;
+  dst->offset = src->offset;
+  dst->null_count = src->null_count;
+
+  struct ArrowDeviceBufferView buffer_view_src;
+  buffer_view_src.offset_bytes = 0;
+
+  for (int i = 0; i < 3; i++) {
+    if (src->layout.buffer_type[i] == NANOARROW_BUFFER_TYPE_NONE) {
+      break;
+    }
+
+    buffer_view_src.private_data = src->buffer_views[i].data.data;
+    buffer_view_src.size_bytes = src->buffer_views[i].size_bytes;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferInit(device_src, buffer_view_src, device_dst,
+                                                  ArrowArrayBuffer(dst, i)));
+  }
+
+  for (int64_t i = 0; i < src->n_children; i++) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceArrayViewCopyInternal(
+        device_src, src->children[i], device_dst, dst->children[i]));
+  }
+
+  if (src->dictionary != NULL) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceArrayViewCopyInternal(
+        device_src, src->dictionary, device_dst, dst->dictionary));
+  }
+
+  return NANOARROW_OK;
+}
+
+ArrowErrorCode ArrowDeviceArrayViewCopy(struct ArrowDeviceArrayView* src,
+                                        struct ArrowDevice* device_dst,
+                                        struct ArrowDeviceArray* dst) {
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(ArrowArrayInitFromArrayView(&tmp, &src->array_view, NULL));
+
+  int result =
+      ArrowDeviceArrayViewCopyInternal(src->device, &src->array_view, device_dst, &tmp);
+  if (result != NANOARROW_OK) {
+    tmp.release(&tmp);
+    return result;
+  }
+
+  result = ArrowArrayFinishBuilding(&tmp, NANOARROW_VALIDATION_LEVEL_MINIMAL, NULL);
+  if (result != NANOARROW_OK) {
+    tmp.release(&tmp);
+    return result;
+  }
+
+  ArrowDeviceArrayInit(dst, device_dst);
+  ArrowArrayMove(&tmp, &dst->array);
+  dst->device_type = device_dst->device_type;
+  dst->device_id = device_dst->device_id;
+  return result;
+}
+
+int ArrowDeviceArrayViewCopyRequired(struct ArrowDeviceArrayView* src,
+                                     struct ArrowDevice* device_dst) {

Review Comment:
   I think we should add some specification of the expected behavior of this
function, e.g., a `CUDA` device could read a `CUDA_HOST` buffer without a copy.
Similarly, a `CUDA` device may be able to read a `CUDA` buffer from a different
device without copying it.
   
   Neither of these behaviors may be desired though.
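   For concreteness, a hypothetical sketch of what such a specification could
say (the predicate below is illustrative and ignores the `ArrowArrayView*`
argument that the real `copy_required` callback takes):

```c
// Hypothetical specification sketch, not this PR's implementation.
// Returns 0 when device_dst can read device_src's memory directly.
static int ArrowDeviceCudaCopyRequiredSketch(struct ArrowDevice* device_src,
                                             struct ArrowDevice* device_dst) {
  // Pinned host memory (CUDA_HOST) is addressable from the CPU and from the
  // CUDA device, so no copy is strictly required in either direction.
  if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
      (device_dst->device_type == ARROW_DEVICE_CPU ||
       device_dst->device_type == ARROW_DEVICE_CUDA)) {
    return 0;
  }

  // A CUDA buffer is already resident on the same CUDA device; reading it
  // from a *different* device would need an explicit peer-access check.
  if (device_src->device_type == ARROW_DEVICE_CUDA &&
      device_dst->device_type == ARROW_DEVICE_CUDA &&
      device_src->device_id == device_dst->device_id) {
    return 0;
  }

  return 1;  // copy required
}
```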



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+static ArrowErrorCode ArrowDeviceArrayViewCopyInternal(struct ArrowDevice* device_src,
+                                                       struct ArrowArrayView* src,
+                                                       struct ArrowDevice* device_dst,
+                                                       struct ArrowArray* dst) {
+  // Currently no attempt to minimize the amount of memory copied (i.e.,
+  // by applying offset + length and copying potentially fewer bytes)
+  dst->length = src->length;
+  dst->offset = src->offset;
+  dst->null_count = src->null_count;
+
+  struct ArrowDeviceBufferView buffer_view_src;
+  buffer_view_src.offset_bytes = 0;
+
+  for (int i = 0; i < 3; i++) {
+    if (src->layout.buffer_type[i] == NANOARROW_BUFFER_TYPE_NONE) {
+      break;
+    }
+
+    buffer_view_src.private_data = src->buffer_views[i].data.data;
+    buffer_view_src.size_bytes = src->buffer_views[i].size_bytes;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferInit(device_src, buffer_view_src, device_dst,
+                                                  ArrowArrayBuffer(dst, i)));

Review Comment:
   If this buffer initialization is asynchronous, then we need to set a
synchronization event somewhere, I think?
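   For illustration, a hedged sketch of what an asynchronous variant might
look like (all names are illustrative; the recorded event would presumably be
surfaced through `ArrowDeviceArray::sync_event`):

```c
#include <errno.h>
#include <cuda_runtime.h>

// Hedged sketch: an asynchronous copy that records an event the consumer
// can wait on (e.g. with cudaEventSynchronize) before reading the buffer.
static int CopyAsyncWithEvent(void* dst, const void* src, size_t size_bytes,
                              cudaStream_t stream, cudaEvent_t* out_event) {
  if (cudaMemcpyAsync(dst, src, size_bytes, cudaMemcpyHostToDevice, stream) !=
      cudaSuccess) {
    return EINVAL;
  }
  // The event completes only once the copy queued above has finished.
  if (cudaEventCreate(out_event) != cudaSuccess ||
      cudaEventRecord(*out_event, stream) != cudaSuccess) {
    return EINVAL;
  }
  return 0;  // i.e., NANOARROW_OK; the caller owns *out_event
}
```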



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(

Review Comment:
   When building general-purpose APIs for accelerators, I generally approach
them with the mindset of "make copies to/from device explicit, not implicit".
In this situation, if someone wanted to validate their data, I think a
reasonable tradeoff could be that they explicitly copy the data to the CPU
device themselves and then call validation against the CPU device.
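   Under that model, the caller-side workflow would look roughly like this (a
sketch using only functions from this diff; `gpu_view` is an assumed,
already-populated `struct ArrowDeviceArrayView`, and error handling is elided):

```c
// Hedged sketch of the explicit copy-then-validate workflow described above.
struct ArrowDeviceArray cpu_array;
struct ArrowDeviceArrayView cpu_view;

// 1. Explicitly copy the device-resident data to the CPU device.
ArrowDeviceArrayViewCopy(&gpu_view, ArrowDeviceCpu(), &cpu_array);

// 2. Validate against the CPU device, where every buffer is readable.
ArrowDeviceArrayViewInit(&cpu_view);
ArrowDeviceArrayViewSetArray(&cpu_view, &cpu_array, NULL);
```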



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
