Posted to github@arrow.apache.org by "zeroshade (via GitHub)" <gi...@apache.org> on 2023/06/13 19:06:32 UTC

[GitHub] [arrow-nanoarrow] zeroshade commented on a diff in pull request #205: feat(extensions/nanoarrow_device): Draft DeviceArray interface

zeroshade commented on code in PR #205:
URL: https://github.com/apache/arrow-nanoarrow/pull/205#discussion_r1228535899


##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.h:
##########
@@ -0,0 +1,439 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef NANOARROW_DEVICE_H_INCLUDED
+#define NANOARROW_DEVICE_H_INCLUDED
+
+#include "nanoarrow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// \defgroup nanoarrow_device-arrow-cdata Arrow C Device interface
+///
+/// The Arrow Device and Stream interfaces are part of the
+/// Arrow Columnar Format specification
+/// (https://arrow.apache.org/docs/format/Columnar.html). See the Arrow documentation for

Review Comment:
   should we link directly to the device interface docs? (https://arrow.apache.org/docs/dev/format/CDeviceDataInterface.html)
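
   For context, a minimal producer-side sketch (not part of the PR) of what exporting through this interface looks like, using ArrowDeviceArrayInit from nanoarrow_device.c further down in this thread. The helper name and the assumption that `array`'s buffers already live on `device` are mine:

   #include "nanoarrow_device.h"

   // Wrap an ArrowArray whose buffers are already allocated on `device`.
   // ArrowDeviceArrayInit zeroes the struct and fills device_type/device_id;
   // sync_event may be NULL when no synchronization is required.
   static void ExportOnDevice(struct ArrowArray* array, struct ArrowDevice* device,
                              void* sync_event, struct ArrowDeviceArray* out) {
     ArrowDeviceArrayInit(out, device);
     ArrowArrayMove(array, &out->array);  // out->array now owns the buffers
     out->sync_event = sync_event;
   }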



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.h:
##########
@@ -0,0 +1,439 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef NANOARROW_DEVICE_H_INCLUDED
+#define NANOARROW_DEVICE_H_INCLUDED
+
+#include "nanoarrow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// \defgroup nanoarrow_device-arrow-cdata Arrow C Device interface
+///
+/// The Arrow Device and Stream interfaces are part of the
+/// Arrow Columnar Format specification
+/// (https://arrow.apache.org/docs/format/Columnar.html). See the Arrow documentation for
+/// detailed documentation of these structures.
+///
+/// @{
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+/// \defgroup arrow-device-types Device Types
+/// These macros are compatible with the dlpack DLDeviceType values,
+/// using the same value for each enum as the equivalent kDL<type>
+/// from dlpack.h. This list should continue to be kept in sync with
+/// the equivalent dlpack.h enum values over time to ensure
+/// compatibility, rather than potentially diverging.
+///
+/// To ensure predictability with the ABI we use macros instead of
+/// an enum so the storage type is not compiler dependent.
+///
+/// @{
+
+/// \brief DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+/// \brief CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+/// \brief CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+/// \brief Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+/// \brief OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+/// \brief Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+/// \brief Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+/// \brief Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+/// \brief ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+/// \brief Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+/// \brief Reserved for extension
+///
+/// used to quickly test extension devices, semantics
+/// can differ based on the implementation
+#define ARROW_DEVICE_EXT_DEV 12
+/// \brief CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+/// \brief unified shared memory allocated on a oneAPI
+/// non-partitioned device.
+///
+/// A call to the oneAPI runtime is required to determine the device
+/// type, the USM allocation type, and the sycl context it is bound to.
+#define ARROW_DEVICE_ONEAPI 14
+/// \brief GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+/// \brief Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+/// @}
+
+/// \brief Struct for passing an Arrow Array alongside
+/// device memory information.
+struct ArrowDeviceArray {
+  /// \brief the Allocated Array
+  ///
+  /// the buffers in the array (along with the buffers of any
+  /// children) are what is allocated on the device.
+  ///
+  /// the private_data and release callback of the arrow array
+  /// should contain any necessary information and structures
+  /// related to freeing the array according to the device it
+  /// is allocated on, rather than having a separate release
+  /// callback embedded here.
+  struct ArrowArray array;
+  /// \brief The device id to identify a specific device
+  /// if multiple of this type are on the system.
+  ///
+  /// the semantics of the id will be hardware dependent.
+  int64_t device_id;
+  /// \brief The type of device which can access this memory.
+  ArrowDeviceType device_type;
+  /// \brief An event-like object to synchronize on if needed.
+  ///
+  /// Many devices, like GPUs, are primarily asynchronous with
+  /// respect to CPU processing. As such in order to safely access
+  /// memory, it is often necessary to have an object to synchronize
+  /// processing on. Since different devices will use different types
+  /// to specify this we use a void* that can be coerced into
+  /// whatever the device appropriate type is (e.g. cudaEvent_t for
+  /// CUDA and hipEvent_t for HIP).
+  ///
+  /// If synchronization is not needed this can be null. If this is
+  /// non-null, then it should be used to call the appropriate sync
+  /// method for the device (e.g. cudaStreamWaitEvent / hipStreamWaitEvent).
+  ///
+  /// Expected type to coerce this void* to depending on device type:
+  ///   cuda: cudaEvent_t*
+  ///   ROCm: hipEvent_t*
+  ///   OpenCL: cl_event*
+  ///   Vulkan: VkEvent*
+  ///   Metal: MTLEvent*
+  ///   OneAPI: sycl::event*
+  ///
+  void* sync_event;
+  /// \brief Reserved bytes for future expansion.
+  ///
+  /// As non-CPU development expands we can update this struct
+  /// without ABI breaking changes. This also rounds out the
+  /// total size of this struct to be 128 bytes (power of 2)
+  /// on 64-bit systems. These bytes should be zero'd out after
+  /// allocation in order to ensure safe evolution of the ABI in
+  /// the future.
+  int64_t reserved[3];
+};
+
+#endif  // ARROW_C_DEVICE_DATA_INTERFACE
+
+#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
+#define ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
+///
+/// This stream is intended to provide a stream of data on a single
+/// device; if a producer wants data to be produced on multiple devices,
+/// multiple streams should be provided, one per device.
+struct ArrowDeviceArrayStream {
+  /// \brief The device that this stream produces data on.
+  ///
+  /// All ArrowDeviceArrays that are produced by this
+  /// stream should have the same device_type as set
+  /// here. Including it here in the stream object is
+  /// a convenience to allow consumers simpler processing
+  /// since they can assume all arrays that result from
+  /// this stream to be on this device type.
+  ArrowDeviceType device_type;
+
+  /// \brief Callback to get the stream schema
+  /// (will be the same for all arrays in the stream).
+  ///
+  /// If successful, the ArrowSchema must be released independently from the stream.
+  /// The schema should be accessible via CPU memory.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct to export the schema to
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
+
+  /// \brief Callback to get the next array
+  ///
+  /// If there is no error and the returned array has been released, the stream
+  /// has ended. If successful, the ArrowArray must be released independently
+  /// from the stream.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct where to export the Array and device info
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
+
+  /// \brief Callback to get optional detailed error information.
+  ///
+  /// This must only be called if the last stream operation failed
+  /// with a non-0 return code.
+  ///
+  /// The returned pointer is only valid until the next operation on this stream
+  /// (including release).
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \return pointer to a null-terminated character array describing
+  /// the last error, or NULL if no description is available.
+  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Release callback: release the stream's own resources.
+  ///
+  /// Note that arrays returned by `get_next` must be individually released.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  void (*release)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Move the contents of src into dst and set src->array.release to NULL
+static inline void ArrowDeviceArrayMove(struct ArrowDeviceArray* src,
+                                        struct ArrowDeviceArray* dst) {
+  memcpy(dst, src, sizeof(struct ArrowDeviceArray));
+  src->array.release = 0;
+}
+
+/// @}
+
+#ifdef NANOARROW_NAMESPACE
+
+#define ArrowDeviceCheckRuntime \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCheckRuntime)
+#define ArrowDeviceArrayInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayInit)
+#define ArrowDeviceArrayViewInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewInit)
+#define ArrowDeviceArrayViewReset \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewReset)
+#define ArrowDeviceArrayViewSetArray \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewSetArray)
+#define ArrowDeviceArrayViewCopy \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopy)
+#define ArrowDeviceArrayViewCopyRequired \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopyRequired)
+#define ArrowDeviceArrayTryMove \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayTryMove)
+#define ArrowDeviceResolve NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceResolve)
+#define ArrowDeviceCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCpu)
+#define ArrowDeviceInitCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceInitCpu)
+#define ArrowDeviceBufferInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferInit)
+#define ArrowDeviceBufferMove NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferMove)
+#define ArrowDeviceBufferCopy NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferCopy)
+#define ArrowDeviceBasicArrayStreamInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBasicArrayStreamInit)
+
+#endif
+
+/// \defgroup nanoarrow_device Nanoarrow Device extension
+///
+/// Except where noted, objects are not thread-safe and clients should
+/// take care to serialize accesses to methods.
+///
+/// @{
+
+/// \brief Checks the nanoarrow runtime to make sure the run/build versions match
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error);
+
+/// \brief A description of a buffer
+struct ArrowDeviceBufferView {
+  /// \brief Device-defined handle for a buffer.
+  ///
+  /// For the CPU device, this is a normal memory address; for all other types that are
+  /// currently supported, this is a device memory address on which CPU-like arithmetic
+  /// can be performed. This may not be true for future devices (i.e., it may be a pointer
+  /// to some buffer abstraction if the concept of a memory address does not exist or
+  /// is impractical).
+  const void* private_data;
+
+  /// \brief An offset into the buffer handle defined by private_data
+  int64_t offset_bytes;
+
+  /// \brief The size of the buffer in bytes
+  int64_t size_bytes;
+};
+
+/// \brief A Device wrapper with callbacks for basic memory management tasks
+///
+/// All device objects are currently implemented as singletons; however, this
+/// may change as implementations progress.
+struct ArrowDevice {
+  /// \brief The device type integer identifier (see ArrowDeviceArray)
+  ArrowDeviceType device_type;
+
+  /// \brief The device identifier (see ArrowDeviceArray)
+  int64_t device_id;
+
+  /// \brief Initialize an owning buffer from existing content
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// copying existing content.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_init)(struct ArrowDevice* device_src,
+                                struct ArrowDeviceBufferView src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);
+
+  /// \brief Move an owning buffer to a device
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// moving an existing buffer. If NANOARROW_OK is returned, src will have
+  /// been released or moved by the implementation and dst must be released by
+  /// the caller.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_move)(struct ArrowDevice* device_src, struct ArrowBuffer* src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);
+
+  /// \brief Copy a section of memory into a preallocated buffer
+  ///
+  /// As opposed to the other buffer operations, this is designed to support
+  /// copying very small slices of memory.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_copy)(struct ArrowDevice* device_src,
+                                struct ArrowDeviceBufferView src,
+                                struct ArrowDevice* device_dst,
+                                struct ArrowDeviceBufferView dst);

Review Comment:
   Same question: should we specify the semantics of the device types? Should implementations have to check the device type every time for both source and destination, or should they have to implement multiple devices?
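
   To make the discussion concrete, here is a sketch (assumptions mine) of the pattern the rest of the PR uses, mirroring ArrowDeviceCpuBufferCopy and the ArrowDeviceBufferCopy wrapper in nanoarrow_device.c quoted later in this thread: each implementation checks both endpoints and declines unknown combinations with ENOTSUP, and the caller then gives the other device a chance.

   #include <errno.h>
   #include <string.h>
   #include "nanoarrow_device.h"

   // A device implementation only handles pairs it understands.
   static ArrowErrorCode ExampleCpuBufferCopy(struct ArrowDevice* device_src,
                                              struct ArrowDeviceBufferView src,
                                              struct ArrowDevice* device_dst,
                                              struct ArrowDeviceBufferView dst) {
     if (device_src->device_type != ARROW_DEVICE_CPU ||
         device_dst->device_type != ARROW_DEVICE_CPU) {
       return ENOTSUP;
     }
     // private_data is const void* in the view; the PR's implementation casts
     // it the same way before doing pointer arithmetic.
     memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
            ((const uint8_t*)src.private_data) + src.offset_bytes,
            (size_t)dst.size_bytes);
     return NANOARROW_OK;
   }

   // Caller-side dispatch: try the destination device, then fall back to the
   // source device, so no single device has to know about every pair.
   static ArrowErrorCode ExampleBufferCopy(struct ArrowDevice* device_src,
                                           struct ArrowDeviceBufferView src,
                                           struct ArrowDevice* device_dst,
                                           struct ArrowDeviceBufferView dst) {
     int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
     if (result == ENOTSUP) {
       result = device_src->buffer_copy(device_src, src, device_dst, dst);
     }
     return result;
   }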



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.h:
##########
@@ -0,0 +1,439 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef NANOARROW_DEVICE_H_INCLUDED
+#define NANOARROW_DEVICE_H_INCLUDED
+
+#include "nanoarrow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// \defgroup nanoarrow_device-arrow-cdata Arrow C Device interface
+///
+/// The Arrow Device and Stream interfaces are part of the
+/// Arrow Columnar Format specification
+/// (https://arrow.apache.org/docs/format/Columnar.html). See the Arrow documentation for
+/// detailed documentation of these structures.
+///
+/// @{
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+/// \defgroup arrow-device-types Device Types
+/// These macros are compatible with the dlpack DLDeviceType values,
+/// using the same value for each enum as the equivalent kDL<type>
+/// from dlpack.h. This list should continue to be kept in sync with
+/// the equivalent dlpack.h enum values over time to ensure
+/// compatibility, rather than potentially diverging.
+///
+/// To ensure predictability with the ABI we use macros instead of
+/// an enum so the storage type is not compiler dependent.
+///
+/// @{
+
+/// \brief DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+/// \brief CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+/// \brief CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+/// \brief Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+/// \brief OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+/// \brief Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+/// \brief Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+/// \brief Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+/// \brief ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+/// \brief Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+/// \brief Reserved for extension
+///
+/// used to quickly test extension devices, semantics
+/// can differ based on the implementation
+#define ARROW_DEVICE_EXT_DEV 12
+/// \brief CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+/// \brief unified shared memory allocated on a oneAPI
+/// non-partitioned device.
+///
+/// A call to the oneAPI runtime is required to determine the device
+/// type, the USM allocation type, and the sycl context it is bound to.
+#define ARROW_DEVICE_ONEAPI 14
+/// \brief GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+/// \brief Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+/// @}
+
+/// \brief Struct for passing an Arrow Array alongside
+/// device memory information.
+struct ArrowDeviceArray {
+  /// \brief the Allocated Array
+  ///
+  /// the buffers in the array (along with the buffers of any
+  /// children) are what is allocated on the device.
+  ///
+  /// the private_data and release callback of the arrow array
+  /// should contain any necessary information and structures
+  /// related to freeing the array according to the device it
+  /// is allocated on, rather than having a separate release
+  /// callback embedded here.
+  struct ArrowArray array;
+  /// \brief The device id to identify a specific device
+  /// if multiple of this type are on the system.
+  ///
+  /// the semantics of the id will be hardware dependent.
+  int64_t device_id;
+  /// \brief The type of device which can access this memory.
+  ArrowDeviceType device_type;
+  /// \brief An event-like object to synchronize on if needed.
+  ///
+  /// Many devices, like GPUs, are primarily asynchronous with
+  /// respect to CPU processing. As such in order to safely access
+  /// memory, it is often necessary to have an object to synchronize
+  /// processing on. Since different devices will use different types
+  /// to specify this we use a void* that can be coerced into
+  /// whatever the device appropriate type is (e.g. cudaEvent_t for
+  /// CUDA and hipEvent_t for HIP).
+  ///
+  /// If synchronization is not needed this can be null. If this is
+  /// non-null, then it should be used to call the appropriate sync
+  /// method for the device (e.g. cudaStreamWaitEvent / hipStreamWaitEvent).
+  ///
+  /// Expected type to coerce this void* to depending on device type:
+  ///   cuda: cudaEvent_t*
+  ///   ROCm: hipEvent_t*
+  ///   OpenCL: cl_event*
+  ///   Vulkan: VkEvent*
+  ///   Metal: MTLEvent*
+  ///   OneAPI: sycl::event*
+  ///
+  void* sync_event;
+  /// \brief Reserved bytes for future expansion.
+  ///
+  /// As non-CPU development expands we can update this struct
+  /// without ABI breaking changes. This also rounds out the
+  /// total size of this struct to be 128 bytes (power of 2)
+  /// on 64-bit systems. These bytes should be zero'd out after
+  /// allocation in order to ensure safe evolution of the ABI in
+  /// the future.
+  int64_t reserved[3];
+};
+
+#endif  // ARROW_C_DEVICE_DATA_INTERFACE
+
+#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
+#define ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
+///
+/// This stream is intended to provide a stream of data on a single
+/// device; if a producer wants data to be produced on multiple devices,
+/// multiple streams should be provided, one per device.
+struct ArrowDeviceArrayStream {
+  /// \brief The device that this stream produces data on.
+  ///
+  /// All ArrowDeviceArrays that are produced by this
+  /// stream should have the same device_type as set
+  /// here. Including it here in the stream object is
+  /// a convenience to allow consumers simpler processing
+  /// since they can assume all arrays that result from
+  /// this stream to be on this device type.
+  ArrowDeviceType device_type;
+
+  /// \brief Callback to get the stream schema
+  /// (will be the same for all arrays in the stream).
+  ///
+  /// If successful, the ArrowSchema must be released independently from the stream.
+  /// The schema should be accessible via CPU memory.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct to export the schema to
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
+
+  /// \brief Callback to get the next array
+  ///
+  /// If there is no error and the returned array has been released, the stream
+  /// has ended. If successful, the ArrowArray must be released independently
+  /// from the stream.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct where to export the Array and device info
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
+
+  /// \brief Callback to get optional detailed error information.
+  ///
+  /// This must only be called if the last stream operation failed
+  /// with a non-0 return code.
+  ///
+  /// The returned pointer is only valid until the next operation on this stream
+  /// (including release).
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \return pointer to a null-terminated character array describing
+  /// the last error, or NULL if no description is available.
+  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Release callback: release the stream's own resources.
+  ///
+  /// Note that arrays returned by `get_next` must be individually released.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  void (*release)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Move the contents of src into dst and set src->array.release to NULL
+static inline void ArrowDeviceArrayMove(struct ArrowDeviceArray* src,
+                                        struct ArrowDeviceArray* dst) {
+  memcpy(dst, src, sizeof(struct ArrowDeviceArray));
+  src->array.release = 0;
+}
+
+/// @}
+
+#ifdef NANOARROW_NAMESPACE
+
+#define ArrowDeviceCheckRuntime \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCheckRuntime)
+#define ArrowDeviceArrayInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayInit)
+#define ArrowDeviceArrayViewInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewInit)
+#define ArrowDeviceArrayViewReset \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewReset)
+#define ArrowDeviceArrayViewSetArray \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewSetArray)
+#define ArrowDeviceArrayViewCopy \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopy)
+#define ArrowDeviceArrayViewCopyRequired \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopyRequired)
+#define ArrowDeviceArrayTryMove \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayTryMove)
+#define ArrowDeviceResolve NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceResolve)
+#define ArrowDeviceCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCpu)
+#define ArrowDeviceInitCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceInitCpu)
+#define ArrowDeviceBufferInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferInit)
+#define ArrowDeviceBufferMove NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferMove)
+#define ArrowDeviceBufferCopy NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferCopy)
+#define ArrowDeviceBasicArrayStreamInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBasicArrayStreamInit)
+
+#endif
+
+/// \defgroup nanoarrow_device Nanoarrow Device extension
+///
+/// Except where noted, objects are not thread-safe and clients should
+/// take care to serialize accesses to methods.
+///
+/// @{
+
+/// \brief Checks the nanoarrow runtime to make sure the run/build versions match
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error);
+
+/// \brief A description of a buffer
+struct ArrowDeviceBufferView {
+  /// \brief Device-defined handle for a buffer.
+  ///
+  /// For the CPU device, this is a normal memory address; for all other types that are
+  /// currently supported, this is a device memory address on which CPU-like arithmetic
+  /// can be performed. This may not be true for future devices (i.e., it may be a pointer
+  /// to some buffer abstraction if the concept of a memory address does not exist or
+  /// is impractical).
+  const void* private_data;
+
+  /// \brief An offset into the buffer handle defined by private_data
+  int64_t offset_bytes;
+
+  /// \brief The size of the buffer in bytes
+  int64_t size_bytes;
+};
+
+/// \brief A Device wrapper with callbacks for basic memory management tasks
+///
+/// All device objects are currently implemented as singletons; however, this
+/// may change as implementations progress.
+struct ArrowDevice {
+  /// \brief The device type integer identifier (see ArrowDeviceArray)
+  ArrowDeviceType device_type;
+
+  /// \brief The device identifier (see ArrowDeviceArray)
+  int64_t device_id;
+
+  /// \brief Initialize an owning buffer from existing content
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// copying existing content.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_init)(struct ArrowDevice* device_src,
+                                struct ArrowDeviceBufferView src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);
+
+  /// \brief Move an owning buffer to a device
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// moving an existing buffer. If NANOARROW_OK is returned, src will have
+  /// been released or moved by the implementation and dst must be released by
+  /// the caller.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_move)(struct ArrowDevice* device_src, struct ArrowBuffer* src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);

Review Comment:
   Should we specify semantics for the src and dst devices, such as requiring the src to be CPU memory, etc.?
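
   A minimal sketch of the semantics as implemented elsewhere in the PR (see ArrowDeviceCpuBufferMove in nanoarrow_device.c below); names other than the nanoarrow ones are mine. The move is only zero-copy when the implementation recognizes both endpoints; everything else is declined so the caller can fall back to the other device or to a copy:

   #include <errno.h>
   #include "nanoarrow_device.h"

   static ArrowErrorCode ExampleCpuBufferMove(struct ArrowDevice* device_src,
                                              struct ArrowBuffer* src,
                                              struct ArrowDevice* device_dst,
                                              struct ArrowBuffer* dst) {
     if (device_src->device_type != ARROW_DEVICE_CPU ||
         device_dst->device_type != ARROW_DEVICE_CPU) {
       return ENOTSUP;  // let the caller try the other device's buffer_move
     }
     ArrowBufferMove(src, dst);  // src is released; dst now owns the allocation
     return NANOARROW_OK;
   }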



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,376 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowBuffer* buffer,
+                                                    int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMalloc(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+static void ArrowDeviceCudaHostAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                             uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFreeHost(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaHostAllocatorReallocate(
+    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
+    int64_t new_size) {
+  ArrowDeviceCudaHostAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaHostAllocateBuffer(struct ArrowBuffer* buffer,
+                                                        int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMallocHost(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaHostAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaHostAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+// TODO: All these buffer copiers would benefit from cudaMemcpyAsync but there is
+// no good way to incorporate that just yet
+
+static ArrowErrorCode ArrowDeviceCudaBufferInit(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowBuffer* dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    memcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaBufferCopy(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowDeviceBufferView dst) {
+  // This is all just cudaMemcpy or memcpy
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static int ArrowDeviceCudaCopyRequired(struct ArrowDevice* device_src,
+                                       struct ArrowArrayView* src,
+                                       struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    // Copy: we can't assume the memory has been registered. A user can force
+    // this by registering the memory and setting device->device_type manually.
+    // A copy will ensure all buffers are allocated with cudaMallocHost().
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Move: the array's release callback is responsible for cudaFreeHost or
+    // deregistration (or perhaps this has been handled at a higher level)
+    return 0;
+
+  } else {
+    // Fall back to the other device's implementation
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaSynchronize(struct ArrowDevice* device,
+                                                 struct ArrowDevice* device_event,
+                                                 void* sync_event,
+                                                 struct ArrowError* error) {

Review Comment:
   `device` isn't used here, only `device_event`; why the need for both?
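
   For comparison, the CPU implementation quoted later in this thread (ArrowDeviceCpuSynchronize) is the case where both parameters matter: it only uses `device` when forwarding to the device that owns the event. A condensed sketch of that logic:

   #include <errno.h>
   #include "nanoarrow_device.h"

   static ArrowErrorCode ExampleCpuSynchronize(struct ArrowDevice* device,
                                               struct ArrowDevice* device_event,
                                               void* sync_event,
                                               struct ArrowError* error) {
     if (device_event->device_type == ARROW_DEVICE_CPU) {
       // CPU memory has nothing to wait on; a non-NULL event is an error.
       if (sync_event != NULL) {
         ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU");
         return EINVAL;
       }
       return NANOARROW_OK;
     }
     // Forward to the device that owns the event, swapping the roles; this is
     // the only place `device` itself is needed.
     return device_event->synchronize_event(device_event, device, sync_event, error);
   }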



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+
+#include "nanoarrow.h"
+
+#include "nanoarrow_device.h"
+
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error) {
+  const char* nanoarrow_runtime_version = ArrowNanoarrowVersion();
+  const char* nanoarrow_ipc_build_time_version = NANOARROW_VERSION;
+
+  if (strcmp(nanoarrow_runtime_version, nanoarrow_ipc_build_time_version) != 0) {
+    ArrowErrorSet(error, "Expected nanoarrow runtime version '%s' but found version '%s'",
+                  nanoarrow_ipc_build_time_version, nanoarrow_runtime_version);
+    return EINVAL;
+  }
+
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayInit(struct ArrowDeviceArray* device_array,
+                          struct ArrowDevice* device) {
+  memset(device_array, 0, sizeof(struct ArrowDeviceArray));
+  device_array->device_type = device->device_type;
+  device_array->device_id = device->device_id;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferInit(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferInit(dst);
+  dst->allocator = ArrowBufferAllocatorDefault();
+  NANOARROW_RETURN_NOT_OK(ArrowBufferAppend(
+      dst, ((uint8_t*)src.private_data) + src.offset_bytes, src.size_bytes));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferMove(struct ArrowDevice* device_src,
+                                               struct ArrowBuffer* src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferMove(src, dst);
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferCopy(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowDeviceBufferView dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+         ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+  return NANOARROW_OK;
+}
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {

Review Comment:
   Should there be an equivalent that *doesn't* do the copy?
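
   The body of ArrowDeviceBufferGetInt32 isn't quoted above, so the following is only a guess at its shape (the helper name and details are hypothetical): fetch one element into CPU stack memory through the buffer_copy path. A zero-copy equivalent would presumably just return a pointer when the source device is the CPU device and fail otherwise.

   #include "nanoarrow_device.h"

   // Hypothetical copy-based element getter built on ArrowDeviceBufferCopy.
   static ArrowErrorCode ExampleBufferGetInt32(struct ArrowDevice* device,
                                               struct ArrowBufferView buffer_view,
                                               int64_t i, int32_t* out) {
     struct ArrowDeviceBufferView src;
     src.private_data = buffer_view.data.data;
     src.offset_bytes = i * (int64_t)sizeof(int32_t);
     src.size_bytes = (int64_t)sizeof(int32_t);

     struct ArrowDeviceBufferView dst;
     dst.private_data = out;
     dst.offset_bytes = 0;
     dst.size_bytes = (int64_t)sizeof(int32_t);

     // Copy a single int32 from `device` into the CPU memory pointed to by `out`.
     return ArrowDeviceBufferCopy(device, src, ArrowDeviceCpu(), dst);
   }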



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+
+#include "nanoarrow.h"
+
+#include "nanoarrow_device.h"
+
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error) {
+  const char* nanoarrow_runtime_version = ArrowNanoarrowVersion();
+  const char* nanoarrow_ipc_build_time_version = NANOARROW_VERSION;
+
+  if (strcmp(nanoarrow_runtime_version, nanoarrow_ipc_build_time_version) != 0) {
+    ArrowErrorSet(error, "Expected nanoarrow runtime version '%s' but found version '%s'",
+                  nanoarrow_ipc_build_time_version, nanoarrow_runtime_version);
+    return EINVAL;
+  }
+
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayInit(struct ArrowDeviceArray* device_array,
+                          struct ArrowDevice* device) {
+  memset(device_array, 0, sizeof(struct ArrowDeviceArray));
+  device_array->device_type = device->device_type;
+  device_array->device_id = device->device_id;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferInit(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferInit(dst);
+  dst->allocator = ArrowBufferAllocatorDefault();
+  NANOARROW_RETURN_NOT_OK(ArrowBufferAppend(
+      dst, ((uint8_t*)src.private_data) + src.offset_bytes, src.size_bytes));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferMove(struct ArrowDevice* device_src,
+                                               struct ArrowBuffer* src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferMove(src, dst);
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferCopy(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowDeviceBufferView dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+         ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+  return NANOARROW_OK;
+}
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int32_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int32_t);
+  device_buffer_view.size_bytes = sizeof(int32_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int64_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int64_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int64_t);
+  device_buffer_view.size_bytes = sizeof(int64_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(
+    struct ArrowDevice* device, struct ArrowArrayView* array_view) {
+  // Calculate buffer sizes or child lengths that require accessing the offsets
+  // buffer. Unlike the nanoarrow core default validation, this just checks the
+  // last buffer and doesn't set a nice error message (could implement those,
+  // too, later on).
+  int64_t offset_plus_length = array_view->offset + array_view->length;
+  int32_t last_offset32;
+  int64_t last_offset64;
+
+  switch (array_view->storage_type) {
+    case NANOARROW_TYPE_STRING:
+    case NANOARROW_TYPE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset32;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_STRING:
+    case NANOARROW_TYPE_LARGE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset64;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LIST:
+    case NANOARROW_TYPE_MAP:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+        if (array_view->children[0]->length < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_LIST:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+        if (array_view->children[0]->length < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+    default:
+      break;
+  }
+
+  // Recurse for children
+  for (int64_t i = 0; i < array_view->n_children; i++) {
+    NANOARROW_RETURN_NOT_OK(
+        ArrowDeviceArrayViewValidateDefault(device, array_view->children[i]));
+  }
+
+  return NANOARROW_OK;
+}
+
+ArrowErrorCode ArrowDeviceArrayViewSetArray(
+    struct ArrowDeviceArrayView* device_array_view, struct ArrowDeviceArray* device_array,
+    struct ArrowError* error) {
+  struct ArrowDevice* device =
+      ArrowDeviceResolve(device_array->device_type, device_array->device_id);
+  if (device == NULL) {
+    ArrowErrorSet(error, "Can't resolve device with type %d and identifier %ld",
+                  (int)device_array->device_type, (long)device_array->device_id);
+    return EINVAL;
+  }
+
+  // Wait on device_array to synchronize with the CPU
+  NANOARROW_RETURN_NOT_OK(device->synchronize_event(ArrowDeviceCpu(), device,
+                                                    device_array->sync_event, error));
+  device_array->sync_event = NULL;

Review Comment:
   why set `sync_event` to NULL here?



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.h:
##########
@@ -0,0 +1,439 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef NANOARROW_DEVICE_H_INCLUDED
+#define NANOARROW_DEVICE_H_INCLUDED
+
+#include "nanoarrow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// \defgroup nanoarrow_device-arrow-cdata Arrow C Device interface
+///
+/// The Arrow Device and Stream interfaces are part of the
+/// Arrow Columnar Format specification
+/// (https://arrow.apache.org/docs/format/Columnar.html). See the Arrow documentation for
+/// detailed documentation of these structures.
+///
+/// @{
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+/// \defgroup arrow-device-types Device Types
+/// These macros are compatible with the dlpack DLDeviceType values,
+/// using the same value for each enum as the equivalent kDL<type>
+/// from dlpack.h. This list should continue to be kept in sync with
+/// the equivalent dlpack.h enum values over time to ensure
+/// compatibility, rather than potentially diverging.
+///
+/// To ensure predictability with the ABI we use macros instead of
+/// an enum so the storage type is not compiler dependent.
+///
+/// @{
+
+/// \brief DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+/// \brief CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+/// \brief CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+/// \brief Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+/// \brief OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+/// \brief Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+/// \brief Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+/// \brief Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+/// \brief ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+/// \brief Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+/// \brief Reserved for extension
+///
+/// used to quickly test extension devices, semantics
+/// can differ based on the implementation
+#define ARROW_DEVICE_EXT_DEV 12
+/// \brief CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+/// \brief unified shared memory allocated on a oneAPI
+/// non-partitioned device.
+///
+/// A call to the oneAPI runtime is required to determine the device
+/// type, the USM allocation type, and the sycl context it is bound to.
+#define ARROW_DEVICE_ONEAPI 14
+/// \brief GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+/// \brief Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+/// @}
+
+/// \brief Struct for passing an Arrow Array alongside
+/// device memory information.
+struct ArrowDeviceArray {

Review Comment:
   Should this be updated to the newer, briefer version of the documentation, which leaves the detailed explanations to the official spec in prose rather than putting them in the comments?



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+
+#include "nanoarrow.h"
+
+#include "nanoarrow_device.h"
+
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error) {
+  const char* nanoarrow_runtime_version = ArrowNanoarrowVersion();
+  const char* nanoarrow_ipc_build_time_version = NANOARROW_VERSION;
+
+  if (strcmp(nanoarrow_runtime_version, nanoarrow_ipc_build_time_version) != 0) {
+    ArrowErrorSet(error, "Expected nanoarrow runtime version '%s' but found version '%s'",
+                  nanoarrow_ipc_build_time_version, nanoarrow_runtime_version);
+    return EINVAL;
+  }
+
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayInit(struct ArrowDeviceArray* device_array,
+                          struct ArrowDevice* device) {
+  memset(device_array, 0, sizeof(struct ArrowDeviceArray));
+  device_array->device_type = device->device_type;
+  device_array->device_id = device->device_id;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferInit(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferInit(dst);
+  dst->allocator = ArrowBufferAllocatorDefault();
+  NANOARROW_RETURN_NOT_OK(ArrowBufferAppend(
+      dst, ((uint8_t*)src.private_data) + src.offset_bytes, src.size_bytes));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferMove(struct ArrowDevice* device_src,
+                                               struct ArrowBuffer* src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferMove(src, dst);
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferCopy(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowDeviceBufferView dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+         ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+  return NANOARROW_OK;
+}
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int32_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int32_t);
+  device_buffer_view.size_bytes = sizeof(int32_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int64_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int64_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int64_t);
+  device_buffer_view.size_bytes = sizeof(int64_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(

Review Comment:
   This should include a comment stating that it will copy data from the device to the CPU in order to perform the validation, which means synchronization may need to happen before it can be called.


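   For illustration, a minimal caller-side sketch of the ordering this implies (the helper name is hypothetical and assumes a populated `device_array` plus its schema): `ArrowDeviceArrayViewSetArray()` waits on the array's `sync_event` before the validation path issues its small device-to-CPU copies.

```c
#include "nanoarrow_device.h"

// Hypothetical helper: let ArrowDeviceArrayViewSetArray() synchronize on
// device_array->sync_event before the offset checks copy slices to the CPU.
static ArrowErrorCode ValidateDeviceArray(struct ArrowDeviceArray* device_array,
                                          struct ArrowSchema* schema,
                                          struct ArrowError* error) {
  struct ArrowDeviceArrayView view;
  ArrowDeviceArrayViewInit(&view);
  int code = ArrowArrayViewInitFromSchema(&view.array_view, schema, error);
  if (code == NANOARROW_OK) {
    code = ArrowDeviceArrayViewSetArray(&view, device_array, error);
  }
  ArrowDeviceArrayViewReset(&view);
  return code;
}
```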

##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.h:
##########
@@ -0,0 +1,439 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef NANOARROW_DEVICE_H_INCLUDED
+#define NANOARROW_DEVICE_H_INCLUDED
+
+#include "nanoarrow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// \defgroup nanoarrow_device-arrow-cdata Arrow C Device interface
+///
+/// The Arrow Device and Stream interfaces are part of the
+/// Arrow Columnar Format specification
+/// (https://arrow.apache.org/docs/format/Columnar.html). See the Arrow documentation for
+/// detailed documentation of these structures.
+///
+/// @{
+
+#ifndef ARROW_C_DEVICE_DATA_INTERFACE
+#define ARROW_C_DEVICE_DATA_INTERFACE
+
+/// \defgroup arrow-device-types Device Types
+/// These macros are compatible with the dlpack DLDeviceType values,
+/// using the same value for each enum as the equivalent kDL<type>
+/// from dlpack.h. This list should continue to be kept in sync with
+/// the equivalent dlpack.h enum values over time to ensure
+/// compatibility, rather than potentially diverging.
+///
+/// To ensure predictability with the ABI we use macros instead of
+/// an enum so the storage type is not compiler dependent.
+///
+/// @{
+
+/// \brief DeviceType for the allocated memory
+typedef int32_t ArrowDeviceType;
+
+/// \brief CPU device, same as using ArrowArray directly
+#define ARROW_DEVICE_CPU 1
+/// \brief CUDA GPU Device
+#define ARROW_DEVICE_CUDA 2
+/// \brief Pinned CUDA CPU memory by cudaMallocHost
+#define ARROW_DEVICE_CUDA_HOST 3
+/// \brief OpenCL Device
+#define ARROW_DEVICE_OPENCL 4
+/// \brief Vulkan buffer for next-gen graphics
+#define ARROW_DEVICE_VULKAN 7
+/// \brief Metal for Apple GPU
+#define ARROW_DEVICE_METAL 8
+/// \brief Verilog simulator buffer
+#define ARROW_DEVICE_VPI 9
+/// \brief ROCm GPUs for AMD GPUs
+#define ARROW_DEVICE_ROCM 10
+/// \brief Pinned ROCm CPU memory allocated by hipMallocHost
+#define ARROW_DEVICE_ROCM_HOST 11
+/// \brief Reserved for extension
+///
+/// used to quickly test extension devices; semantics
+/// can differ based on the implementation
+#define ARROW_DEVICE_EXT_DEV 12
+/// \brief CUDA managed/unified memory allocated by cudaMallocManaged
+#define ARROW_DEVICE_CUDA_MANAGED 13
+/// \brief unified shared memory allocated on a oneAPI
+/// non-partitioned device.
+///
+/// A call to the oneAPI runtime is required to determine the device
+/// type, the USM allocation type, and the sycl context it is bound to.
+#define ARROW_DEVICE_ONEAPI 14
+/// \brief GPU support for next-gen WebGPU standard
+#define ARROW_DEVICE_WEBGPU 15
+/// \brief Qualcomm Hexagon DSP
+#define ARROW_DEVICE_HEXAGON 16
+
+/// @}
+
+/// \brief Struct for passing an Arrow Array alongside
+/// device memory information.
+struct ArrowDeviceArray {
+  /// \brief the Allocated Array
+  ///
+  /// the buffers in the array (along with the buffers of any
+  /// children) are what is allocated on the device.
+  ///
+  /// the private_data and release callback of the arrow array
+  /// should contain any necessary information and structures
+  /// related to freeing the array according to the device it
+  /// is allocated on, rather than having a separate release
+  /// callback embedded here.
+  struct ArrowArray array;
+  /// \brief The device id to identify a specific device
+  /// if multiple of this type are on the system.
+  ///
+  /// the semantics of the id will be hardware dependent.
+  int64_t device_id;
+  /// \brief The type of device which can access this memory.
+  ArrowDeviceType device_type;
+  /// \brief An event-like object to synchronize on if needed.
+  ///
+  /// Many devices, like GPUs, are primarily asynchronous with
+  /// respect to CPU processing. As such, in order to safely access
+  /// memory, it is often necessary to have an object to synchronize
+  /// processing on. Since different devices will use different types
+  /// to specify this we use a void* that can be coerced into
+  /// whatever the device appropriate type is (e.g. cudaEvent_t for
+  /// CUDA and hipEvent_t for HIP).
+  ///
+  /// If synchronization is not needed this can be null. If this is
+  /// non-null, then it should be used to call the appropriate sync
+  /// method for the device (e.g. cudaStreamWaitEvent / hipStreamWaitEvent).
+  ///
+  /// Expected type to coerce this void* to depending on device type:
+  ///   cuda: cudaEvent_t*
+  ///   ROCm: hipEvent_t*
+  ///   OpenCL: cl_event*
+  ///   Vulkan: VkEvent*
+  ///   Metal: MTLEvent*
+  ///   OneAPI: sycl::event*
+  ///
+  void* sync_event;
+  /// \brief Reserved bytes for future expansion.
+  ///
+  /// As non-CPU development expands we can update this struct
+  /// without ABI breaking changes. This also rounds out the
+  /// total size of this struct to be 128 bytes (power of 2)
+  /// on 64-bit systems. These bytes should be zero'd out after
+  /// allocation in order to ensure safe evolution of the ABI in
+  /// the future.
+  int64_t reserved[3];
+};
+
+#endif  // ARROW_C_DEVICE_DATA_INTERFACE
+
+#ifndef ARROW_C_DEVICE_STREAM_INTERFACE
+#define ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Equivalent to ArrowArrayStream, but for ArrowDeviceArrays.
+///
+/// This stream is intended to provide a stream of data on a single
+/// device; if a producer wants data to be produced on multiple devices,
+/// then multiple streams should be provided, one per device.
+struct ArrowDeviceArrayStream {
+  /// \brief The device that this stream produces data on.
+  ///
+  /// All ArrowDeviceArrays that are produced by this
+  /// stream should have the same device_type as set
+  /// here. Including it here in the stream object is
+  /// a convenience to allow consumers simpler processing
+  /// since they can assume all arrays that result from
+  /// this stream to be on this device type.
+  ArrowDeviceType device_type;
+
+  /// \brief Callback to get the stream schema
+  /// (will be the same for all arrays in the stream).
+  ///
+  /// If successful, the ArrowSchema must be released independently from the stream.
+  /// The schema should be accessible via CPU memory.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct to export the schema to
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_schema)(struct ArrowDeviceArrayStream* self, struct ArrowSchema* out);
+
+  /// \brief Callback to get the next array
+  ///
+  /// If there is no error and the returned array has been released, the stream
+  /// has ended. If successful, the ArrowArray must be released independently
+  /// from the stream.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \param[out] out C struct where to export the Array and device info
+  /// \return 0 if successful, an `errno`-compatible error code otherwise.
+  int (*get_next)(struct ArrowDeviceArrayStream* self, struct ArrowDeviceArray* out);
+
+  /// \brief Callback to get optional detailed error information.
+  ///
+  /// This must only be called if the last stream operation failed
+  /// with a non-0 return code.
+  ///
+  /// The returned pointer is only valid until the next operation on this stream
+  /// (including release).
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  /// \return pointer to a null-terminated character array describing
+  /// the last error, or NULL if no description is available.
+  const char* (*get_last_error)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Release callback: release the stream's own resources.
+  ///
+  /// Note that arrays returned by `get_next` must be individually released.
+  ///
+  /// \param[in] self The ArrowDeviceArrayStream object itself
+  void (*release)(struct ArrowDeviceArrayStream* self);
+
+  /// \brief Opaque producer-specific data
+  void* private_data;
+};
+
+#endif  // ARROW_C_DEVICE_STREAM_INTERFACE
+
+/// \brief Move the contents of src into dst and set src->array.release to NULL
+static inline void ArrowDeviceArrayMove(struct ArrowDeviceArray* src,
+                                        struct ArrowDeviceArray* dst) {
+  memcpy(dst, src, sizeof(struct ArrowDeviceArray));
+  src->array.release = 0;
+}
+
+/// @}
+
+#ifdef NANOARROW_NAMESPACE
+
+#define ArrowDeviceCheckRuntime \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCheckRuntime)
+#define ArrowDeviceArrayInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayInit)
+#define ArrowDeviceArrayViewInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewInit)
+#define ArrowDeviceArrayViewReset \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewReset)
+#define ArrowDeviceArrayViewSetArray \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewSetArray)
+#define ArrowDeviceArrayViewCopy \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopy)
+#define ArrowDeviceArrayViewCopyRequired \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayViewCopyRequired)
+#define ArrowDeviceArrayTryMove \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceArrayTryMove)
+#define ArrowDeviceResolve NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceResolve)
+#define ArrowDeviceCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceCpu)
+#define ArrowDeviceInitCpu NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceInitCpu)
+#define ArrowDeviceBufferInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferInit)
+#define ArrowDeviceBufferMove NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferMove)
+#define ArrowDeviceBufferCopy NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBufferCopy)
+#define ArrowDeviceBasicArrayStreamInit \
+  NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDeviceBasicArrayStreamInit)
+
+#endif
+
+/// \defgroup nanoarrow_device Nanoarrow Device extension
+///
+/// Except where noted, objects are not thread-safe and clients should
+/// take care to serialize accesses to methods.
+///
+/// @{
+
+/// \brief Checks the nanoarrow runtime to make sure the run/build versions match
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error);
+
+/// \brief A description of a buffer
+struct ArrowDeviceBufferView {
+  /// \brief Device-defined handle for a buffer.
+  ///
+  /// For the CPU device, this is a normal memory address; for all other types that are
+  /// currently supported, this is a device memory address on which CPU-like arithmetic
+  /// can be performed. This may not be true for future devices (i.e., it may be a pointer
+  /// to some buffer abstraction if the concept of a memory address does not exist or
+  /// is impractical).
+  const void* private_data;
+
+  /// \brief An offset into the buffer handle defined by private_data
+  int64_t offset_bytes;
+
+  /// \brief The size of the buffer in bytes
+  int64_t size_bytes;
+};
+
+/// \brief A Device wrapper with callbacks for basic memory management tasks
+///
+/// All device objects are currently implemented as singletons; however, this
+/// may change as implementations progress.
+struct ArrowDevice {
+  /// \brief The device type integer identifier (see ArrowDeviceArray)
+  ArrowDeviceType device_type;
+
+  /// \brief The device identifier (see ArrowDeviceArray)
+  int64_t device_id;
+
+  /// \brief Initialize an owning buffer from existing content
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// copying existing content.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_init)(struct ArrowDevice* device_src,
+                                struct ArrowDeviceBufferView src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);
+
+  /// \brief Move an owning buffer to a device
+  ///
+  /// Creates a new buffer whose data member can be accessed by the GPU by
+  /// moving an existing buffer. If NANOARROW_OK is returned, src will have
+  /// been released or moved by the implementation and dst must be released by
+  /// the caller.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_move)(struct ArrowDevice* device_src, struct ArrowBuffer* src,
+                                struct ArrowDevice* device_dst, struct ArrowBuffer* dst);
+
+  /// \brief Copy a section of memory into a preallocated buffer
+  ///
+  /// As opposed to the other buffer operations, this is designed to support
+  /// copying very small slices of memory.
+  /// Implementations must check device_src and device_dst and return ENOTSUP if
+  /// not prepared to handle this operation.
+  ArrowErrorCode (*buffer_copy)(struct ArrowDevice* device_src,
+                                struct ArrowDeviceBufferView src,
+                                struct ArrowDevice* device_dst,
+                                struct ArrowDeviceBufferView dst);
+
+  /// \brief Check if a copy is required to move between devices
+  ///
+  /// Returns 1 (copy is required), 0 (copy not required; move is OK), or -1 (don't know)
+  int (*copy_required)(struct ArrowDevice* device_src, struct ArrowArrayView* src,
+                       struct ArrowDevice* device_dst);
+
+  /// \brief Wait for an event
+  ///
+  /// Implementations should handle at least waiting on the CPU host.
+  /// Implementations do not have to handle a NULL sync_event.
+  ArrowErrorCode (*synchronize_event)(struct ArrowDevice* device,
+                                      struct ArrowDevice* device_event, void* sync_event,
+                                      struct ArrowError* error);

Review Comment:
   why is the event itself an `ArrowDevice`?


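   For reference, a condensed sketch of how the accompanying .c file in this PR invokes the callback from `ArrowDeviceArrayViewSetArray()` (the wrapper name below is illustrative only): the device owning the event is passed as `device_event`, and the device the data should become visible to, the CPU in this case, is passed as `device`.

```c
#include "nanoarrow_device.h"

// Illustrative wrapper (not part of this PR) mirroring the call made in
// ArrowDeviceArrayViewSetArray(): wait until the producing device's work on
// device_array is visible to the CPU before touching its buffers.
static ArrowErrorCode WaitForCpuVisibility(struct ArrowDevice* device,
                                           struct ArrowDeviceArray* device_array,
                                           struct ArrowError* error) {
  return device->synchronize_event(ArrowDeviceCpu(), device,
                                   device_array->sync_event, error);
}
```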

##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_metal.cc:
##########
@@ -0,0 +1,331 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#define NS_PRIVATE_IMPLEMENTATION
+#define MTL_PRIVATE_IMPLEMENTATION
+#include <Metal/Metal.hpp>
+
+#include "nanoarrow_device.hpp"
+
+#include "nanoarrow_device_metal.h"
+
+// If non-null, caller must ->release() the return value. This doesn't
+// release the underlying memory (which must be managed separately).
+static MTL::Buffer* ArrowDeviceMetalWrapBufferNonOwning(MTL::Device* mtl_device,
+                                                        const void* arbitrary_addr,
+                                                        int64_t size_bytes) {
+  // We can wrap any zero-size buffer
+  if (size_bytes == 0) {
+    return mtl_device->newBuffer(0, MTL::ResourceStorageModeShared);
+  }
+
+  // Cache the page size from the system call
+  static int pagesize = 0;
+  if (pagesize == 0) {
+    pagesize = getpagesize();
+  }
+
+  int64_t allocation_size;
+  if (size_bytes % pagesize == 0) {
+    allocation_size = size_bytes;
+  } else {
+    allocation_size = ((size_bytes / pagesize) + 1) * pagesize;
+  }
+
+  // Will return nullptr if the memory is improperly aligned
+  return mtl_device->newBuffer(arbitrary_addr, allocation_size,
+                               MTL::ResourceStorageModeShared, nullptr);
+}
+
+static uint8_t* ArrowDeviceMetalAllocatorReallocate(
+    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
+    int64_t new_size) {
+  // Cache the page size from the system call
+  static int pagesize = 0;
+  if (pagesize == 0) {
+    pagesize = getpagesize();
+  }
+
+  int64_t allocation_size;
+  if (new_size % pagesize == 0) {
+    allocation_size = new_size;
+  } else {
+    allocation_size = ((new_size / pagesize) + 1) * pagesize;
+  }
+
+  // If growing an existing buffer but the allocation size is still big enough,
+  // return the same pointer and do nothing.
+  if (ptr != nullptr && new_size >= old_size && new_size <= allocation_size) {
+    return ptr;
+  }
+
+  int64_t copy_size;
+  if (new_size > old_size) {
+    copy_size = old_size;
+  } else {
+    copy_size = new_size;
+  }
+
+  void* new_ptr = nullptr;
+  posix_memalign(&new_ptr, pagesize, allocation_size);
+  if (new_ptr != nullptr && ptr != nullptr) {
+    memcpy(new_ptr, ptr, copy_size);
+  }
+
+  if (ptr != nullptr) {
+    free(ptr);
+  }
+
+  return reinterpret_cast<uint8_t*>(new_ptr);
+}
+
+static void ArrowDeviceMetalAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                          uint8_t* ptr, int64_t old_size) {
+  free(ptr);
+}
+
+void ArrowDeviceMetalInitBuffer(struct ArrowBuffer* buffer) {
+  buffer->allocator.reallocate = &ArrowDeviceMetalAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceMetalAllocatorFree;
+  buffer->allocator.private_data = nullptr;
+  buffer->data = nullptr;

Review Comment:
   device_id? Same as in the cuda code, should we have a `TODO` here for representing the device id, and possibly the device itself?



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,376 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;

Review Comment:
   `Reallocate` just calls free? Shouldn't this allocate a new buffer and then copy the old contents?


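   A hedged sketch of the allocate-then-copy behaviour this comment suggests (the function name is illustrative; this is not what the PR currently implements):

```c
#include <cuda_runtime_api.h>

#include "nanoarrow_device.h"

// Grow a device allocation by allocating a new buffer, copying the old
// contents, and only then freeing the original (so a failure leaves ptr intact).
static uint8_t* ArrowDeviceCudaAllocatorReallocateSketch(
    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
    int64_t new_size) {
  void* new_ptr = NULL;
  if (cudaMalloc(&new_ptr, (size_t)new_size) != cudaSuccess) {
    return NULL;
  }

  if (ptr != NULL) {
    int64_t copy_size = old_size < new_size ? old_size : new_size;
    if (cudaMemcpy(new_ptr, ptr, (size_t)copy_size, cudaMemcpyDeviceToDevice) !=
        cudaSuccess) {
      cudaFree(new_ptr);
      return NULL;
    }
    cudaFree(ptr);
  }

  return (uint8_t*)new_ptr;
}
```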

##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,376 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowBuffer* buffer,
+                                                    int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMalloc(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaAllocatorFree;

Review Comment:
   Allocator should probably be able to tell what device it's allocating on, right?


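   A hedged sketch of one way the device id could travel with the allocator through `private_data`, as suggested here (the struct and function names are hypothetical and not part of this PR):

```c
#include <cuda_runtime_api.h>

#include "nanoarrow_device.h"

// Hypothetical per-buffer state stashed in allocator.private_data so that the
// free callback can select the owning device before releasing the memory.
struct ArrowDeviceCudaAllocatorPrivate {
  int64_t device_id;
};

static void ArrowDeviceCudaAllocatorFreeSketch(struct ArrowBufferAllocator* allocator,
                                               uint8_t* ptr, int64_t old_size) {
  struct ArrowDeviceCudaAllocatorPrivate* private_data =
      (struct ArrowDeviceCudaAllocatorPrivate*)allocator->private_data;
  if (ptr != NULL) {
    cudaSetDevice((int)private_data->device_id);
    cudaFree(ptr);
  }
  ArrowFree(private_data);
}
```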

##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,376 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowBuffer* buffer,
+                                                    int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMalloc(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+static void ArrowDeviceCudaHostAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                             uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFreeHost(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaHostAllocatorReallocate(
+    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
+    int64_t new_size) {
+  ArrowDeviceCudaHostAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaHostAllocateBuffer(struct ArrowBuffer* buffer,
+                                                        int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMallocHost(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaHostAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaHostAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+// TODO: All these buffer copiers would benefit from cudaMemcpyAsync but there is
+// no good way to incorporate that just yet
+
+static ArrowErrorCode ArrowDeviceCudaBufferInit(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowBuffer* dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    memcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaBufferCopy(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowDeviceBufferView dst) {
+  // This is all just cudaMemcpy or memcpy
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static int ArrowDeviceCudaCopyRequired(struct ArrowDevice* device_src,
+                                       struct ArrowArrayView* src,
+                                       struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    // Copy: we can't assume the memory has been registered. A user can force
+    // this by registering the memory and setting device->device_type manually.
+    // A copy will ensure all buffers are allocated with cudaMallocHost().
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Move: the array's release callback is responsible for cudaFreeHost or
+    // deregistration (or perhaps this has been handled at a higher level)
+    return 0;
+
+  } else {
+    // Fall back to the other device's implementation
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaSynchronize(struct ArrowDevice* device,
+                                                 struct ArrowDevice* device_event,
+                                                 void* sync_event,
+                                                 struct ArrowError* error) {
+  if (sync_event == NULL) {
+    return NANOARROW_OK;
+  }
+
+  if (device_event->device_type != ARROW_DEVICE_CUDA &&
+      device_event->device_type != ARROW_DEVICE_CUDA_HOST) {
+    return ENOTSUP;
+  }
+
+  // Pointer vs. not pointer...is there memory ownership to consider here?
+  cudaEvent_t* cuda_event = (cudaEvent_t*)sync_event;
+  cudaError_t result = cudaEventSynchronize(*cuda_event);
+
+  if (result != cudaSuccess) {
+    ArrowErrorSet(error, "cudaEventSynchronize() failed: %s", cudaGetErrorString(result));
+    return EINVAL;
+  }
+
+  cudaEventDestroy(*cuda_event);

Review Comment:
   @kkraus14 is correct. The release callback on `ArrowDeviceArray` should clean up the event it contains.
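
   As a sketch of what that could look like (purely illustrative: the struct and function
   names below are hypothetical and not part of this PR; it assumes <cuda_runtime_api.h>
   and "nanoarrow.h" are included), the exported array's private_data could own the event
   and destroy it before delegating to the wrapped array's release:

   ```c
   // Hypothetical private_data for an exported device array that owns a CUDA event
   struct HypotheticalEventHolder {
     cudaEvent_t event;
     struct ArrowArray wrapped;  // the original array whose release we delegate to
   };

   static void HypotheticalArrayRelease(struct ArrowArray* array) {
     struct HypotheticalEventHolder* holder =
         (struct HypotheticalEventHolder*)array->private_data;
     // Clean up the event first, then release the wrapped array and the holder itself
     cudaEventDestroy(holder->event);
     holder->wrapped.release(&holder->wrapped);
     ArrowFree(holder);
     array->release = NULL;
   }
   ```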



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device.c:
##########
@@ -0,0 +1,518 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <errno.h>
+
+#include "nanoarrow.h"
+
+#include "nanoarrow_device.h"
+
+ArrowErrorCode ArrowDeviceCheckRuntime(struct ArrowError* error) {
+  const char* nanoarrow_runtime_version = ArrowNanoarrowVersion();
+  const char* nanoarrow_device_build_time_version = NANOARROW_VERSION;
+
+  if (strcmp(nanoarrow_runtime_version, nanoarrow_device_build_time_version) != 0) {
+    ArrowErrorSet(error, "Expected nanoarrow runtime version '%s' but found version '%s'",
+                  nanoarrow_device_build_time_version, nanoarrow_runtime_version);
+    return EINVAL;
+  }
+
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayInit(struct ArrowDeviceArray* device_array,
+                          struct ArrowDevice* device) {
+  memset(device_array, 0, sizeof(struct ArrowDeviceArray));
+  device_array->device_type = device->device_type;
+  device_array->device_id = device->device_id;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferInit(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferInit(dst);
+  dst->allocator = ArrowBufferAllocatorDefault();
+  NANOARROW_RETURN_NOT_OK(ArrowBufferAppend(
+      dst, ((uint8_t*)src.private_data) + src.offset_bytes, src.size_bytes));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferMove(struct ArrowDevice* device_src,
+                                               struct ArrowBuffer* src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowBuffer* dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  ArrowBufferMove(src, dst);
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceCpuBufferCopy(struct ArrowDevice* device_src,
+                                               struct ArrowDeviceBufferView src,
+                                               struct ArrowDevice* device_dst,
+                                               struct ArrowDeviceBufferView dst) {
+  if (device_dst->device_type != ARROW_DEVICE_CPU ||
+      device_src->device_type != ARROW_DEVICE_CPU) {
+    return ENOTSUP;
+  }
+
+  memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+         ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+  return NANOARROW_OK;
+}
+
+static int ArrowDeviceCpuCopyRequired(struct ArrowDevice* device_src,
+                                      struct ArrowArrayView* src,
+                                      struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CPU) {
+    return 0;
+  } else {
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCpuSynchronize(struct ArrowDevice* device,
+                                                struct ArrowDevice* device_event,
+                                                void* sync_event,
+                                                struct ArrowError* error) {
+  switch (device_event->device_type) {
+    case ARROW_DEVICE_CPU:
+      if (sync_event != NULL) {
+        ArrowErrorSet(error, "Expected NULL sync_event for ARROW_DEVICE_CPU but got %p",
+                      sync_event);
+        return EINVAL;
+      } else {
+        return NANOARROW_OK;
+      }
+    default:
+      return device_event->synchronize_event(device_event, device, sync_event, error);
+  }
+}
+
+static void ArrowDeviceCpuRelease(struct ArrowDevice* device) { device->release = NULL; }
+
+struct ArrowDevice* ArrowDeviceCpu(void) {
+  static struct ArrowDevice* cpu_device_singleton = NULL;
+  if (cpu_device_singleton == NULL) {
+    cpu_device_singleton = (struct ArrowDevice*)ArrowMalloc(sizeof(struct ArrowDevice));
+    ArrowDeviceInitCpu(cpu_device_singleton);
+  }
+
+  return cpu_device_singleton;
+}
+
+void ArrowDeviceInitCpu(struct ArrowDevice* device) {
+  device->device_type = ARROW_DEVICE_CPU;
+  device->device_id = 0;
+  device->buffer_init = &ArrowDeviceCpuBufferInit;
+  device->buffer_move = &ArrowDeviceCpuBufferMove;
+  device->buffer_copy = &ArrowDeviceCpuBufferCopy;
+  device->copy_required = &ArrowDeviceCpuCopyRequired;
+  device->synchronize_event = &ArrowDeviceCpuSynchronize;
+  device->release = &ArrowDeviceCpuRelease;
+  device->private_data = NULL;
+}
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+struct ArrowDevice* ArrowDeviceMetalDefaultDevice(void);
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+struct ArrowDevice* ArrowDeviceCuda(ArrowDeviceType device_type, int64_t device_id);
+#endif
+
+struct ArrowDevice* ArrowDeviceResolve(ArrowDeviceType device_type, int64_t device_id) {
+  if (device_type == ARROW_DEVICE_CPU && device_id == 0) {
+    return ArrowDeviceCpu();
+  }
+
+#ifdef NANOARROW_DEVICE_WITH_METAL
+  if (device_type == ARROW_DEVICE_METAL) {
+    struct ArrowDevice* default_device = ArrowDeviceMetalDefaultDevice();
+    if (device_id == default_device->device_id) {
+      return default_device;
+    }
+  }
+#endif
+
+#ifdef NANOARROW_DEVICE_WITH_CUDA
+  if (device_type == ARROW_DEVICE_CUDA || device_type == ARROW_DEVICE_CUDA_HOST) {
+    return ArrowDeviceCuda(device_type, device_id);
+  }
+#endif
+
+  return NULL;
+}
+
+ArrowErrorCode ArrowDeviceBufferInit(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_init(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_init(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferMove(struct ArrowDevice* device_src,
+                                     struct ArrowBuffer* src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowBuffer* dst) {
+  int result = device_dst->buffer_move(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_move(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+ArrowErrorCode ArrowDeviceBufferCopy(struct ArrowDevice* device_src,
+                                     struct ArrowDeviceBufferView src,
+                                     struct ArrowDevice* device_dst,
+                                     struct ArrowDeviceBufferView dst) {
+  int result = device_dst->buffer_copy(device_src, src, device_dst, dst);
+  if (result == ENOTSUP) {
+    result = device_src->buffer_copy(device_src, src, device_dst, dst);
+  }
+
+  return result;
+}
+
+struct ArrowBasicDeviceArrayStreamPrivate {
+  struct ArrowDevice* device;
+  struct ArrowArrayStream naive_stream;
+};
+
+static int ArrowDeviceBasicArrayStreamGetSchema(
+    struct ArrowDeviceArrayStream* array_stream, struct ArrowSchema* schema) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_schema(&private_data->naive_stream, schema);
+}
+
+static int ArrowDeviceBasicArrayStreamGetNext(struct ArrowDeviceArrayStream* array_stream,
+                                              struct ArrowDeviceArray* device_array) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+
+  struct ArrowArray tmp;
+  NANOARROW_RETURN_NOT_OK(
+      private_data->naive_stream.get_next(&private_data->naive_stream, &tmp));
+  ArrowDeviceArrayInit(device_array, private_data->device);
+  ArrowArrayMove(&tmp, &device_array->array);
+  return NANOARROW_OK;
+}
+
+static const char* ArrowDeviceBasicArrayStreamGetLastError(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  return private_data->naive_stream.get_last_error(&private_data->naive_stream);
+}
+
+static void ArrowDeviceBasicArrayStreamRelease(
+    struct ArrowDeviceArrayStream* array_stream) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)array_stream->private_data;
+  private_data->naive_stream.release(&private_data->naive_stream);
+  ArrowFree(private_data);
+  array_stream->release = NULL;
+}
+
+ArrowErrorCode ArrowDeviceBasicArrayStreamInit(
+    struct ArrowDeviceArrayStream* device_array_stream,
+    struct ArrowArrayStream* array_stream, struct ArrowDevice* device) {
+  struct ArrowBasicDeviceArrayStreamPrivate* private_data =
+      (struct ArrowBasicDeviceArrayStreamPrivate*)ArrowMalloc(
+          sizeof(struct ArrowBasicDeviceArrayStreamPrivate));
+  if (private_data == NULL) {
+    return ENOMEM;
+  }
+
+  private_data->device = device;
+  ArrowArrayStreamMove(array_stream, &private_data->naive_stream);
+
+  device_array_stream->device_type = device->device_type;
+  device_array_stream->get_schema = &ArrowDeviceBasicArrayStreamGetSchema;
+  device_array_stream->get_next = &ArrowDeviceBasicArrayStreamGetNext;
+  device_array_stream->get_last_error = &ArrowDeviceBasicArrayStreamGetLastError;
+  device_array_stream->release = &ArrowDeviceBasicArrayStreamRelease;
+  device_array_stream->private_data = private_data;
+  return NANOARROW_OK;
+}
+
+void ArrowDeviceArrayViewInit(struct ArrowDeviceArrayView* device_array_view) {
+  memset(device_array_view, 0, sizeof(struct ArrowDeviceArrayView));
+}
+
+void ArrowDeviceArrayViewReset(struct ArrowDeviceArrayView* device_array_view) {
+  ArrowArrayViewReset(&device_array_view->array_view);
+  device_array_view->device = NULL;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt32(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int32_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int32_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int32_t);
+  device_buffer_view.size_bytes = sizeof(int32_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceBufferGetInt64(struct ArrowDevice* device,
+                                                struct ArrowBufferView buffer_view,
+                                                int64_t i, int64_t* out) {
+  struct ArrowDeviceBufferView device_buffer_view;
+  void* sync_event = NULL;
+
+  struct ArrowDeviceBufferView out_view;
+  out_view.private_data = out;
+  out_view.offset_bytes = 0;
+  out_view.size_bytes = sizeof(int64_t);
+
+  device_buffer_view.private_data = buffer_view.data.data;
+  device_buffer_view.offset_bytes = i * sizeof(int64_t);
+  device_buffer_view.size_bytes = sizeof(int64_t);
+  NANOARROW_RETURN_NOT_OK(
+      ArrowDeviceBufferCopy(device, device_buffer_view, ArrowDeviceCpu(), out_view));
+  return NANOARROW_OK;
+}
+
+static ArrowErrorCode ArrowDeviceArrayViewValidateDefault(
+    struct ArrowDevice* device, struct ArrowArrayView* array_view) {
+  // Calculate buffer sizes or child lengths that require accessing the offsets
+  // buffer. Unlike the nanoarrow core default validation, this just checks the
+  // last buffer and doesn't set a nice error message (those could be implemented
+  // later on).
+  int64_t offset_plus_length = array_view->offset + array_view->length;
+  int32_t last_offset32;
+  int64_t last_offset64;
+
+  switch (array_view->storage_type) {
+    case NANOARROW_TYPE_STRING:
+    case NANOARROW_TYPE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset32;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_STRING:
+    case NANOARROW_TYPE_LARGE_BINARY:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+
+        // If the data buffer size is unknown, assign it; otherwise, check it
+        if (array_view->buffer_views[2].size_bytes == -1) {
+          array_view->buffer_views[2].size_bytes = last_offset64;
+        } else if (array_view->buffer_views[2].size_bytes < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LIST:
+    case NANOARROW_TYPE_MAP:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt32(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset32));
+        if (array_view->children[0]->length < last_offset32) {
+          return EINVAL;
+        }
+      }
+      break;
+
+    case NANOARROW_TYPE_LARGE_LIST:
+      if (array_view->buffer_views[1].size_bytes != 0) {
+        NANOARROW_RETURN_NOT_OK(ArrowDeviceBufferGetInt64(
+            device, array_view->buffer_views[1], offset_plus_length, &last_offset64));
+        if (array_view->children[0]->length < last_offset64) {
+          return EINVAL;
+        }
+      }
+      break;
+    default:
+      break;
+  }
+
+  // Recurse for children
+  for (int64_t i = 0; i < array_view->n_children; i++) {
+    NANOARROW_RETURN_NOT_OK(
+        ArrowDeviceArrayViewValidateDefault(device, array_view->children[i]));
+  }
+
+  return NANOARROW_OK;
+}
+
+ArrowErrorCode ArrowDeviceArrayViewSetArray(
+    struct ArrowDeviceArrayView* device_array_view, struct ArrowDeviceArray* device_array,
+    struct ArrowError* error) {
+  struct ArrowDevice* device =
+      ArrowDeviceResolve(device_array->device_type, device_array->device_id);
+  if (device == NULL) {
+    ArrowErrorSet(error, "Can't resolve device with type %d and identifier %ld",
+                  (int)device_array->device_type, (long)device_array->device_id);
+    return EINVAL;
+  }
+
+  // Wait on device_array to synchronize with the CPU
+  NANOARROW_RETURN_NOT_OK(device->synchronize_event(ArrowDeviceCpu(), device,
+                                                    device_array->sync_event, error));
+  device_array->sync_event = NULL;
+
+  // Set the device array device
+  device_array_view->device = device;
+
+  // nanoarrow's minimal validation is fine here (sets buffer sizes for non offset-buffer
+  // types and errors for invalid ones)
+  NANOARROW_RETURN_NOT_OK(ArrowArrayViewSetArrayMinimal(&device_array_view->array_view,
+                                                        &device_array->array, error));
+  // Run custom validator that copies memory to the CPU where required.
+  // The custom implementation doesn't set nice error messages yet.
+  NANOARROW_RETURN_NOT_OK_WITH_ERROR(
+      ArrowDeviceArrayViewValidateDefault(device, &device_array_view->array_view), error);

Review Comment:
   Can the validation be optional? If so, could the synchronization on the event happen only when the caller actually requests validation?
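
   Something along these lines (hypothetical signatures that only sketch the idea; this PR
   does not define them) would let callers skip both the validation and the synchronization
   it requires:

   ```c
   // Hypothetical split: setting the array never dereferences device memory,
   // so it needs no synchronization on sync_event...
   ArrowErrorCode ArrowDeviceArrayViewSetArrayMinimal(
       struct ArrowDeviceArrayView* device_array_view,
       struct ArrowDeviceArray* device_array, struct ArrowError* error);

   // ...while validation is an explicit opt-in step that synchronizes on the
   // event and copies the offset values it needs to the CPU.
   ArrowErrorCode ArrowDeviceArrayViewValidate(
       struct ArrowDeviceArrayView* device_array_view, struct ArrowError* error);
   ```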



##########
extensions/nanoarrow_device/src/nanoarrow/nanoarrow_device_cuda.c:
##########
@@ -0,0 +1,376 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <cuda_runtime_api.h>
+
+#include "nanoarrow_device.h"
+
+static void ArrowDeviceCudaAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                         uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFree(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaAllocatorReallocate(struct ArrowBufferAllocator* allocator,
+                                                   uint8_t* ptr, int64_t old_size,
+                                                   int64_t new_size) {
+  ArrowDeviceCudaAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaAllocateBuffer(struct ArrowBuffer* buffer,
+                                                    int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMalloc(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+static void ArrowDeviceCudaHostAllocatorFree(struct ArrowBufferAllocator* allocator,
+                                             uint8_t* ptr, int64_t old_size) {
+  if (ptr != NULL) {
+    cudaFreeHost(ptr);
+  }
+}
+
+static uint8_t* ArrowDeviceCudaHostAllocatorReallocate(
+    struct ArrowBufferAllocator* allocator, uint8_t* ptr, int64_t old_size,
+    int64_t new_size) {
+  ArrowDeviceCudaHostAllocatorFree(allocator, ptr, old_size);
+  return NULL;
+}
+
+static ArrowErrorCode ArrowDeviceCudaHostAllocateBuffer(struct ArrowBuffer* buffer,
+                                                        int64_t size_bytes) {
+  void* ptr = NULL;
+  cudaError_t result = cudaMallocHost(&ptr, (int64_t)size_bytes);
+  if (result != cudaSuccess) {
+    return EINVAL;
+  }
+
+  buffer->data = (uint8_t*)ptr;
+  buffer->size_bytes = size_bytes;
+  buffer->capacity_bytes = size_bytes;
+  buffer->allocator.reallocate = &ArrowDeviceCudaHostAllocatorReallocate;
+  buffer->allocator.free = &ArrowDeviceCudaHostAllocatorFree;
+  // TODO: We almost certainly need device_id here
+  buffer->allocator.private_data = NULL;
+  return NANOARROW_OK;
+}
+
+// TODO: All these buffer copiers would benefit from cudaMemcpyAsync but there is
+// no good way to incorporate that just yet
+
+static ArrowErrorCode ArrowDeviceCudaBufferInit(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowBuffer* dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    struct ArrowBuffer tmp;
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaAllocateBuffer(&tmp, src.size_bytes));
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    cudaError_t result =
+        cudaMemcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+                   (size_t)src.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      ArrowBufferReset(&tmp);
+      return EINVAL;
+    }
+
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    NANOARROW_RETURN_NOT_OK(ArrowDeviceCudaHostAllocateBuffer(dst, src.size_bytes));
+    memcpy(dst->data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    struct ArrowBuffer tmp;
+    ArrowBufferInit(&tmp);
+    NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(&tmp, src.size_bytes));
+    tmp.size_bytes = src.size_bytes;
+    memcpy(tmp.data, ((uint8_t*)src.private_data) + src.offset_bytes,
+           (size_t)src.size_bytes);
+    ArrowBufferMove(&tmp, dst);
+    return NANOARROW_OK;
+
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaBufferCopy(struct ArrowDevice* device_src,
+                                                struct ArrowDeviceBufferView src,
+                                                struct ArrowDevice* device_dst,
+                                                struct ArrowDeviceBufferView dst) {
+  // This is all just cudaMemcpy or memcpy
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyHostToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToDevice);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    cudaError_t result = cudaMemcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+                                    ((uint8_t*)src.private_data) + src.offset_bytes,
+                                    dst.size_bytes, cudaMemcpyDeviceToHost);
+    if (result != cudaSuccess) {
+      return EINVAL;
+    }
+    return NANOARROW_OK;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    memcpy(((uint8_t*)dst.private_data) + dst.offset_bytes,
+           ((uint8_t*)src.private_data) + src.offset_bytes, dst.size_bytes);
+    return NANOARROW_OK;
+  } else {
+    return ENOTSUP;
+  }
+}
+
+static int ArrowDeviceCudaCopyRequired(struct ArrowDevice* device_src,
+                                       struct ArrowArrayView* src,
+                                       struct ArrowDevice* device_dst) {
+  if (device_src->device_type == ARROW_DEVICE_CPU &&
+      device_dst->device_type == ARROW_DEVICE_CUDA) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CUDA &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Copy
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CPU &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST) {
+    // Copy: we can't assume the memory has been registered. A user can force
+    // this by registering the memory and setting device->device_type manually.
+    // A copy will ensure all buffers are allocated with cudaMallocHost().
+    return 1;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_src->device_id == device_dst->device_id) {
+    // Move
+    return 0;
+
+  } else if (device_src->device_type == ARROW_DEVICE_CUDA_HOST &&
+             device_dst->device_type == ARROW_DEVICE_CPU) {
+    // Move: the array's release callback is responsible for cudaFreeHost or
+    // deregistration (or perhaps this has been handled at a higher level)
+    return 0;
+
+  } else {
+    // Fall back to the other device's implementation
+    return -1;
+  }
+}
+
+static ArrowErrorCode ArrowDeviceCudaSynchronize(struct ArrowDevice* device,
+                                                 struct ArrowDevice* device_event,
+                                                 void* sync_event,
+                                                 struct ArrowError* error) {
+  if (sync_event == NULL) {
+    return NANOARROW_OK;
+  }
+
+  if (device_event->device_type != ARROW_DEVICE_CUDA &&
+      device_event->device_type != ARROW_DEVICE_CUDA_HOST) {
+    return ENOTSUP;
+  }
+
+  // Pointer vs. not pointer...is there memory ownership to consider here?
+  cudaEvent_t* cuda_event = (cudaEvent_t*)sync_event;
+  cudaError_t result = cudaEventSynchronize(*cuda_event);

Review Comment:
   To call `cudaStreamWaitEvent` you'd need to know what stream to wait on.
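
   For reference, if the consumer's stream were available (it is not in this draft, so
   `consumer_stream` below is an assumed parameter), the non-blocking form would be:

   ```c
   // Hypothetical: enqueue the wait on the consumer's stream instead of blocking
   // the host with cudaEventSynchronize().
   cudaError_t result = cudaStreamWaitEvent(consumer_stream, *cuda_event, 0);
   if (result != cudaSuccess) {
     ArrowErrorSet(error, "cudaStreamWaitEvent() failed: %s", cudaGetErrorString(result));
     return EINVAL;
   }
   ```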


