This is an automated email from the ASF dual-hosted git repository.

lidavidm pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-adbc.git


The following commit(s) were added to refs/heads/main by this push:
     new a134635  feat(rust): define the rust adbc api (#478)
a134635 is described below

commit a134635cdf273c98398a0b5f36b9ede7d3847945
Author: Will Jones <[email protected]>
AuthorDate: Fri May 26 09:53:05 2023 -0700

    feat(rust): define the rust adbc api (#478)
    
    This PR introduces a Rust API for ADBC. This is meant to be parallel to
    the API in other languages:
    
     * C: https://github.com/apache/arrow-adbc/blob/main/adbc.h
     * Go: https://github.com/apache/arrow-adbc/blob/main/go/adbc/adbc.go
    * Java:
    ([Database](https://github.com/apache/arrow-adbc/blob/main/java/core/src/main/java/org/apache/arrow/adbc/core/AdbcDatabase.java),
    [Connection](https://github.com/apache/arrow-adbc/blob/main/java/core/src/main/java/org/apache/arrow/adbc/core/AdbcConnection.java),
    [Statement](https://github.com/apache/arrow-adbc/blob/main/java/core/src/main/java/org/apache/arrow/adbc/core/AdbcStatement.java))
    
    It is similar to the others, except for two methods on `AdbcConnection`:
    `get_info()` and `get_objects()` return Rust data structures instead of
    Arrow record batch readers. This is to make the API more ergonomic. Both
    are designed to be convertible from the C API. The info values from
    `get_info()` are converted from a `UnionArray` to a Rust enum. The
    `get_objects()` return value is an associated type implementing a trait
    that defines an easy to use API; the batches returned by C drivers will
    be wrapped in a struct that provides a zero-copy view ([prototype
    implementation](https://github.com/apache/arrow-adbc/blob/75ba2cef9080b19f4d242d0a03c17db304609d85/rust/src/driver_manager.rs#L970)).
    
    ---------
    
    Co-authored-by: Matthijs Brobbel <[email protected]>
---
 .github/workflows/rust.yml |  68 ++++++
 CONTRIBUTING.md            |  10 +
 rust/.gitignore            |  19 ++
 rust/Cargo.toml            |  46 +++++
 rust/src/error.rs          | 193 +++++++++++++++++
 rust/src/info.rs           | 290 ++++++++++++++++++++++++++
 rust/src/lib.rs            | 455 ++++++++++++++++++++++++++++++++++++++++
 rust/src/objects.rs        | 502 +++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 1583 insertions(+)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..248a9ee
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+name: Rust
+
+on:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - "rust/**"
+      - ".github/workflows/rust.yml"
+  push:
+    paths:
+      - "rust/**"
+      - ".github/workflows/rust.yml"
+
+concurrency:
+  group: ${{ github.repository }}-${{ github.ref }}-${{ github.workflow }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+defaults:
+  run:
+    working-directory: rust
+
+jobs:
+  rust:
+    strategy:
+      matrix:
+        os: [windows-latest, macos-latest, ubuntu-latest]
+    name: "Rust ${{ matrix.os }}"
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          persist-credentials: false
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+        with:
+          components: clippy, rustfmt
+      - uses: Swatinem/rust-cache@v2
+      - name: Check format
+        run: cargo fmt -- --check
+      - name: Clippy
+        run: cargo clippy --tests
+      - name: Test
+        shell: bash -l {0}
+        run: cargo test
+      - name: Check docs
+        run: cargo doc
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e97110a..d174fe9 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -197,6 +197,16 @@ $ pytest -vvx
 
 The Ruby libraries are bindings around the GLib libraries.
 
+### Rust
+
+The Rust components are a standard Rust project.
+
+```shell
+$ cd rust
+# Build and run tests
+$ cargo test
+```
+
 ## Opening a Pull Request
 
 Before opening a pull request, please run the static checks, which are
diff --git a/rust/.gitignore b/rust/.gitignore
new file mode 100644
index 0000000..95a3a86
--- /dev/null
+++ b/rust/.gitignore
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/target
+Cargo.lock
diff --git a/rust/Cargo.toml b/rust/Cargo.toml
new file mode 100644
index 0000000..fe9578d
--- /dev/null
+++ b/rust/Cargo.toml
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[package]
+name = "arrow-adbc"
+version = "0.1.0"
+edition = "2021"
+rust-version = "1.62"
+description = "Rust implementation of Arrow Database Connectivity (ADBC)"
+homepage = "https://arrow.apache.org/adbc/"
+repository = "https://github.com/apache/arrow-adbc"
+authors = ["Apache Arrow <[email protected]>"]
+license = "Apache-2.0"
+keywords = ["arrow", "database", "sql"]
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+arrow-array = { version = "34.0.0", default-features = false}
+arrow-schema = { version = "34.0.0", features = ["ffi"], default-features = false}
+arrow-data = { version = "34.0.0", features = ["ffi"], default-features = false}
+async-trait = "0.1"
+libloading = "0.7"
+num_enum = "0.5"
+once_cell = "1"
+substrait = { version = "0.5", optional = true }
+
+[dev-dependencies]
+itertools = "0.10"
+
+[features]
+substrait = ["dep:substrait"]
diff --git a/rust/src/error.rs b/rust/src/error.rs
new file mode 100644
index 0000000..a05f93d
--- /dev/null
+++ b/rust/src/error.rs
@@ -0,0 +1,193 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! ADBC error enums and structs
+
+use arrow_schema::ArrowError;
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+#[repr(u8)]
+pub enum AdbcStatusCode {
+    /// No error.
+    Ok = 0,
+    /// An unknown error occurred.
+    ///
+    /// May indicate a driver-side or database-side error.
+    Unknown = 1,
+    /// The operation is not implemented or supported.
+    ///
+    /// May indicate a driver-side or database-side error.
+    NotImplemented = 2,
+    /// A requested resource was not found.
+    ///
+    /// May indicate a driver-side or database-side error.
+    NotFound = 3,
+    /// A requested resource already exists.
+    ///
+    /// May indicate a driver-side or database-side error.
+    AlreadyExists = 4,
+    /// The arguments are invalid, likely a programming error.
+    ///
+    /// May indicate a driver-side or database-side error.
+    InvalidArguments = 5,
+    /// The preconditions for the operation are not met, likely a
+    ///   programming error.
+    ///
+    /// For instance, the object may be uninitialized, or may have not
+    /// been fully configured.
+    ///
+    /// May indicate a driver-side or database-side error.
+    InvalidState = 6,
+    /// Invalid data was processed (not a programming error).
+    ///
+    /// For instance, a division by zero may have occurred during query
+    /// execution.
+    ///
+    /// May indicate a database-side error only.
+    InvalidData = 7,
+    /// The database's integrity was affected.
+    ///
+    /// For instance, a foreign key check may have failed, or a uniqueness
+    /// constraint may have been violated.
+    ///
+    /// May indicate a database-side error only.
+    Integrity = 8,
+    /// An error internal to the driver or database occurred.
+    ///
+    /// May indicate a driver-side or database-side error.
+    Internal = 9,
+    /// An I/O error occurred.
+    ///
+    /// For instance, a remote service may be unavailable.
+    ///
+    /// May indicate a driver-side or database-side error.
+    IO = 10,
+    /// The operation was cancelled, not due to a timeout.
+    ///
+    /// May indicate a driver-side or database-side error.
+    Cancelled = 11,
+    /// The operation was cancelled due to a timeout.
+    ///
+    /// May indicate a driver-side or database-side error.
+    Timeout = 12,
+    /// Authentication failed.
+    ///
+    /// May indicate a database-side error only.
+    Unauthenticated = 13,
+    /// The client is not authorized to perform the given operation.
+    ///
+    /// May indicate a database-side error only.
+    Unauthorized = 14,
+}
+
+impl std::fmt::Display for AdbcStatusCode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            AdbcStatusCode::Ok => write!(f, "Ok"),
+            AdbcStatusCode::Unknown => write!(f, "Unknown"),
+            AdbcStatusCode::NotImplemented => write!(f, "Not Implemented"),
+            AdbcStatusCode::NotFound => write!(f, "Not Found"),
+            AdbcStatusCode::AlreadyExists => write!(f, "Already Exists"),
+            AdbcStatusCode::InvalidArguments => write!(f, "Invalid Arguments"),
+            AdbcStatusCode::InvalidState => write!(f, "Invalid State"),
+            AdbcStatusCode::InvalidData => write!(f, "Invalid Data"),
+            AdbcStatusCode::Integrity => write!(f, "Integrity"),
+            AdbcStatusCode::Internal => write!(f, "Internal Error"),
+            AdbcStatusCode::IO => write!(f, "IO Error"),
+            AdbcStatusCode::Cancelled => write!(f, "Cancelled"),
+            AdbcStatusCode::Timeout => write!(f, "Timeout"),
+            AdbcStatusCode::Unauthenticated => write!(f, "Unauthenticated"),
+            AdbcStatusCode::Unauthorized => write!(f, "Unauthorized"),
+        }
+    }
+}
+
+/// An error from an ADBC driver.
+#[derive(Debug, Clone)]
+pub struct AdbcError {
+    /// An error message
+    pub message: String,
+    /// A vendor-specific error code.
+    pub vendor_code: i32,
+    /// A SQLSTATE error code, if provided, as defined by the SQL:2003 standard.
+    /// If not set, it is left as `[0; 5]`.
+    pub sqlstate: [i8; 5usize],
+    /// The status code indicating the type of error.
+    pub status_code: AdbcStatusCode,
+}
+
+impl std::fmt::Display for AdbcError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}: {} (sqlstate: {:?}, vendor_code: {})",
+            self.status_code, self.message, self.sqlstate, self.vendor_code
+        )
+    }
+}
+
+impl std::error::Error for AdbcError {
+    fn description(&self) -> &str {
+        &self.message
+    }
+}
+
+impl From<std::str::Utf8Error> for AdbcError {
+    fn from(value: std::str::Utf8Error) -> Self {
+        Self {
+            message: format!(
+                "Invalid UTF-8 character at position {}",
+                value.valid_up_to()
+            ),
+            vendor_code: -1,
+            // A character is not in the coded character set or the conversion is not supported.
+            sqlstate: [2, 2, 0, 2, 1],
+            status_code: AdbcStatusCode::InvalidArguments,
+        }
+    }
+}
+
+impl From<std::ffi::NulError> for AdbcError {
+    fn from(value: std::ffi::NulError) -> Self {
+        Self {
+            message: format!(
+                "An input string contained an interior nul at position {}",
+                value.nul_position()
+            ),
+            vendor_code: -1,
+            sqlstate: [0; 5],
+            status_code: AdbcStatusCode::InvalidArguments,
+        }
+    }
+}
+
+impl From<ArrowError> for AdbcError {
+    fn from(value: ArrowError) -> Self {
+        let message = match value {
+            ArrowError::CDataInterface(msg) => msg,
+            ArrowError::SchemaError(msg) => msg,
+            _ => "Arrow error".to_string(), // TODO: Fill in remainder
+        };
+
+        Self {
+            message,
+            vendor_code: -1,
+            sqlstate: [0; 5],
+            status_code: AdbcStatusCode::Internal,
+        }
+    }
+}
diff --git a/rust/src/info.rs b/rust/src/info.rs
new file mode 100644
index 0000000..6a30ddd
--- /dev/null
+++ b/rust/src/info.rs
@@ -0,0 +1,290 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Utilities for driver info
+//!
+//! For use with [crate::AdbcConnection::get_info].
+
+use arrow_array::builder::{
+    ArrayBuilder, BooleanBuilder, Int32Builder, Int64Builder, ListBuilder, MapBuilder,
+    StringBuilder, UInt32BufferBuilder, UInt32Builder, UInt8BufferBuilder,
+};
+use arrow_array::cast::{as_primitive_array, as_string_array, as_union_array};
+use arrow_array::types::UInt32Type;
+use arrow_array::{Array, ArrayRef, UnionArray};
+use arrow_array::{RecordBatch, RecordBatchIterator, RecordBatchReader};
+use arrow_schema::{ArrowError, DataType, Field, Schema, UnionMode};
+use num_enum::{FromPrimitive, IntoPrimitive};
+use once_cell::sync::Lazy;
+use std::{borrow::Cow, collections::HashMap, sync::Arc};
+
+/// Contains known info codes defined by ADBC.
+#[repr(u32)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, FromPrimitive, IntoPrimitive, Hash)]
+pub enum InfoCode {
+    /// The database vendor/product version (type: utf8).
+    VendorName = 0,
+    /// The database vendor/product version (type: utf8).
+    VendorVersion = 1,
+    /// The database vendor/product Arrow library version (type: utf8).
+    VendorArrowVersion = 2,
+    /// The driver name (type: utf8).
+    DriverName = 100,
+    /// The driver version (type: utf8).
+    DriverVersion = 101,
+    /// The driver Arrow library version (type: utf8).
+    DriverArrowVersion = 102,
+    /// Some other info code.
+    #[num_enum(catch_all)]
+    Other(u32),
+}
+
+static INFO_SCHEMA: Lazy<Arc<Schema>> = Lazy::new(|| {
+    Arc::new(Schema::new(vec![
+        Field::new("info_name", DataType::UInt32, false),
+        Field::new(
+            "info_value",
+            DataType::Union(
+                vec![
+                    Field::new("string_value", DataType::Utf8, true),
+                    Field::new("bool_value", DataType::Boolean, true),
+                    Field::new("int64_value", DataType::Int64, true),
+                    Field::new("int32_bitmask", DataType::Int32, true),
+                    Field::new(
+                        "string_list",
+                        DataType::List(Box::new(Field::new("item", DataType::Utf8, true))),
+                        true,
+                    ),
+                    Field::new(
+                        "int32_to_int32_list_map",
+                        DataType::Map(
+                            Box::new(Field::new(
+                                "entries",
+                                DataType::Struct(vec![
+                                    Field::new("keys", DataType::Int32, false),
+                                    Field::new(
+                                        "values",
+                                        DataType::List(Box::new(Field::new(
+                                            "item",
+                                            DataType::Int32,
+                                            true,
+                                        ))),
+                                        true,
+                                    ),
+                                ]),
+                                false,
+                            )),
+                            false,
+                        ),
+                        true,
+                    ),
+                ],
+                vec![0, 1, 2, 3, 4, 5],
+                UnionMode::Dense,
+            ),
+            true,
+        ),
+    ]))
+});
+
+/// Rust representations of database/drier metadata
+#[derive(Clone, Debug, PartialEq)]
+pub enum InfoData {
+    StringValue(Cow<'static, str>),
+    BoolValue(bool),
+    Int64Value(i64),
+    Int32Bitmask(i32),
+    StringList(Vec<String>),
+    Int32ToInt32ListMap(HashMap<i32, Vec<i32>>),
+}
+
+pub fn export_info_data(
+    info_iter: impl IntoIterator<Item = (InfoCode, InfoData)>,
+) -> impl RecordBatchReader {
+    let info_iter = info_iter.into_iter();
+
+    let mut codes = UInt32Builder::with_capacity(info_iter.size_hint().0);
+
+    // Type id tells which array the value is in
+    let mut type_id = UInt8BufferBuilder::new(info_iter.size_hint().0);
+    // Value offset tells the offset of the value in the respective array
+    let mut value_offsets = UInt32BufferBuilder::new(info_iter.size_hint().0);
+
+    // Make one builder per child of union array. Will combine after.
+    let mut string_values = StringBuilder::new();
+    let mut bool_values = BooleanBuilder::new();
+    let mut int64_values = Int64Builder::new();
+    let mut int32_bitmasks = Int32Builder::new();
+    let mut string_lists = ListBuilder::new(StringBuilder::new());
+    let mut int32_to_int32_list_maps = MapBuilder::new(
+        None,
+        Int32Builder::new(),
+        ListBuilder::new(Int32Builder::new()),
+    );
+
+    for (code, info) in info_iter {
+        codes.append_value(code.into());
+
+        match info {
+            InfoData::StringValue(val) => {
+                string_values.append_value(val);
+                type_id.append(0);
+                let value_offset = string_values.len() - 1;
+                value_offsets.append(
+                    value_offset
+                        .try_into()
+                        .expect("Array has more values than can be indexed by u32"),
+                );
+            }
+            _ => {
+                todo!("support other types in info_data")
+            }
+        };
+    }
+
+    let arrays: Vec<ArrayRef> = vec![
+        Arc::new(string_values.finish()),
+        Arc::new(bool_values.finish()),
+        Arc::new(int64_values.finish()),
+        Arc::new(int32_bitmasks.finish()),
+        Arc::new(string_lists.finish()),
+        Arc::new(int32_to_int32_list_maps.finish()),
+    ];
+    let info_schema = INFO_SCHEMA.clone();
+    let union_fields = {
+        match info_schema.field(1).data_type() {
+            DataType::Union(fields, _, _) => fields,
+            _ => unreachable!(),
+        }
+    };
+    let children = union_fields
+        .iter()
+        .map(|f| f.to_owned())
+        .zip(arrays.into_iter())
+        .collect();
+    let info_value = UnionArray::try_new(
+        &[0, 1, 2, 3, 4, 5],
+        type_id.finish(),
+        Some(value_offsets.finish()),
+        children,
+    )
+    .expect("Info value array is always valid.");
+
+    let batch: RecordBatch = RecordBatch::try_new(
+        info_schema,
+        vec![Arc::new(codes.finish()), Arc::new(info_value)],
+    )
+    .expect("Info data batch is always valid.");
+
+    let schema = batch.schema();
+    RecordBatchIterator::new(std::iter::once(batch).map(Ok), schema)
+}
+
+pub fn import_info_data(
+    reader: impl RecordBatchReader,
+) -> Result<Vec<(InfoCode, InfoData)>, ArrowError> {
+    let batches = reader.collect::<Result<Vec<RecordBatch>, ArrowError>>()?;
+
+    Ok(batches
+        .iter()
+        .flat_map(|batch| {
+            let codes = as_primitive_array::<UInt32Type>(batch.column(0));
+            let codes = codes.into_iter().map(|code| code.unwrap().into());
+
+            let info_data = as_union_array(batch.column(1));
+            let info_data = (0..info_data.len()).map(|i| -> InfoData {
+                let type_id = info_data.type_id(i);
+                match type_id {
+                    0 => InfoData::StringValue(Cow::Owned(
+                        as_string_array(&info_data.value(i)).value(0).to_string(),
+                    )),
+                    _ => todo!("Support other types"),
+                }
+            });
+
+            std::iter::zip(codes, info_data)
+        })
+        .collect())
+}
+
+#[cfg(test)]
+mod test {
+    use arrow_array::cast::{as_primitive_array, as_string_array, as_union_array};
+    use arrow_array::types::UInt32Type;
+
+    use super::*;
+
+    #[test]
+    fn test_export_info_data() {
+        let example_info = vec![
+            (
+                InfoCode::VendorName,
+                InfoData::StringValue(Cow::Borrowed("test vendor")),
+            ),
+            (
+                InfoCode::DriverName,
+                InfoData::StringValue(Cow::Borrowed("test driver")),
+            ),
+        ];
+
+        let info = export_info_data(example_info.clone());
+
+        assert_eq!(info.schema(), *INFO_SCHEMA);
+        let info: HashMap<InfoCode, String> = info
+            .flat_map(|maybe_batch| {
+                let batch = maybe_batch.unwrap();
+                let id = as_primitive_array::<UInt32Type>(batch.column(0));
+                let values = as_union_array(batch.column(1));
+                let string_values = as_string_array(values.child(0));
+                let mut out = vec![];
+                for i in 0..batch.num_rows() {
+                    assert_eq!(values.type_id(i), 0);
+                    let code = InfoCode::from(id.value(i));
+                    out.push((code, string_values.value(i).to_string()));
+                }
+                out
+            })
+            .collect();
+
+        assert_eq!(
+            info.get(&InfoCode::VendorName),
+            Some(&"test vendor".to_string())
+        );
+        assert_eq!(
+            info.get(&InfoCode::DriverName),
+            Some(&"test driver".to_string())
+        );
+
+        let info = export_info_data(example_info);
+
+        let info: HashMap<InfoCode, InfoData> =
+            import_info_data(info).unwrap().into_iter().collect();
+
+        assert_eq!(
+            info.get(&InfoCode::VendorName),
+            Some(&InfoData::StringValue(Cow::Owned(
+                "test vendor".to_string()
+            )))
+        );
+        assert_eq!(
+            info.get(&InfoCode::DriverName),
+            Some(&InfoData::StringValue(Cow::Owned(
+                "test driver".to_string()
+            )))
+        );
+    }
+}
diff --git a/rust/src/lib.rs b/rust/src/lib.rs
new file mode 100644
index 0000000..13f0c74
--- /dev/null
+++ b/rust/src/lib.rs
@@ -0,0 +1,455 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Arrow Database Connectivity (ADBC) allows efficient connections to 
databases
+//! for OLAP workloads:
+//!
+//!  * Uses the Arrow [C Data 
interface](https://arrow.apache.org/docs/format/CDataInterface.html)
+//!    and [C Stream 
Interface](https://arrow.apache.org/docs/format/CStreamInterface.html)
+//!    for efficient data interchange.
+//!  * Supports partitioned result sets for multi-threaded or distributed
+//!    applications.
+//!  * Support for [Substrait](https://substrait.io/) plans in addition to SQL 
queries.
+//!
+//! When implemented for remote databases, [Flight 
SQL](https://arrow.apache.org/docs/format/FlightSql.html)
+//! can be used as the communication protocol. This means data can be in Arrow
+//! format through the whole connection, minimizing serialization and 
deserialization
+//! overhead.
+//!
+//! Read more about ADBC at <https://arrow.apache.org/adbc/>
+//!
+//! There are two flavors of ADBC that this library supports:
+//!
+//!  * **Native Rust implementations**. These implement the traits at the top 
level of
+//!    this crate, starting with [AdbcDatabase].
+//!  * **C API ADBC drivers**. These can be implemented in any language (that 
compiles
+//!    to native code) and can be used by any language.
+//!
+//! # Native Rust drivers
+//!
+//! Native Rust drivers will implement the traits:
+//!
+//!  * [AdbcDatabase]
+//!  * [AdbcConnection]
+//!  * [AdbcStatement]
+//!
+//! For drivers implemented in Rust, using these will be more efficient and 
safe,
+//! since it avoids the overhead of going through C FFI.
+//!
+//! # Using C API drivers
+//!
+//! 🚧 TODO
+//!
+//! # Creating C API drivers
+//!
+//! 🚧 TODO
+//!
+pub mod error;
+pub mod info;
+pub mod objects;
+
+use std::collections::HashMap;
+
+use arrow_array::{RecordBatch, RecordBatchReader};
+use arrow_schema::Schema;
+use async_trait::async_trait;
+use info::InfoCode;
+
+use crate::error::AdbcError;
+use crate::info::InfoData;
+
+/// Databases hold state shared by multiple connections. This typically means
+/// configuration and caches. For in-memory databases, it provides a place to
+/// hold ownership of the in-memory database.
+#[async_trait]
+pub trait AdbcDatabase {
+    type ConnectionType: AdbcConnection;
+
+    /// Set an option on the database.
+    ///
+    /// Some databases may not allow setting options after the database has been initialized.
+    fn set_option(&self, key: impl AsRef<str>, value: impl AsRef<str>) -> 
Result<(), AdbcError>;
+
+    /// Initialize a connection to the database.
+    ///
+    /// `options` provided will configure the connection, including the 
isolation
+    /// level. See standard options in [options].
+    async fn connect<K, V>(
+        &self,
+        options: impl IntoIterator<Item = (K, V)>,
+    ) -> Result<Self::ConnectionType, AdbcError>
+    where
+        K: AsRef<str>,
+        V: AsRef<str>;
+}
+
+/// A connection is a single connection to a database.
+///
+/// It is never accessed concurrently from multiple threads.
+///
+/// # Autocommit
+///
+/// Connections should start in autocommit mode. They can be moved out of
+/// autocommit mode by setting [options::AdbcOptionKey::AutoCommit] to `"false"` (using
+/// [AdbcConnection::set_option]). Turning off autocommit allows customizing
+/// the isolation level. Read more in 
[adbc.h](https://github.com/apache/arrow-adbc/blob/main/adbc.h).
+#[async_trait]
+pub trait AdbcConnection {
+    type StatementType: AdbcStatement;
+    type ObjectCollectionType: objects::DatabaseCatalogCollection;
+
+    /// Set an option on the connection.
+    ///
+    /// Some connections may not allow setting options after the connection has been initialized.
+    fn set_option(&self, key: impl AsRef<str>, value: impl AsRef<str>) -> 
Result<(), AdbcError>;
+
+    /// Create a new [AdbcStatement].
+    fn new_statement(&self) -> Result<Self::StatementType, AdbcError>;
+
+    /// Get metadata about the database/driver.
+    ///
+    /// If None is passed for `info_codes`, the method will return all info.
+    /// Otherwise will return the specified info, in any order. Unrecognized
+    /// codes are ignored (see below); their rows are omitted from the result.
+    ///
+    /// Each metadatum is identified by an integer code.  The recognized
+    /// codes are defined as constants.  Codes [0, 10_000) are reserved
+    /// for ADBC usage.  Drivers/vendors will ignore requests for
+    /// unrecognized codes (the row will be omitted from the result).
+    /// Known codes are provided in [info::InfoCode].
+    async fn get_info(
+        &self,
+        info_codes: Option<&[InfoCode]>,
+    ) -> Result<HashMap<u32, InfoData>, AdbcError>;
+
+    /// Get a single database metadatum. See [AdbcConnection::get_info()].
+    ///
+    /// Will return `None` if the code is not recognized.
+    async fn get_single_info(&self, info_code: InfoCode) -> 
Result<Option<InfoData>, AdbcError> {
+        let info_codes = &[info_code];
+        Ok(self
+            .get_info(Some(info_codes.as_slice()))
+            .await?
+            .into_iter()
+            .next()
+            .map(|(_, val)| val))
+    }
+
+    /// Get a hierarchical view of all catalogs, database schemas, tables, and 
columns.
+    ///
+    /// # Parameters
+    ///
+    /// * **depth**: The level of nesting to display. If 
[AdbcObjectDepth::All], display
+    ///   all levels. If [AdbcObjectDepth::Catalogs], display only catalogs 
(i.e.  `catalog_schemas`
+    ///   will be null). If [AdbcObjectDepth::DBSchemas], display only 
catalogs and schemas
+    ///   (i.e. `db_schema_tables` will be null), and so on.
+    /// * **catalog**: Only show tables in the given catalog. If None,
+    ///   do not filter by catalog. If an empty string, only show tables
+    ///   without a catalog.  May be a search pattern (see next section).
+    /// * **db_schema**: Only show tables in the given database schema. If
+    ///   None, do not filter by database schema. If an empty string, only show
+    ///   tables without a database schema. May be a search pattern (see next 
section).
+    /// * **table_name**: Only show tables with the given name. If None, do not
+    ///   filter by name. May be a search pattern (see next section).
+    /// * **table_type**: Only show tables matching one of the given table
+    ///   types. If None, show tables of any type. Valid table types should
+    ///   match those returned by [AdbcConnection::get_table_schema].
+    /// * **column_name**: Only show columns with the given name. If
+    ///   None, do not filter by name.  May be a search pattern (see next 
section).
+    ///
+    /// # Search patterns
+    ///
+    /// Some parameters accept "search patterns", which are
+    /// strings that can contain the special character `"%"` to match zero
+    /// or more characters, or `"_"` to match exactly one character.  (See
+    /// the documentation of DatabaseMetaData in JDBC or "Pattern Value
+    /// Arguments" in the ODBC documentation.)
+    async fn get_objects(
+        &self,
+        depth: AdbcObjectDepth,
+        catalog: Option<&str>,
+        db_schema: Option<&str>,
+        table_name: Option<&str>,
+        table_type: Option<&[&str]>,
+        column_name: Option<&str>,
+    ) -> Result<Self::ObjectCollectionType, AdbcError>;
+
+    /// Get the Arrow schema of a table.
+    ///
+    /// `catalog` or `db_schema` may be `None` when not applicable.
+    async fn get_table_schema(
+        &self,
+        catalog: Option<&str>,
+        db_schema: Option<&str>,
+        table_name: &str,
+    ) -> Result<Schema, AdbcError>;
+
+    /// Get a list of table types in the database.
+    ///
+    /// In the C API this is an Arrow dataset with the schema below; here it
+    /// is returned as the equivalent `Vec<String>`:
+    ///
+    /// Field Name       | Field Type
+    /// -----------------|--------------
+    /// `table_type`     | `utf8 not null`
+    async fn get_table_types(&self) -> Result<Vec<String>, AdbcError>;
+
+    /// Read part of a partitioned result set.
+    async fn read_partition(
+        &self,
+        partition: &[u8],
+    ) -> Result<Box<dyn RecordBatchReader>, AdbcError>;
+
+    /// Commit any pending transactions. Only used if autocommit is disabled.
+    async fn commit(&self) -> Result<(), AdbcError>;
+
+    /// Roll back any pending transactions. Only used if autocommit is 
disabled.
+    async fn rollback(&self) -> Result<(), AdbcError>;
+}
+
+/// Depth parameter for GetObjects method.
+#[derive(Debug, Copy, Clone)]
+#[repr(i32)]
+pub enum AdbcObjectDepth {
+    /// Metadata on catalogs, schemas, tables, and columns.
+    All = 0,
+    /// Metadata on catalogs only.
+    Catalogs = 1,
+    /// Metadata on catalogs and schemas.
+    DBSchemas = 2,
+    /// Metadata on catalogs, schemas, and tables.
+    Tables = 3,
+}
+
+/// A container for all state needed to execute a database query, such as the
+/// query itself, parameters for prepared statements, driver parameters, etc.
+///
+/// Statements may represent queries or prepared statements.
+///
+/// Statements may be used multiple times and can be reconfigured
+/// (e.g. they can be reused to execute multiple different queries).
+/// However, executing a statement (and changing certain other state)
+/// will invalidate result sets obtained prior to that execution.
+///
+/// Multiple statements may be created from a single connection.
+/// However, the driver may block or error if they are used
+/// concurrently (whether from a single thread or multiple threads).
+#[async_trait]
+pub trait AdbcStatement {
+    /// Turn this statement into a prepared statement to be executed multiple times.
+    ///
+    /// This should return an error if called before 
[AdbcStatement::set_sql_query].
+    async fn prepare(&mut self) -> Result<(), AdbcError>;
+
+    /// Set a string option on a statement.
+    fn set_option(&mut self, key: impl AsRef<str>, value: impl AsRef<str>)
+        -> Result<(), AdbcError>;
+
+    /// Set the SQL query to execute.
+    fn set_sql_query(&mut self, query: &str) -> Result<(), AdbcError>;
+
+    /// Get the schema for bound parameters.
+    ///
+    /// This retrieves an Arrow schema describing the number, names, and
+    /// types of the parameters in a parameterized statement.  The fields
+    /// of the schema should be in order of the ordinal position of the
+    /// parameters; named parameters should appear only once.
+    ///
+    /// If the parameter does not have a name, or the name cannot be
+    /// determined, the name of the corresponding field in the schema will
+    /// be an empty string.  If the type cannot be determined, the type of
+    /// the corresponding field will be NA (NullType).
+    ///
+    /// This should return an error if this was called before 
[AdbcStatement::prepare].
+    async fn get_param_schema(&self) -> Result<Schema, AdbcError>;
+
+    /// Bind Arrow data, either for bulk inserts or prepared statements.
+    fn bind_data(&mut self, batch: RecordBatch) -> Result<(), AdbcError>;
+
+    /// Bind Arrow data, either for bulk inserts or prepared statements.
+    fn bind_stream(&mut self, stream: Box<dyn RecordBatchReader>) -> 
Result<(), AdbcError>;
+
+    /// Execute a statement and get the results.
+    ///
+    /// See [StatementResult].
+    async fn execute(&mut self) -> Result<StatementResult, AdbcError>;
+
+    /// Execute a query that doesn't have a result set.
+    ///
+    /// Will return the number of rows affected. If the affected row count is
+    /// unknown or unsupported by the database, will return `Ok(-1)`.
+    async fn execute_update(&mut self) -> Result<i64, AdbcError>;
+
+    /// Execute a statement with a partitioned result set.
+    ///
+    /// This is not required to be implemented, as it only applies to backends
+    /// that internally partition results. These backends can use this method
+    /// to support threaded or distributed clients.
+    ///
+    /// See [PartitionedStatementResult].
+    async fn execute_partitioned(&mut self) -> 
Result<PartitionedStatementResult, AdbcError>;
+}
+
+#[cfg(substrait)]
+pub trait AdbcStatementSubstrait: AdbcStatement {
+    /// Set the Substrait plan to execute.
+    fn set_substrait_plan(&mut self, plan: substrait::proto::Plan) -> 
Result<(), AdbcError>;
+}
+
+/// Result of calling [AdbcStatement::execute].
+///
+/// `result` may be None if there is no meaningful result.
+/// `row_affected` may be -1 if not applicable or if it is not supported.
+pub struct StatementResult {
+    pub result: Option<Box<dyn RecordBatchReader>>,
+    pub rows_affected: i64,
+}
+
+/// Partitioned results
+///
+/// [AdbcConnection::read_partition] will be called to get the output stream
+/// for each partition.
+///
+/// These may be used by a multi-threaded or a distributed client. Each 
partition
+/// will be retrieved by a separate connection. For in-memory databases, these
+/// may be connections on different threads that all reference the same 
database.
+/// For remote databases, these may be connections in different processes.
+#[derive(Debug, Clone)]
+pub struct PartitionedStatementResult {
+    pub schema: Schema,
+    pub partition_ids: Vec<Vec<u8>>,
+    pub rows_affected: i64,
+}
+
+/// Known options that can be set on databases, connections, and statements.
+///
+/// For use with [crate::AdbcDatabase::set_option],
+/// [crate::AdbcConnection::set_option],
+/// and [crate::AdbcStatement::set_option].
+pub mod options {
+    /// Various known options for ADBC connections.
+    ///
+    /// These convert to canonical option strings as defined in the C API.
+    pub enum AdbcOptionKey {
+        /// When ingesting a data stream, table name to write to.
+        IngestTargetTable,
+        /// How to ingest a table. See [IngestMode] for canonical possible 
values.
+        IngestMode,
+        /// Whether autocommit is enabled.
+        AutoCommit,
+        /// Whether the current connection should be restricted to being 
read-only.
+        ReadOnly,
+        /// The name of the canonical option for setting the isolation level 
of a
+        /// transaction.
+        ///
+        /// Should only be used in conjunction with autocommit disabled and
+        /// AdbcConnectionCommit / AdbcConnectionRollback. If the desired
+        /// isolation level is not supported by a driver, it should return an
+        /// appropriate error.
+        ///
+        /// See [IsolationLevel] for possible values.
+        IsolationLevel,
+    }
+
+    impl AsRef<str> for AdbcOptionKey {
+        fn as_ref(&self) -> &str {
+            match self {
+                Self::IngestTargetTable => "adbc.ingest.target_table",
+                Self::IngestMode => "adbc.ingest.mode",
+                Self::AutoCommit => "adbc.connection.autocommit",
+                Self::ReadOnly => "adbc.connection.readonly",
+                Self::IsolationLevel => 
"adbc.connection.transaction.isolation_level",
+            }
+        }
+    }
+
+    /// Possible ingest mode for use with option [AdbcOptionKey::IngestMode].
+    ///
+    /// These convert to canonical option strings as defined in the C API.
+    pub enum IngestMode {
+        Create,
+        Append,
+    }
+
+    impl AsRef<str> for IngestMode {
+        fn as_ref(&self) -> &str {
+            match self {
+                Self::Create => "adbc.ingest.mode.create",
+                Self::Append => "adbc.ingest.mode.append",
+            }
+        }
+    }
+
+    /// Possible isolation level values for use with option 
[AdbcOptionKey::IsolationLevel].
+    pub enum IsolationLevel {
+        /// Use database or driver default isolation level
+        Default,
+        /// The lowest isolation level. Dirty reads are allowed, so one 
transaction
+        /// may see not-yet-committed changes made by others.
+        ReadUncommitted,
+        /// Lock-based concurrency control keeps write locks until the
+        /// end of the transaction, but read locks are released as soon as a
+        /// SELECT is performed. Non-repeatable reads can occur in this
+        /// isolation level.
+        ///
+        /// More simply put, Read Committed is an isolation level that 
guarantees
+        /// that any data read is committed at the moment it is read. It simply
+        /// restricts the reader from seeing any intermediate, uncommitted,
+        /// 'dirty' reads. It makes no promise whatsoever that if the 
transaction
+        /// re-issues the read, it will find the same data; data is free to 
change
+        /// after it is read.
+        ReadCommitted,
+        /// Lock-based concurrency control keeps read AND write locks
+        /// (acquired on selection data) until the end of the transaction.
+        ///
+        /// However, range-locks are not managed, so phantom reads can occur.
+        /// Write skew is possible at this isolation level in some systems.
+        RepeatableRead,
+        /// This isolation guarantees that all reads in the transaction
+        /// will see a consistent snapshot of the database and the transaction
+        /// should only successfully commit if no updates conflict with any
+        /// concurrent updates made since that snapshot.
+        Snapshot,
+        /// Serializability requires read and write locks to be released
+        /// only at the end of the transaction. This includes acquiring range-
+        /// locks when a select query uses a ranged WHERE clause to avoid
+        /// phantom reads.
+        Serializable,
+        /// The central distinction between serializability and linearizability
+        /// is that serializability is a global property; a property of an 
entire
+        /// history of operations and transactions. Linearizability is a local
+        /// property; a property of a single operation/transaction.
+        ///
+        /// Linearizability can be viewed as a special case of strict 
serializability
+        /// where transactions are restricted to consist of a single operation 
applied
+        /// to a single object.
+        Linearizable,
+    }
+
+    impl AsRef<str> for IsolationLevel {
+        fn as_ref(&self) -> &str {
+            match self {
+                Self::Default => 
"adbc.connection.transaction.isolation.default",
+                Self::ReadUncommitted => 
"adbc.connection.transaction.isolation.read_uncommitted",
+                Self::ReadCommitted => 
"adbc.connection.transaction.isolation.read_committed",
+                Self::RepeatableRead => 
"adbc.connection.transaction.isolation.repeatable_read",
+                Self::Snapshot => 
"adbc.connection.transaction.isolation.snapshot",
+                Self::Serializable => 
"adbc.connection.transaction.isolation.serializable",
+                Self::Linearizable => 
"adbc.connection.transaction.isolation.linearizable",
+            }
+        }
+    }
+}
diff --git a/rust/src/objects.rs b/rust/src/objects.rs
new file mode 100644
index 0000000..fbfbc0f
--- /dev/null
+++ b/rust/src/objects.rs
@@ -0,0 +1,502 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Structs and traits for representing database objects (tables, columns, 
schemas).
+//!
+//! When [crate::AdbcConnection::get_objects] is called, it returns an 
associated type that
+//! implements [DatabaseCatalogCollection]. This collection contains a 
hierarchical data
+//! structure representing:
+//!
+//!  * Database catalogs
+//!  * Database schemas
+//!  * Tables
+//!  * Columns
+//!  * Table constraints
+//!
+//! A database catalog, schema, and table are represented by a type 
implementing
+//! [DatabaseCatalogEntry], [DatabaseSchemaEntry], and [DatabaseTableEntry],
+//! respectively. These can be concrete Rust structs, such as 
[SimpleCatalogEntry],
+//! [SimpleSchemaEntry], and [SimpleTableEntry]. Or they can be zero-copy views
+//! onto Arrow record batches as returned by the C API ADBC drivers (TODO).
+//!
+//! | Trait                        | Simple Rust-based    |
+//! |------------------------------|----------------------|
+//! | [DatabaseCatalogCollection]  | [SimpleCatalogCollection] |
+//! | [DatabaseCatalogEntry]       | [SimpleCatalogEntry] |
+//! | [DatabaseSchemaEntry]        | [SimpleSchemaEntry]  |
+//! | [DatabaseTableEntry]         | [SimpleTableEntry]   |
+//!
+//! There are owned and reference variations of columns, table constraints,
+//! and foreign key usage. Each has a `borrow()` method to transform an owned
+//! variant into its reference variant, and a `to_owned()` method to transform 
the
+//! reference variant into the owned. These mimic the [std::borrow::Borrow] and
+//! [std::borrow::ToOwned] traits, but do not actually implement them.
+//!
+//! | Owned             | Reference            |
+//! |-------------------|----------------------|
+//! | [ColumnSchema]    | [ColumnSchemaRef]    |
+//! | [TableConstraint] | [TableConstraintRef] |
+//! | [ForeignKeyUsage] | [ForeignKeyUsageRef] |
+
+/// A collection of database catalogs, returned by 
[crate::AdbcConnection::get_objects].
+pub trait DatabaseCatalogCollection {
+    type CatalogEntryType<'a>: DatabaseCatalogEntry<'a>
+    where
+        Self: 'a;
+    type CatalogIterator<'a>: Iterator<Item = Self::CatalogEntryType<'a>> + 'a
+    where
+        Self: 'a;
+
+    /// List all catalogs in the result set.
+    fn catalogs(&self) -> Self::CatalogIterator<'_>;
+
+    /// Get a particular catalog by name.
+    ///
+    /// Databases that have no notion of catalogs will have one with None for 
a name.
+    /// This is case sensitive.
+    fn catalog(&self, name: Option<&str>) -> 
Option<Self::CatalogEntryType<'_>> {
+        self.catalogs().find(|catalog| catalog.name() == name)
+    }
+}
+
+/// An entry in a [DatabaseCatalogCollection] representing a single catalog.
+pub trait DatabaseCatalogEntry<'a> {
+    type SchemaEntryType: DatabaseSchemaEntry<'a> + 'a;
+    type SchemaIterator: Iterator<Item = Self::SchemaEntryType> + 'a;
+
+    /// Get the name of the catalog.
+    fn name(&self) -> Option<&'a str>;
+
+    /// List all schemas in this catalog that are in the result set.
+    fn schemas(&self) -> Self::SchemaIterator;
+
+    /// Get a particular schema by name.
+    ///
+    /// Databases that have no notion of schemas will have one with None for a 
name.
+    /// This is case sensitive.
+    fn schema(&self, name: Option<&str>) -> Option<Self::SchemaEntryType> {
+        self.schemas().find(|schema| schema.name() == name)
+    }
+}
+
+/// An entry in [DatabaseCatalogCollection] representing a single schema.
+pub trait DatabaseSchemaEntry<'a> {
+    type TableEntryType: DatabaseTableEntry<'a>;
+    type TableIterator: Iterator<Item = Self::TableEntryType> + 'a;
+
+    /// Get the name of the schema.
+    fn name(&self) -> Option<&'a str>;
+
+    /// List all the tables in this schema that are in the result set.
+    fn tables(&self) -> Self::TableIterator;
+
+    /// Get a particular table by name.
+    ///
+    /// This is case sensitive.
+    fn table(&self, name: &str) -> Option<Self::TableEntryType> {
+        self.tables().find(|table| table.name() == name)
+    }
+}
+
+/// An entry in the [DatabaseCatalogCollection] representing a single table.
+pub trait DatabaseTableEntry<'a> {
+    type ColumnIterator: Iterator<Item = ColumnSchemaRef<'a>> + 'a;
+    type ConstraintIterator: Iterator<Item = TableConstraintRef<'a>> + 'a;
+
+    /// The name of the table.
+    fn name(&self) -> &'a str;
+
+    /// The table type.
+    ///
+    /// Use [crate::AdbcConnection::get_table_types] to get a list of 
supported types for
+    /// the database.
+    fn table_type(&self) -> &'a str;
+
+    /// List all the columns in the table.
+    fn columns(&self) -> Self::ColumnIterator;
+
+    /// Get a column for a particular ordinal position.
+    ///
+    /// Will return None if the column is not found.
+    fn column(&self, i: i32) -> Option<ColumnSchemaRef<'a>> {
+        self.columns().find(|col| col.ordinal_position == i)
+    }
+
+    /// Get a column by name.
+    ///
+    /// This is case sensitive. Will return None if the column is not found.
+    fn column_by_name(&self, name: &str) -> Option<ColumnSchemaRef<'a>> {
+        self.columns().find(|col| col.name == name)
+    }
+
+    /// List all the constraints on the table.
+    fn constraints(&self) -> Self::ConstraintIterator;
+}
+
+/// An entry in the [DatabaseCatalogCollection] representing a column.
+///
+/// `xdbc_` columns are provided for compatibility with ODBC/JDBC column 
metadata.
+pub struct ColumnSchemaRef<'a> {
+    /// The name of the column.
+    pub name: &'a str,
+    /// The ordinal position of the column.
+    pub ordinal_position: i32,
+    pub remarks: Option<&'a str>,
+    pub xdbc_data_type: Option<i16>,
+    pub xdbc_type_name: Option<&'a str>,
+    pub xdbc_column_size: Option<i32>,
+    pub xdbc_decimal_digits: Option<i16>,
+    pub xdbc_num_prec_radix: Option<i16>,
+    pub xdbc_nullable: Option<i16>,
+    pub xdbc_column_def: Option<&'a str>,
+    pub xdbc_sql_data_type: Option<i16>,
+    pub xdbc_datetime_sub: Option<i16>,
+    pub xdbc_char_octet_length: Option<i32>,
+    pub xdbc_is_nullable: Option<&'a str>,
+    pub xdbc_scope_catalog: Option<&'a str>,
+    pub xdbc_scope_schema: Option<&'a str>,
+    pub xdbc_scope_table: Option<&'a str>,
+    pub xdbc_is_autoincrement: Option<bool>,
+    pub xdbc_is_generatedcolumn: Option<bool>,
+}
+
+impl<'a> ColumnSchemaRef<'a> {
+    pub fn to_owned(&self) -> ColumnSchema {
+        ColumnSchema {
+            name: self.name.to_owned(),
+            ordinal_position: self.ordinal_position,
+            remarks: self.remarks.as_ref().map(|&s| s.to_owned()),
+            xdbc_data_type: self.xdbc_data_type,
+            xdbc_type_name: self.xdbc_type_name.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_column_size: self.xdbc_column_size,
+            xdbc_decimal_digits: self.xdbc_decimal_digits,
+            xdbc_num_prec_radix: self.xdbc_num_prec_radix,
+            xdbc_nullable: self.xdbc_nullable,
+            xdbc_column_def: self.xdbc_column_def.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_sql_data_type: self.xdbc_sql_data_type,
+            xdbc_datetime_sub: self.xdbc_datetime_sub,
+            xdbc_char_octet_length: self.xdbc_char_octet_length,
+            xdbc_is_nullable: self.xdbc_is_nullable.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_scope_catalog: self.xdbc_scope_catalog.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_scope_schema: self.xdbc_scope_schema.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_scope_table: self.xdbc_scope_table.as_ref().map(|&s| 
s.to_owned()),
+            xdbc_is_autoincrement: self.xdbc_is_autoincrement,
+            xdbc_is_generatedcolumn: self.xdbc_is_generatedcolumn,
+        }
+    }
+}
+
+/// An owning version of [ColumnSchemaRef].
+#[derive(Debug, Clone, Default, PartialEq)]
+pub struct ColumnSchema {
+    name: String,
+    ordinal_position: i32,
+    remarks: Option<String>,
+    xdbc_data_type: Option<i16>,
+    xdbc_type_name: Option<String>,
+    xdbc_column_size: Option<i32>,
+    xdbc_decimal_digits: Option<i16>,
+    xdbc_num_prec_radix: Option<i16>,
+    xdbc_nullable: Option<i16>,
+    xdbc_column_def: Option<String>,
+    xdbc_sql_data_type: Option<i16>,
+    xdbc_datetime_sub: Option<i16>,
+    xdbc_char_octet_length: Option<i32>,
+    xdbc_is_nullable: Option<String>,
+    xdbc_scope_catalog: Option<String>,
+    xdbc_scope_schema: Option<String>,
+    xdbc_scope_table: Option<String>,
+    xdbc_is_autoincrement: Option<bool>,
+    xdbc_is_generatedcolumn: Option<bool>,
+}
+
+impl ColumnSchema {
+    pub fn borrow(&self) -> ColumnSchemaRef<'_> {
+        ColumnSchemaRef {
+            name: &self.name,
+            ordinal_position: self.ordinal_position,
+            remarks: self.remarks.as_deref(),
+            xdbc_data_type: self.xdbc_data_type,
+            xdbc_type_name: self.xdbc_type_name.as_deref(),
+            xdbc_column_size: self.xdbc_column_size,
+            xdbc_decimal_digits: self.xdbc_decimal_digits,
+            xdbc_num_prec_radix: self.xdbc_num_prec_radix,
+            xdbc_nullable: self.xdbc_nullable,
+            xdbc_column_def: self.xdbc_column_def.as_deref(),
+            xdbc_sql_data_type: self.xdbc_sql_data_type,
+            xdbc_datetime_sub: self.xdbc_datetime_sub,
+            xdbc_char_octet_length: self.xdbc_char_octet_length,
+            xdbc_is_nullable: self.xdbc_is_nullable.as_deref(),
+            xdbc_scope_catalog: self.xdbc_scope_catalog.as_deref(),
+            xdbc_scope_schema: self.xdbc_scope_schema.as_deref(),
+            xdbc_scope_table: self.xdbc_scope_table.as_deref(),
+            xdbc_is_autoincrement: self.xdbc_is_autoincrement,
+            xdbc_is_generatedcolumn: self.xdbc_is_generatedcolumn,
+        }
+    }
+}
+
+/// An entry in the [DatabaseCatalogCollection] representing a table 
constraint.
+pub struct TableConstraintRef<'a> {
+    pub name: Option<&'a str>,
+    pub columns: Vec<&'a str>,
+    pub constraint_type: TableConstraintTypeRef<'a>,
+}
+
+/// The type of table constraint. Used in [TableConstraintRef].
+pub enum TableConstraintTypeRef<'a> {
+    Check,
+    PrimaryKey,
+    ForeignKey { usage: Vec<ForeignKeyUsageRef<'a>> },
+    Unique,
+}
+
+impl<'a> TableConstraintRef<'a> {
+    pub fn to_owned(&self) -> TableConstraint {
+        let name = self.name.as_ref().map(|&s| s.to_owned());
+        let columns = self.columns.iter().map(|&s| s.to_owned()).collect();
+
+        let constraint_type = match &self.constraint_type {
+            TableConstraintTypeRef::ForeignKey { usage } => 
TableConstraintType::ForeignKey {
+                usage: usage.iter().map(|u| u.to_owned()).collect(),
+            },
+            TableConstraintTypeRef::Check => TableConstraintType::Check,
+            TableConstraintTypeRef::PrimaryKey => 
TableConstraintType::PrimaryKey,
+            TableConstraintTypeRef::Unique => TableConstraintType::Unique,
+        };
+
+        TableConstraint {
+            name,
+            columns,
+            constraint_type,
+        }
+    }
+}
+
+impl<'a> TableConstraintTypeRef<'a> {
+    pub fn variant_name(&self) -> &'static str {
+        match self {
+            Self::Check => "CHECK",
+            Self::ForeignKey { usage: _ } => "FOREIGN_KEY",
+            Self::PrimaryKey => "PRIMARY_KEY",
+            Self::Unique => "UNIQUE",
+        }
+    }
+}
+
+/// The location of a foreign key. Used in [TableConstraintRef].
+pub struct ForeignKeyUsageRef<'a> {
+    pub catalog: Option<&'a str>,
+    pub db_schema: Option<&'a str>,
+    pub table: &'a str,
+    pub column_name: &'a str,
+}
+
+impl<'a> ForeignKeyUsageRef<'a> {
+    pub fn to_owned(&self) -> ForeignKeyUsage {
+        ForeignKeyUsage {
+            catalog: self.catalog.as_ref().map(|&s| s.to_owned()),
+            db_schema: self.db_schema.as_ref().map(|&s| s.to_owned()),
+            table: self.table.to_owned(),
+            column_name: self.column_name.to_owned(),
+        }
+    }
+}
+
/// An owning version of [TableConstraintRef].
#[derive(Debug, Clone, PartialEq)]
pub struct TableConstraint {
    /// The name of the constraint, if it has one.
    name: Option<String>,
    /// The columns the constraint applies to.
    columns: Vec<String>,
    /// The kind of constraint, plus any variant-specific data.
    constraint_type: TableConstraintType,
}
+
/// An owning version of [TableConstraintTypeRef].
#[derive(Debug, Clone, PartialEq)]
pub enum TableConstraintType {
    /// A CHECK constraint.
    Check,
    /// A PRIMARY KEY constraint.
    PrimaryKey,
    /// A FOREIGN KEY constraint, with the referenced columns.
    ForeignKey { usage: Vec<ForeignKeyUsage> },
    /// A UNIQUE constraint.
    Unique,
}
+
+impl TableConstraint {
+    pub fn borrow(&self) -> TableConstraintRef<'_> {
+        let name = self.name.as_deref();
+        let columns = self.columns.iter().map(|s| s.as_str()).collect();
+
+        let constraint_type = match &self.constraint_type {
+            TableConstraintType::ForeignKey { usage } => 
TableConstraintTypeRef::ForeignKey {
+                usage: usage.iter().map(|u| u.borrow()).collect(),
+            },
+            TableConstraintType::Check => TableConstraintTypeRef::Check,
+            TableConstraintType::PrimaryKey => 
TableConstraintTypeRef::PrimaryKey,
+            TableConstraintType::Unique => TableConstraintTypeRef::Unique,
+        };
+
+        TableConstraintRef {
+            name,
+            columns,
+            constraint_type,
+        }
+    }
+}
+
/// An owning version of [ForeignKeyUsageRef].
#[derive(Debug, Clone, Default, PartialEq)]
pub struct ForeignKeyUsage {
    /// The catalog of the referenced table, if known.
    pub catalog: Option<String>,
    /// The database schema of the referenced table, if known.
    pub db_schema: Option<String>,
    /// The name of the referenced table.
    pub table: String,
    /// The name of the referenced column.
    pub column_name: String,
}
+
+impl ForeignKeyUsage {
+    pub fn borrow(&self) -> ForeignKeyUsageRef<'_> {
+        ForeignKeyUsageRef {
+            catalog: self.catalog.as_deref(),
+            db_schema: self.db_schema.as_deref(),
+            table: &self.table,
+            column_name: &self.column_name,
+        }
+    }
+}
+
/// A simple collection of database objects, made up of Rust data structures.
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleCatalogCollection {
    /// The catalogs in this collection.
    catalogs: Vec<SimpleCatalogEntry>,
}
+
+impl SimpleCatalogCollection {
+    pub fn new(catalogs: Vec<SimpleCatalogEntry>) -> Self {
+        Self { catalogs }
+    }
+}
+
impl DatabaseCatalogCollection for SimpleCatalogCollection {
    type CatalogEntryType<'a> = &'a SimpleCatalogEntry;
    // Slice iterator lets the associated iterator type be named concretely.
    type CatalogIterator<'a> = std::slice::Iter<'a, SimpleCatalogEntry>;
    /// Iterate over the catalogs in this collection.
    fn catalogs(&self) -> Self::CatalogIterator<'_> {
        self.catalogs.iter()
    }
}
+
/// A single database catalog. See [DatabaseCatalogEntry].
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleCatalogEntry {
    /// The name of the catalog, if it has one.
    name: Option<String>,
    /// The schemas contained in this catalog.
    db_schemas: Vec<SimpleSchemaEntry>,
}
+
+impl SimpleCatalogEntry {
+    pub fn new(name: Option<String>, db_schemas: Vec<SimpleSchemaEntry>) -> 
Self {
+        Self { name, db_schemas }
+    }
+}
+
impl<'a> DatabaseCatalogEntry<'a> for &'a SimpleCatalogEntry {
    type SchemaEntryType = &'a SimpleSchemaEntry;
    // Slice iterator lets the associated iterator type be named concretely.
    type SchemaIterator = std::slice::Iter<'a, SimpleSchemaEntry>;

    /// The name of the catalog, if it has one.
    fn name(&self) -> Option<&'a str> {
        self.name.as_deref()
    }

    /// Iterate over the schemas in this catalog.
    fn schemas(&self) -> Self::SchemaIterator {
        self.db_schemas.iter()
    }
}
+
/// A single database schema. See [DatabaseSchemaEntry].
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleSchemaEntry {
    /// The name of the schema, if it has one.
    name: Option<String>,
    /// The tables contained in this schema.
    tables: Vec<SimpleTableEntry>,
}
+
+impl SimpleSchemaEntry {
+    pub fn new(name: Option<String>, tables: Vec<SimpleTableEntry>) -> Self {
+        Self { name, tables }
+    }
+}
+
impl<'a> DatabaseSchemaEntry<'a> for &'a SimpleSchemaEntry {
    type TableEntryType = &'a SimpleTableEntry;
    // Slice iterator lets the associated iterator type be named concretely.
    type TableIterator = std::slice::Iter<'a, SimpleTableEntry>;

    /// The name of the schema, if it has one.
    fn name(&self) -> Option<&'a str> {
        self.name.as_deref()
    }

    /// Iterate over the tables in this schema.
    fn tables(&self) -> Self::TableIterator {
        self.tables.iter()
    }
}
+
/// A single table. See [DatabaseTableEntry].
#[derive(Debug, Clone, PartialEq)]
pub struct SimpleTableEntry {
    /// The name of the table.
    name: String,
    /// The table type (for example a base table versus a view); exact values
    /// are driver-defined — see the ADBC GetObjects/GetTableTypes spec.
    table_type: String,
    /// The columns of the table.
    columns: Vec<ColumnSchema>,
    /// The constraints on the table.
    constraints: Vec<TableConstraint>,
}
+
+impl SimpleTableEntry {
+    pub fn new(
+        name: String,
+        table_type: String,
+        columns: Vec<ColumnSchema>,
+        constraints: Vec<TableConstraint>,
+    ) -> Self {
+        Self {
+            name,
+            table_type,
+            columns,
+            constraints,
+        }
+    }
+}
+
impl<'a> DatabaseTableEntry<'a> for &'a SimpleTableEntry {
    // A fn-pointer item type (rather than a closure type) lets the Map
    // iterator be named as an associated type without boxing.
    type ColumnIterator = std::iter::Map<
        std::slice::Iter<'a, ColumnSchema>,
        fn(&ColumnSchema) -> ColumnSchemaRef<'_>,
    >;
    type ConstraintIterator = std::iter::Map<
        std::slice::Iter<'a, TableConstraint>,
        fn(&TableConstraint) -> TableConstraintRef<'_>,
    >;

    /// The name of the table.
    fn name(&self) -> &'a str {
        &self.name
    }

    /// The table type; exact values are driver-defined.
    fn table_type(&self) -> &'a str {
        &self.table_type
    }

    /// Iterate over borrowed views of the table's columns.
    fn columns(&self) -> Self::ColumnIterator {
        // The closure coerces to the fn pointer in ColumnIterator.
        self.columns.iter().map(|col| col.borrow())
    }

    /// Iterate over borrowed views of the table's constraints.
    fn constraints(&self) -> Self::ConstraintIterator {
        // The closure coerces to the fn pointer in ConstraintIterator.
        self.constraints
            .iter()
            .map(|constraint| constraint.borrow())
    }
}

Reply via email to