alamb commented on code in PR #15018:
URL: https://github.com/apache/datafusion/pull/15018#discussion_r1986295023


##########
datafusion/storage/src/write.rs:
##########
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::context::StorageContext;
+use async_trait::async_trait;
+use bytes::{Buf, Bytes};
+use datafusion_common::Result;
+use futures::AsyncWrite;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+#[derive(Debug, Clone, Default, PartialEq, Eq)]
+pub struct StorageWriteOptions {
+    /// A version indicator for the newly created object
+    pub version: Option<String>,
+}
+
+/// Result for a write request
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct StorageWriteResult {
+    /// The unique identifier for the newly created object
+    ///
+    /// <https://datatracker.ietf.org/doc/html/rfc9110#name-etag>
+    pub e_tag: Option<String>,
+    /// A version indicator for the newly created object
+    pub version: Option<String>,
+}
+
+#[async_trait]
+pub trait StorageFileWrite: Send + Sync + 'static {
+    /// Write a single chunk of data to the file
+    async fn write(&mut self, ctx: &StorageContext, data: Bytes) -> Result<()>;
+
+    /// Finish writing to the file and return the result
+    async fn finish(&mut self, ctx: &StorageContext) -> 
Result<StorageWriteResult>;
+}
+
+pub struct StorageFileWriter {
+    inner: Box<dyn StorageFileWrite>,
+}
+
+/// Expose public API for writing to a file
+impl StorageFileWriter {
+    pub fn into_future_writer(self) -> StorageFileFuturesWriter {
+        todo!()
+    }
+
+    pub fn into_sink(self) -> StorageFileBytesSink {
+        todo!()
+    }
+
+    pub fn into_tokio_writer(self) -> StorageFileTokioWriter {
+        todo!()
+    }
+}
+
+/// Adapter to allow using `futures::io::AsyncWrite` with `StorageFileWriter`
+pub struct StorageFileFuturesWriter {}
+
+/// Adapter to allow using `futures::io::Sink` with `StorageFileWriter`
+pub struct StorageFileBytesSink {}
+
+/// Adapter to allow using `tokio::io::AsyncWrite` with `StorageFileWriter`

Review Comment:
   One of the concerns I have heard about the [`AsyncWrite` 
API](https://docs.rs/futures/0.3.31/futures/io/trait.AsyncWrite.html) is that 
it takes a `&[u8]`, pretty much requiring an extra copy
   
   I wonder if you have ideas about having the API take ownership of data -- 
for example 
https://docs.rs/object_store/latest/object_store/struct.PutPayload.html (which 
uses `Bytes` to potentially avoid copying)
   
   
   
   
   https://docs.rs/object_store/latest/object_store/struct.PutPayload.html



##########
datafusion/storage/src/read.rs:
##########
@@ -0,0 +1,149 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::context::StorageContext;
+use crate::file_metadata::StorageFileMetadata;
+use async_trait::async_trait;
+use bytes::{Buf, Bytes};
+use chrono::{DateTime, Utc};
+use datafusion_common::Result;
+use futures::stream::BoxStream;
+use futures::{stream, Stream, StreamExt};
+use std::ops::{Range, RangeBounds};
+
+/// Request only a portion of an object's bytes
+///
+/// Implementations may wish to inspect [`ReadResult`] for the exact byte
+/// range returned.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum ReadRange {
+    /// Request a specific range of bytes
+    ///
+    /// If the given range is zero-length or starts after the end of the 
object,
+    /// an error will be returned. Additionally, if the range ends after the 
end
+    /// of the object, the entire remainder of the object will be returned.
+    /// Otherwise, the exact requested range will be returned.
+    Bounded(Range<u64>),
+    /// Request all bytes starting from a given byte offset
+    Offset(u64),
+    /// Request up to the last n bytes
+    Suffix(u64),
+}
+
+impl<T: RangeBounds<u64>> From<T> for ReadRange {
+    fn from(value: T) -> Self {
+        use std::ops::Bound::*;
+        let first = match value.start_bound() {
+            Included(i) => *i,
+            Excluded(i) => i + 1,
+            Unbounded => 0,
+        };
+        match value.end_bound() {
+            Included(i) => Self::Bounded(first..(i + 1)),
+            Excluded(i) => Self::Bounded(first..*i),
+            Unbounded => Self::Offset(first),
+        }
+    }
+}
+
+/// Options for a read request, such as range
+#[derive(Debug, Default, Clone)]
+pub struct StorageReadOptions {
+    /// Request will succeed if the `StorageFileMetadata::e_tag` matches
+    /// otherwise returning [`Error::Precondition`]
+    ///
+    /// See <https://datatracker.ietf.org/doc/html/rfc9110#name-if-match>
+    ///
+    /// Examples:
+    ///
+    /// ```text
+    /// If-Match: "xyzzy"
+    /// If-Match: "xyzzy", "r2d2xxxx", "c3piozzzz"
+    /// If-Match: *
+    /// ```
+    pub if_match: Option<String>,
+    /// Request will succeed if the `StorageFileMetadata::e_tag` does not match
+    /// otherwise returning [`Error::NotModified`]
+    ///
+    /// See <https://datatracker.ietf.org/doc/html/rfc9110#section-13.1.2>
+    ///
+    /// Examples:
+    ///
+    /// ```text
+    /// If-None-Match: "xyzzy"
+    /// If-None-Match: "xyzzy", "r2d2xxxx", "c3piozzzz"
+    /// If-None-Match: *
+    /// ```
+    pub if_none_match: Option<String>,
+    /// Request will succeed if the object has been modified since
+    ///
+    /// <https://datatracker.ietf.org/doc/html/rfc9110#section-13.1.3>
+    pub if_modified_since: Option<DateTime<Utc>>,
+    /// Request will succeed if the object has not been modified since
+    /// otherwise returning [`Error::Precondition`]
+    ///
+    /// Some stores, such as S3, will only return `NotModified` for exact
+    /// timestamp matches, instead of for any timestamp greater than or equal.
+    ///
+    /// <https://datatracker.ietf.org/doc/html/rfc9110#section-13.1.4>
+    pub if_unmodified_since: Option<DateTime<Utc>>,
+    /// Request transfer of only the specified range of bytes
+    /// otherwise returning [`Error::NotModified`]
+    ///
+    /// <https://datatracker.ietf.org/doc/html/rfc9110#name-range>
+    pub range: Option<ReadRange>,
+    /// Request a particular object version
+    pub version: Option<String>,
+}
+
+#[async_trait]
+pub trait StorageFileRead: Send + Sync + 'static {
+    async fn read(&mut self, ctx: &StorageContext) -> Result<Bytes>;
+}
+
+pub struct StorageFileReader {
+    meta: StorageFileMetadata,
+    inner: Box<dyn StorageFileRead>,
+}
+
+/// Expose public API for reading from a file
+impl StorageFileReader {
+    pub fn metadata(&self) -> &StorageFileMetadata {
+        &self.meta
+    }
+
+    pub fn into_futures_reader(self) -> StorageFileFuturesReader {
+        todo!()
+    }
+
+    pub fn into_stream(self) -> StorageFileBytesStream {
+        todo!()
+    }
+
+    pub fn into_tokio_reader(self) -> StorageFileTokioReader {
+        todo!()
+    }
+}
+
+/// Adapter to allow using `futures::io::AsyncRead` with `StorageFileReader`

Review Comment:
   I have the same question about read as write -- do all these APIs require 
copying the data (i.e., take the data as `&[u8]`)?
   
   One of the neat things we do with the object store API and parquet reader 
when reading strings in `Utf8View`  is that the same `Bytes` buffer that is 
returned from `object_store` is converted into a parquet Buffer and then 
eventually forms the underlying Arrow Buffer of the returned array
   
   In other words, there is no copying of data once it comes back from the 
object store



##########
datafusion/storage/src/write.rs:
##########
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::context::StorageContext;
+use async_trait::async_trait;
+use bytes::{Buf, Bytes};
+use datafusion_common::Result;
+use futures::AsyncWrite;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{Context, Poll};
+
+#[derive(Debug, Clone, Default, PartialEq, Eq)]
+pub struct StorageWriteOptions {
+    /// A version indicator for the newly created object
+    pub version: Option<String>,
+}
+
+/// Result for a write request
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct StorageWriteResult {
+    /// The unique identifier for the newly created object
+    ///
+    /// <https://datatracker.ietf.org/doc/html/rfc9110#name-etag>
+    pub e_tag: Option<String>,
+    /// A version indicator for the newly created object
+    pub version: Option<String>,
+}
+
+#[async_trait]
+pub trait StorageFileWrite: Send + Sync + 'static {
+    /// Write a single chunk of data to the file
+    async fn write(&mut self, ctx: &StorageContext, data: Bytes) -> Result<()>;
+
+    /// Finish writing to the file and return the result
+    async fn finish(&mut self, ctx: &StorageContext) -> 
Result<StorageWriteResult>;
+}
+
+pub struct StorageFileWriter {
+    inner: Box<dyn StorageFileWrite>,

Review Comment:
   I wonder if this was meant to be a trait (rather than a self-recursive 
structure)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: github-unsubscr...@datafusion.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: github-unsubscr...@datafusion.apache.org
For additional commands, e-mail: github-h...@datafusion.apache.org

Reply via email to