This is an automated email from the ASF dual-hosted git repository.

dheres pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-rs.git


The following commit(s) were added to refs/heads/main by this push:
     new 5234006c23 Upgrade to object store 0.13.1 (#9256)
5234006c23 is described below

commit 5234006c235d75d6a3dc4543c1c0d31a30cf9782
Author: Daniël Heres <[email protected]>
AuthorDate: Wed Jan 28 14:27:08 2026 +0100

    Upgrade to object store 0.13.1 (#9256)
    
    # Which issue does this PR close?
    
    <!--
    We generally require a GitHub issue to be filed for all bug fixes and
    enhancements and this helps us generate change logs for our releases.
    You can link an issue to this PR using the GitHub syntax.
    -->
    
    - Closes #NNN.
    
    # Rationale for this change
    
    <!--
    Why are you proposing this change? If this is already explained clearly
    in the issue then this section is not needed.
    Explaining clearly why changes are proposed helps reviewers understand
    your changes and offer better suggestions for fixes.
    -->
    
    # What changes are included in this PR?
    
    <!--
    There is no need to duplicate the description in the issue here but it
    is sometimes worth providing a summary of the individual changes in this
    PR.
    -->
    
    # Are these changes tested?
    
    <!--
    We typically require tests for all PRs in order to:
    1. Prevent the code from being accidentally broken by subsequent changes
    2. Serve as another way to document the expected behavior of the code
    
    If tests are not included in your PR, please explain why (for example,
    are they covered by existing tests)?
    -->
    
    # Are there any user-facing changes?
    
    <!--
    If there are user-facing changes then we may require documentation to be
    updated before approving the PR.
    
    If there are any breaking changes to public APIs, please call them out.
    -->
---
 parquet/Cargo.toml                           |  4 ++--
 parquet/src/arrow/async_reader/store.rs      | 10 +++++-----
 parquet/src/arrow/async_writer/store.rs      |  3 ++-
 parquet/tests/encryption/encryption_async.rs |  2 +-
 4 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/parquet/Cargo.toml b/parquet/Cargo.toml
index 053adc4fca..bea1f622b4 100644
--- a/parquet/Cargo.toml
+++ b/parquet/Cargo.toml
@@ -49,7 +49,7 @@ parquet-variant = { workspace = true, optional = true }
 parquet-variant-json = { workspace = true, optional = true }
 parquet-variant-compute = { workspace = true, optional = true }
 
-object_store = { version = "0.12.0", default-features = false, optional = true 
}
+object_store = { version = "0.13.1", default-features = false, optional = true 
}
 
 bytes = { version = "1.1", default-features = false, features = ["std"] }
 thrift = { version = "0.17", default-features = false }
@@ -93,7 +93,7 @@ arrow = { workspace = true, features = ["ipc", "test_utils", 
"prettyprint", "jso
 arrow-cast = { workspace = true }
 tokio = { version = "1.0", default-features = false, features = ["macros", 
"rt-multi-thread", "io-util", "fs"] }
 rand = { version = "0.9", default-features = false, features = ["std", 
"std_rng", "thread_rng"] }
-object_store = { version = "0.12.0", default-features = false, features = 
["azure", "fs"] }
+object_store = { version = "0.13.1", default-features = false, features = 
["azure", "fs"] }
 sysinfo = { version = "0.37.1", default-features = false, features = 
["system"] }
 
 [package.metadata.docs.rs]
diff --git a/parquet/src/arrow/async_reader/store.rs 
b/parquet/src/arrow/async_reader/store.rs
index 59b161bbc6..d47ca744d8 100644
--- a/parquet/src/arrow/async_reader/store.rs
+++ b/parquet/src/arrow/async_reader/store.rs
@@ -23,17 +23,17 @@ use crate::errors::{ParquetError, Result};
 use crate::file::metadata::{PageIndexPolicy, ParquetMetaData, 
ParquetMetaDataReader};
 use bytes::Bytes;
 use futures::{FutureExt, TryFutureExt, future::BoxFuture};
+use object_store::ObjectStoreExt;
 use object_store::{GetOptions, GetRange};
 use object_store::{ObjectStore, path::Path};
 use tokio::runtime::Handle;
-
 /// Reads Parquet files in object storage using [`ObjectStore`].
 ///
 /// ```no_run
 /// # use std::io::stdout;
 /// # use std::sync::Arc;
 /// # use object_store::azure::MicrosoftAzureBuilder;
-/// # use object_store::ObjectStore;
+/// # use object_store::{ObjectStore, ObjectStoreExt};
 /// # use object_store::path::Path;
 /// # use parquet::arrow::async_reader::ParquetObjectReader;
 /// # use parquet::arrow::ParquetRecordBatchStreamBuilder;
@@ -93,7 +93,7 @@ impl ParquetObjectReader {
     /// Providing this size up front is an important optimization to avoid 
extra calls when the
     /// underlying store does not support suffix range requests.
     ///
-    /// The file size can be obtained using [`ObjectStore::list`] or 
[`ObjectStore::head`].
+    /// The file size can be obtained using [`ObjectStore::list`] or 
[`ObjectStoreExt::head`].
     pub fn with_file_size(self, file_size: u64) -> Self {
         Self {
             file_size: Some(file_size),
@@ -186,7 +186,7 @@ impl MetadataSuffixFetch for &mut ParquetObjectReader {
 
 impl AsyncFileReader for ParquetObjectReader {
     fn get_bytes(&mut self, range: Range<u64>) -> BoxFuture<'_, Result<Bytes>> 
{
-        self.spawn(|store, path| store.get_range(path, range))
+        self.spawn(|store, path| store.get_range(path, range).boxed())
     }
 
     fn get_byte_ranges(&mut self, ranges: Vec<Range<u64>>) -> BoxFuture<'_, 
Result<Vec<Bytes>>>
@@ -264,7 +264,7 @@ mod tests {
     use futures::FutureExt;
     use object_store::local::LocalFileSystem;
     use object_store::path::Path;
-    use object_store::{ObjectMeta, ObjectStore};
+    use object_store::{ObjectMeta, ObjectStore, ObjectStoreExt};
 
     async fn get_meta_store() -> (ObjectMeta, Arc<dyn ObjectStore>) {
         let res = parquet_test_data();
diff --git a/parquet/src/arrow/async_writer/store.rs 
b/parquet/src/arrow/async_writer/store.rs
index b067e4d927..698248e619 100644
--- a/parquet/src/arrow/async_writer/store.rs
+++ b/parquet/src/arrow/async_writer/store.rs
@@ -32,7 +32,7 @@ use tokio::io::AsyncWriteExt;
 /// # use arrow_array::{ArrayRef, Int64Array, RecordBatch};
 /// # use object_store::memory::InMemory;
 /// # use object_store::path::Path;
-/// # use object_store::ObjectStore;
+/// # use object_store::{ObjectStore, ObjectStoreExt};
 /// # use std::sync::Arc;
 ///
 /// # use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
@@ -125,6 +125,7 @@ mod tests {
     use super::*;
     use crate::arrow::AsyncArrowWriter;
     use crate::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
+    use object_store::ObjectStoreExt;
 
     #[tokio::test]
     async fn test_async_writer() {
diff --git a/parquet/tests/encryption/encryption_async.rs 
b/parquet/tests/encryption/encryption_async.rs
index dc57ecd50d..48c844afb9 100644
--- a/parquet/tests/encryption/encryption_async.rs
+++ b/parquet/tests/encryption/encryption_async.rs
@@ -297,9 +297,9 @@ async fn get_encrypted_meta_store() -> (
     object_store::ObjectMeta,
     std::sync::Arc<dyn object_store::ObjectStore>,
 ) {
-    use object_store::ObjectStore;
     use object_store::local::LocalFileSystem;
     use object_store::path::Path;
+    use object_store::{ObjectStore, ObjectStoreExt};
 
     use std::sync::Arc;
     let test_data = arrow::util::test_util::parquet_test_data();

Reply via email to