adriangb commented on code in PR #6431:
URL: https://github.com/apache/arrow-rs/pull/6431#discussion_r1773756768


##########
parquet/src/file/metadata/reader.rs:
##########
@@ -0,0 +1,954 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::{io::Read, ops::Range, sync::Arc};
+
+use bytes::Bytes;
+
+use crate::basic::ColumnOrder;
+use crate::errors::{ParquetError, Result};
+use crate::file::metadata::{FileMetaData, ParquetMetaData, RowGroupMetaData};
+use crate::file::page_index::index::Index;
+use crate::file::page_index::index_reader::{acc_range, decode_column_index, 
decode_offset_index};
+use crate::file::reader::ChunkReader;
+use crate::file::{FOOTER_SIZE, PARQUET_MAGIC};
+use crate::format::{ColumnOrder as TColumnOrder, FileMetaData as 
TFileMetaData};
+use crate::schema::types;
+use crate::schema::types::SchemaDescriptor;
+use crate::thrift::{TCompactSliceInputProtocol, TSerializable};
+
+#[cfg(feature = "async")]
+use crate::arrow::async_reader::MetadataFetch;
+
+/// Reads the [`ParquetMetaData`] from a byte stream.
+///
+/// See [`crate::file::metadata::ParquetMetaDataWriter#output-format`] for a 
description of
+/// the Parquet metadata.
+///
+/// # Example
+/// ```no_run
+/// # use parquet::file::metadata::ParquetMetaDataReader;
+/// # fn open_parquet_file(path: &str) -> std::fs::File { unimplemented!(); }
+/// // read parquet metadata including page indexes
+/// let file = open_parquet_file("some_path.parquet");
+/// let mut reader = ParquetMetaDataReader::new()
+///     .with_page_indexes(true);
+/// reader.try_parse(&file).unwrap();
+/// let metadata = reader.finish().unwrap();
+/// assert!(metadata.column_index().is_some());
+/// assert!(metadata.offset_index().is_some());
+/// ```
+#[derive(Default)]
+pub struct ParquetMetaDataReader {
+    metadata: Option<ParquetMetaData>,
+    column_index: bool,
+    offset_index: bool,
+    prefetch_hint: Option<usize>,
+}
+
+impl ParquetMetaDataReader {
+    /// Create a new [`ParquetMetaDataReader`]
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    /// Create a new [`ParquetMetaDataReader`] populated with a 
[`ParquetMetaData`] struct
+    /// obtained via other means.
+    pub fn new_with_metadata(metadata: ParquetMetaData) -> Self {
+        Self {
+            metadata: Some(metadata),
+            ..Default::default()
+        }
+    }
+
+    /// Enable or disable reading the page index structures described in
+    /// "[Parquet page index]: Layout to Support Page Skipping". Equivalent to:
+    /// `self.with_column_indexes(val).with_offset_indexes(val)`
+    ///
+    /// [Parquet page index]: 
https://github.com/apache/parquet-format/blob/master/PageIndex.md
+    pub fn with_page_indexes(self, val: bool) -> Self {
+        self.with_column_indexes(val).with_offset_indexes(val)
+    }
+
+    /// Enable or disable reading the Parquet [ColumnIndex] structure.
+    ///
+    /// [ColumnIndex]:  
https://github.com/apache/parquet-format/blob/master/PageIndex.md
+    pub fn with_column_indexes(mut self, val: bool) -> Self {
+        self.column_index = val;
+        self
+    }
+
+    /// Enable or disable reading the Parquet [OffsetIndex] structure.
+    ///
+    /// [OffsetIndex]:  
https://github.com/apache/parquet-format/blob/master/PageIndex.md
+    pub fn with_offset_indexes(mut self, val: bool) -> Self {
+        self.offset_index = val;
+        self
+    }
+
+    /// Provide a hint as to the number of bytes needed to fully parse the 
[`ParquetMetaData`].
+    /// Only used for the asynchronous [`Self::try_load()`] method.
+    ///
+    /// By default, the reader will first fetch the last 8 bytes of the input 
file to obtain the
+    /// size of the footer metadata. A second fetch will be performed to 
obtain the needed bytes.
+    /// After parsing the footer metadata, a third fetch will be performed to 
obtain the bytes
+    /// needed to decode the page index structures, if they have been 
requested. To avoid
+    /// unnecessary fetches, `prefetch` can be set to an estimate of the 
number of bytes needed
+    /// to fully decode the [`ParquetMetaData`], which can reduce the number 
of fetch requests and
+    /// reduce latency. Setting `prefetch` too small will not trigger an 
error, but will result
+    /// in extra fetches being performed.
+    pub fn with_prefetch_hint(mut self, prefetch: Option<usize>) -> Self {
+        self.prefetch_hint = prefetch;
+        self
+    }
+
+    /// Indicates whether a [`ParquetMetaData`] is present.
+    pub fn has_metadata(&self) -> bool {
+        self.metadata.is_some()
+    }
+
+    /// Return the parsed [`ParquetMetaData`] struct, leaving `None` in its 
place.
+    pub fn finish(&mut self) -> Result<ParquetMetaData> {
+        self.metadata
+            .take()
+            .ok_or_else(|| general_err!("could not parse parquet metadata"))
+    }
+
+    /// Given a [`ChunkReader`], parse and return the [`ParquetMetaData`] in a 
single pass.
+    ///
+    /// If `reader` is [`Bytes`] based, then the buffer must contain 
sufficient bytes to complete

Review Comment:
   I mean the use case is basically:
   1. You have the parquet bytes in memory. From that you load the 
`ParquetMetaData` including page indexes.
   2. Use the new writer to write the `ParquetMetaData` to bytes and store the 
bytes in a K/V store.
   3. When I want to load this file again, I start by getting the metadata bytes 
from the K/V store.
   4. I decode those bytes using this new API, passing in the original file 
size to adjust the offsets.
   5. I now have a `ParquetMetaData` in memory that I can pass back, so that 
upstream consumers can decide whether they need to read the rest of the file at 
all, and if so, which pages in the file they need to read.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to