alamb commented on code in PR #2045:
URL: https://github.com/apache/arrow-rs/pull/2045#discussion_r918262650


##########
parquet/src/file/statistics.rs:
##########
@@ -341,21 +393,24 @@ impl fmt::Display for Statistics {
 }
 
 /// Typed implementation for [`Statistics`].
-#[derive(Clone)]
-pub struct TypedStatistics<T: DataType> {
-    min: Option<T::T>,
-    max: Option<T::T>,
+pub type TypedStatistics<T> = ValueStatistics<<T as DataType>::T>;

Review Comment:
   I am sorry my rust-fu isn't good enough -- what is the purpose of renaming
`TypedStatistics` to `ValueStatistics` (and doing the `DataType::T` nonsense in
the typedef)? Is that needed so that `#[derive]` works?
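
   For context, my (possibly wrong) understanding of the fully qualified form: the
`type_alias_bounds` lint discourages writing `T: DataType` bounds on type aliases
because they are not enforced, so `<T as DataType>::T` names the associated type
without needing a bound at all. A minimal sketch with names simplified from the PR:

   ```rust
   // Minimal sketch; trait and struct simplified from the PR's types.
   trait DataType {
       type T;
   }

   struct ValueStatistics<V> {
       min: Option<V>,
       max: Option<V>,
   }

   // Bounds on type aliases are not enforced (and are linted against), so the
   // fully qualified form resolves the associated type explicitly instead.
   type TypedStatistics<T> = ValueStatistics<<T as DataType>::T>;
   ```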



##########
parquet/src/column/writer/mod.rs:
##########
@@ -202,65 +184,49 @@ pub struct ColumnWriterImpl<'a, T: DataType> {
     total_num_values: u64,
     dictionary_page_offset: Option<u64>,
     data_page_offset: Option<u64>,
-    min_column_value: Option<T::T>,
-    max_column_value: Option<T::T>,
+    min_column_value: Option<E::T>,
+    max_column_value: Option<E::T>,
     num_column_nulls: u64,
     column_distinct_count: Option<u64>,
+    encodings: BTreeSet<Encoding>,

Review Comment:
   Why is this a BTreeSet (which requires `Ord` on `Encoding`) rather than a 
`HashSet`? Is there any reason the ordering is important?
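
   For reference, a sketch of the behavioral difference I am asking about,
assuming the iteration order ends up in the written metadata (a `HashSet`'s
order is unspecified, a `BTreeSet`'s is sorted):

   ```rust
   use std::collections::{BTreeSet, HashSet};

   fn main() {
       // BTreeSet iterates in sorted (`Ord`) order: deterministic output
       let ordered: BTreeSet<u8> = [3, 1, 2].into_iter().collect();
       assert_eq!(ordered.into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);

       // HashSet iteration order is unspecified and can differ between runs,
       // so anything serialized from it would be non-deterministic
       let unordered: HashSet<u8> = [3, 1, 2].into_iter().collect();
       let _arbitrary: Vec<u8> = unordered.into_iter().collect();
   }
   ```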



##########
parquet/src/column/writer/mod.rs:
##########
@@ -1395,14 +1233,14 @@ mod tests {
             true,
             &[1, 2],
             Some(0),
-            &[Encoding::PLAIN, Encoding::RLE_DICTIONARY, Encoding::RLE],
+            &[Encoding::PLAIN, Encoding::RLE, Encoding::RLE_DICTIONARY],
         );
         check_encoding_write_support::<Int32Type>(
             WriterVersion::PARQUET_2_0,
             false,
             &[1, 2],
             None,
-            &[Encoding::DELTA_BINARY_PACKED, Encoding::RLE],
+            &[Encoding::RLE, Encoding::DELTA_BINARY_PACKED],

Review Comment:
   I don't understand these changes. Why did the order of supported encodings 
change? Is it due to the use of a Set?
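
   My guess is the expectations now reflect the derived `Ord` of `Encoding`,
which for a `#[derive(Ord)]` enum follows variant declaration order. A sketch
with a hypothetical enum (the variant order here is my assumption, not a copy
of `basic.rs`):

   ```rust
   use std::collections::BTreeSet;

   // Hypothetical stand-in for basic.rs's Encoding; derived Ord compares
   // variants in the order they are declared.
   #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
   enum Encoding {
       Plain,
       Rle,
       DeltaBinaryPacked,
       RleDictionary,
   }

   fn main() {
       let set: BTreeSet<Encoding> =
           [Encoding::RleDictionary, Encoding::Rle, Encoding::Plain]
               .into_iter()
               .collect();

       // Iteration yields declaration order, regardless of insertion order
       assert_eq!(
           set.into_iter().collect::<Vec<_>>(),
           vec![Encoding::Plain, Encoding::Rle, Encoding::RleDictionary]
       );
   }
   ```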



##########
parquet/src/column/writer/encoder.rs:
##########
@@ -0,0 +1,247 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::basic::Encoding;
+use crate::column::writer::{compare_greater, fallback_encoding, has_dictionary_support, is_nan, update_max, update_min};
+use crate::data_type::private::ParquetValueType;
+use crate::data_type::DataType;
+use crate::encodings::encoding::{get_encoder, DictEncoder, Encoder};
+use crate::errors::{ParquetError, Result};
+use crate::file::properties::WriterProperties;
+use crate::schema::types::{ColumnDescPtr, ColumnDescriptor};
+use crate::util::memory::ByteBufferPtr;
+
+/// A collection of [`ParquetValueType`] encoded by a [`ColumnValueEncoder`]
+pub trait ColumnValues {

Review Comment:
   Somehow I am missing how the actual values are retrieved (as in where are 
the inputs actually read from?)
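
   (My current reading, which may be off: `ColumnValues` is implemented directly
for slices, so the encoder borrows straight out of the caller-provided buffer.
A rough sketch of that assumption:)

   ```rust
   // Rough sketch of my assumption: ColumnValues implemented for slices, so
   // "retrieval" is just borrowing a sub-range of the caller's data.
   trait ColumnValues {
       type T;
       fn len(&self) -> usize;
   }

   impl<T> ColumnValues for [T] {
       type T = T;
       fn len(&self) -> usize {
           // resolves to the inherent slice `len`, not this trait method
           (*self).len()
       }
   }

   fn write<V: ColumnValues + ?Sized>(values: &V, offset: usize, len: usize) {
       // an encoder would slice values[offset..offset + len] and encode that
       assert!(offset + len <= values.len());
   }
   ```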



##########
parquet/src/column/writer/mod.rs:
##########
@@ -174,26 +154,28 @@ type ColumnCloseResult = (
 );
 
 /// Typed column writer for a primitive column.
-pub struct ColumnWriterImpl<'a, T: DataType> {
+pub type ColumnWriterImpl<'a, T> = GenericColumnWriter<'a, ColumnValueEncoderImpl<T>>;
+
+#[doc(hidden)]
+pub struct GenericColumnWriter<'a, E: ColumnValueEncoder> {
     // Column writer properties
     descr: ColumnDescPtr,
     props: WriterPropertiesPtr,
     statistics_enabled: EnabledStatistics,
 
     page_writer: Box<dyn PageWriter + 'a>,
-    has_dictionary: bool,

Review Comment:
   this appears to be a significant improvement / change -- not tracking 
`DictEncoder` specially
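
   i.e. instead of a separate `has_dictionary` flag that must be kept in sync,
the `Option<DictEncoder>` itself carries the state. A toy sketch of the pattern
as I understand it (stand-in types, not the PR's):

   ```rust
   // Toy stand-ins; the real fields are Option<DictEncoder<T>> and
   // Box<dyn Encoder<T>>.
   struct Toy {
       dict_encoder: Option<Vec<u32>>, // present until we fall back
       fallback: Vec<u32>,
   }

   impl Toy {
       fn put(&mut self, v: u32) {
           // one source of truth: the Option says whether the dictionary
           // path is still active
           match &mut self.dict_encoder {
               Some(dict) => dict.push(v),
               None => self.fallback.push(v),
           }
       }

       fn spill(&mut self) {
           // falling back is just take()-ing the Option; no flag to update
           if let Some(dict) = self.dict_encoder.take() {
               self.fallback.extend(dict);
           }
       }
   }
   ```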



##########
parquet/src/file/statistics.rs:
##########
@@ -37,15 +37,45 @@
 //! }
 //! ```
 
-use std::{cmp, fmt};
+use std::fmt;
 
 use byteorder::{ByteOrder, LittleEndian};
 use parquet_format::Statistics as TStatistics;
 
 use crate::basic::Type;
+use crate::data_type::private::ParquetValueType;
 use crate::data_type::*;
 use crate::util::bit_util::from_ne_slice;
 
+pub(crate) mod private {
+    use super::*;
+
+    pub trait MakeStatistics {

Review Comment:
   Maybe another reason not to switch from `TypedStatistics` to `ValueStatistics` 🤔



##########
parquet/src/column/writer/encoder.rs:
##########
@@ -0,0 +1,247 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use crate::basic::Encoding;
+use crate::column::writer::{compare_greater, fallback_encoding, has_dictionary_support, is_nan, update_max, update_min};
+use crate::data_type::private::ParquetValueType;
+use crate::data_type::DataType;
+use crate::encodings::encoding::{get_encoder, DictEncoder, Encoder};
+use crate::errors::{ParquetError, Result};
+use crate::file::properties::WriterProperties;
+use crate::schema::types::{ColumnDescPtr, ColumnDescriptor};
+use crate::util::memory::ByteBufferPtr;
+
+/// A collection of [`ParquetValueType`] encoded by a [`ColumnValueEncoder`]
+pub trait ColumnValues {
+    /// The underlying value type
+    type T: ParquetValueType;
+
+    /// The number of values in this collection
+    fn len(&self) -> usize;
+
+    /// Returns the min and max values in this collection, skipping any NaN values
+    ///
+    /// Returns `None` if no values found
+    fn min_max(&self, descr: &ColumnDescriptor) -> Option<(&Self::T, &Self::T)>;
+}
+
+/// The encoded data for a dictionary page
+pub struct DictionaryPage {
+    pub buf: ByteBufferPtr,
+    pub num_values: usize,
+    pub is_sorted: bool,
+}
+
+/// The encoded values for a data page, with optional statistics
+pub struct DataPageValues<T> {
+    pub buf: ByteBufferPtr,
+    pub num_values: usize,
+    pub encoding: Encoding,
+    pub min_value: Option<T>,
+    pub max_value: Option<T>,
+}
+
+/// A generic encoder of [`ColumnValues`] to data and dictionary pages used by
+/// [`super::GenericColumnWriter`]
+pub trait ColumnValueEncoder {
+    /// The underlying value type of [`Self::Values`]
+    ///
+    /// Note: this avoids needing to fully qualify `<Self::Values as ColumnValues>::T`
+    type T: ParquetValueType;
+
+    /// The values encoded by this encoder
+    type Values: ColumnValues<T = Self::T> + ?Sized;
+
+    /// Create a new [`ColumnValueEncoder`]
+    fn try_new(descr: &ColumnDescPtr, props: &WriterProperties) -> Result<Self>
+    where
+        Self: Sized;
+
+    /// Write the corresponding values to this [`ColumnValueEncoder`]
+    fn write(&mut self, values: &Self::Values, offset: usize, len: usize) -> Result<()>;
+
+    /// Returns the number of buffered values
+    fn num_values(&self) -> usize;
+
+    /// Returns true if this encoder has a dictionary page
+    fn has_dictionary(&self) -> bool;
+
+    /// Returns an estimate of the dictionary page size in bytes, or `None` if no dictionary
+    fn estimated_dict_page_size(&self) -> Option<usize>;
+
+    /// Returns an estimate of the data page size in bytes
+    fn estimated_data_page_size(&self) -> usize;
+
+    /// Flush the dictionary page for this column chunk if any. Any subsequent calls to
+    /// [`Self::write`] will not be dictionary encoded
+    ///
+    /// Note: [`Self::flush_data_page`] must be called first, as this will error if there
+    /// are any pending page values
+    fn flush_dict_page(&mut self) -> Result<Option<DictionaryPage>>;
+
+    /// Flush the next data page for this column chunk
+    fn flush_data_page(&mut self) -> Result<DataPageValues<Self::T>>;
+}
+
+pub struct ColumnValueEncoderImpl<T: DataType> {
+    encoder: Box<dyn Encoder<T>>,
+    dict_encoder: Option<DictEncoder<T>>,
+    descr: ColumnDescPtr,
+    num_values: usize,
+    min_value: Option<T::T>,
+    max_value: Option<T::T>,
+}
+
+impl<T: DataType> ColumnValueEncoder for ColumnValueEncoderImpl<T> {
+    type T = T::T;
+
+    type Values = [T::T];
+
+    fn try_new(descr: &ColumnDescPtr, props: &WriterProperties) -> Result<Self> {
+        let dict_supported = props.dictionary_enabled(descr.path())
+            && has_dictionary_support(T::get_physical_type(), &props);
+        let dict_encoder = dict_supported.then(|| DictEncoder::new(descr.clone()));
+
+        // Set either main encoder or fallback encoder.
+        let encoder = get_encoder(
+            descr.clone(),
+            props
+                .encoding(descr.path())
+                .unwrap_or_else(|| fallback_encoding(T::get_physical_type(), &props)),
+        )?;
+
+        Ok(Self {
+            encoder,
+            dict_encoder,
+            descr: descr.clone(),
+            num_values: 0,
+            min_value: None,
+            max_value: None,
+        })
+    }
+
+    fn write(&mut self, values: &[T::T], offset: usize, len: usize) -> Result<()> {
+        self.num_values += len;
+
+        let slice = values.get(offset..offset + len).ok_or_else(|| {
+            general_err!(
+                "Expected to write {} values, but have only {}",
+                len,
+                values.len() - offset
+            )
+        })?;
+
+        if let Some((min, max)) = slice.min_max(&self.descr) {
+            update_min(&self.descr, min, &mut self.min_value);
+            update_max(&self.descr, max, &mut self.max_value);
+        }
+
+        match &mut self.dict_encoder {
+            Some(encoder) => encoder.put(slice),
+            _ => self.encoder.put(slice),
+        }
+    }
+
+    fn num_values(&self) -> usize {
+        self.num_values
+    }
+
+    fn has_dictionary(&self) -> bool {
+        self.dict_encoder.is_some()
+    }
+
+    fn estimated_dict_page_size(&self) -> Option<usize> {
+        Some(self.dict_encoder.as_ref()?.dict_encoded_size())
+    }
+
+    fn estimated_data_page_size(&self) -> usize {
+        match &self.dict_encoder {
+            Some(encoder) => encoder.estimated_data_encoded_size(),
+            _ => self.encoder.estimated_data_encoded_size(),
+        }
+    }
+
+    fn flush_dict_page(&mut self) -> Result<Option<DictionaryPage>> {
+        match self.dict_encoder.take() {
+            Some(encoder) => {
+                if self.num_values != 0 {
+                    return Err(general_err!(
+                        "Must flush data pages before flushing dictionary"
+                    ));
+                }
+
+                let buf = encoder.write_dict()?;
+
+                Ok(Some(DictionaryPage {
+                    buf,
+                    num_values: encoder.num_entries(),
+                    is_sorted: encoder.is_sorted(),
+                }))
+            }
+            _ => Ok(None),
+        }
+    }
+
+    fn flush_data_page(&mut self) -> Result<DataPageValues<T::T>> {
+        let (buf, encoding) = match &mut self.dict_encoder {
+            Some(encoder) => (encoder.write_indices()?, Encoding::RLE_DICTIONARY),
+            _ => (self.encoder.flush_buffer()?, self.encoder.encoding()),
+        };
+
+        Ok(DataPageValues {
+            buf,
+            encoding,
+            num_values: std::mem::take(&mut self.num_values),

Review Comment:
   Not that it matters, but is there some reason this isn't 
   
   ```suggestion
               num_values: self.num_values.take()
   ```
   
   (this is different from the lines immediately below for min/max value, which
is why I am asking)
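
   For reference, my understanding of the difference (the reason may simply be
that `num_values` is a `usize`, which has no inherent `take`, while the min/max
fields are `Option`s):

   ```rust
   use std::mem;

   fn main() {
       // usize has no .take(); mem::take swaps in Default::default()
       let mut num_values: usize = 42;
       assert_eq!(mem::take(&mut num_values), 42);
       assert_eq!(num_values, 0);

       // Option::take is the inherent equivalent for Option fields
       let mut min_value: Option<i32> = Some(7);
       assert_eq!(min_value.take(), Some(7));
       assert_eq!(min_value, None);
   }
   ```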



##########
parquet/src/basic.rs:
##########
@@ -212,7 +212,7 @@ pub enum Repetition {
 /// Encodings supported by Parquet.
 /// Not all encodings are valid for all types. These enums are also used to specify the
 /// encoding of definition and repetition levels.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]

Review Comment:
   Sorting for `Encoding`s seems somewhat strange (I asked a better question
above, at the site of use of `BTreeSet`)


