alamb commented on code in PR #8294:
URL: https://github.com/apache/arrow-datafusion/pull/8294#discussion_r1400976603
##########
datafusion/core/src/datasource/physical_plan/parquet/row_groups.rs:
##########
@@ -303,112 +298,6 @@ struct RowGroupPruningStatistics<'a> {
parquet_schema: &'a Schema,
}
-/// Extract the min/max statistics from a `ParquetStatistics` object
-macro_rules! get_statistic {
Review Comment:
This macro is moved, without modification, into `statistics.rs`
##########
datafusion/core/src/datasource/physical_plan/parquet.rs:
##########
@@ -718,28 +719,6 @@ pub async fn plan_to_parquet(
Ok(())
}
-// Copy from the arrow-rs
Review Comment:
Moved to `statistics.rs`
##########
datafusion/core/src/datasource/physical_plan/parquet/row_groups.rs:
##########
@@ -431,11 +320,29 @@ macro_rules! get_null_count_values {
impl<'a> PruningStatistics for RowGroupPruningStatistics<'a> {
fn min_values(&self, column: &Column) -> Option<ArrayRef> {
- get_min_max_values!(self, column, min, min_bytes)
+ let field = self
+ .parquet_schema
+ .fields()
+ .find(&column.name)
+ .map(|(_idx, field)| field)?;
+
+ RowGoupStatisticsConverter::new(&field)
Review Comment:
The idea here is (eventually) to prune more than one row group at a time.
However, this PR still does it one at a time.
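For illustration, here is a rough sketch of what a batched call could look like once that happens. This is a hypothetical helper, not part of this PR; it assumes `min`/`max` keep their current iterator-based signatures (and lives inside this crate, since the converter is `pub(crate)`):

```rust
use arrow::array::ArrayRef;
use arrow_schema::Field;
use datafusion_common::Result;
use parquet::file::metadata::RowGroupMetaData;

// Hypothetical: evaluate statistics for every row group in one call,
// producing one array element per row group so the pruning predicate
// can be evaluated vectorized over all row groups at once.
fn min_values_for_all_row_groups(
    field: &Field,
    row_groups: &[RowGroupMetaData],
) -> Result<ArrayRef> {
    RowGoupStatisticsConverter::new(field).min(row_groups.iter())
}
```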
##########
datafusion/core/src/datasource/physical_plan/parquet/statistics.rs:
##########
@@ -0,0 +1,805 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::{array::ArrayRef, datatypes::DataType};
+use arrow_array::new_empty_array;
+use arrow_schema::Field;
+use datafusion_common::{Result, ScalarValue};
+use parquet::file::{
+ metadata::RowGroupMetaData, statistics::Statistics as ParquetStatistics,
+};
+use std::sync::Arc;
+
+// Converts a big-endian byte slice to an i128.
+pub(crate) fn from_bytes_to_i128(b: &[u8]) -> i128 {
+    // The bytes come from a parquet file and are big-endian, as defined
+    // by the parquet format; see
+    // https://github.com/apache/parquet-format/blob/54e53e5d7794d383529dd30746378f19a12afd58/src/main/thrift/parquet.thrift#L66
+ i128::from_be_bytes(sign_extend_be(b))
+}
+
+// Copied from arrow-rs
+// https://github.com/apache/arrow-rs/blob/733b7e7fd1e8c43a404c3ce40ecf741d493c21b4/parquet/src/arrow/buffer/bit_util.rs#L55
+// Sign-extends the byte slice into a fixed-length array of 16 bytes
+fn sign_extend_be(b: &[u8]) -> [u8; 16] {
+    assert!(b.len() <= 16, "Array too large, expected 16 bytes or fewer");
+ let is_negative = (b[0] & 128u8) == 128u8;
+ let mut result = if is_negative { [255u8; 16] } else { [0u8; 16] };
+ for (d, s) in result.iter_mut().skip(16 - b.len()).zip(b) {
+ *d = *s;
+ }
+ result
+}
+
+/// Converts parquet RowGroup statistics (stored in
+/// [`RowGroupMetaData`]) into an arrow [`ArrayRef`]
+///
+/// For example, given a parquet file with 3 Row Groups, when asked for
+/// statistics for column "A" it will return a single array with 3 elements.
+///
+pub(crate) struct RowGoupStatisticsConverter<'a> {
+ field: &'a Field,
+}
+
+/// Extracts a single min/max statistic from a [`ParquetStatistics`] object
+///
+/// * `$column_statistics` is the `ParquetStatistics` object
+/// * `$func` is the function (`min`/`max`) to call to get the value
+/// * `$bytes_func` is the function (`min_bytes`/`max_bytes`) to call to get the value as bytes
+/// * `$target_arrow_type` is the [`DataType`] of the target statistics
+macro_rules! get_statistic {
+    ($column_statistics:expr, $func:ident, $bytes_func:ident, $target_arrow_type:expr) => {{
+ if !$column_statistics.has_min_max_set() {
+ return None;
+ }
+ match $column_statistics {
+            ParquetStatistics::Boolean(s) => Some(ScalarValue::Boolean(Some(*s.$func()))),
+ ParquetStatistics::Int32(s) => {
+ match $target_arrow_type {
+ // int32 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int32(Some(*s.$func()))),
+ }
+ }
+ ParquetStatistics::Int64(s) => {
+ match $target_arrow_type {
+ // int64 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int64(Some(*s.$func()))),
+ }
+ }
+ // 96 bit ints not supported
+ ParquetStatistics::Int96(_) => None,
+            ParquetStatistics::Float(s) => Some(ScalarValue::Float32(Some(*s.$func()))),
+            ParquetStatistics::Double(s) => Some(ScalarValue::Float64(Some(*s.$func()))),
+ ParquetStatistics::ByteArray(s) => {
+ match $target_arrow_type {
+ // decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => {
+ let s = std::str::from_utf8(s.$bytes_func())
+ .map(|s| s.to_string())
+ .ok();
+ Some(ScalarValue::Utf8(s))
+ }
+ }
+ }
+            // only the decimal type is supported for FixedLenByteArray so far
+ ParquetStatistics::FixedLenByteArray(s) => {
+ match $target_arrow_type {
+ // just support the decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => None,
+ }
+ }
+ }
+ }};
+}
+
+#[derive(Debug, Clone, Copy)]
+enum MinMax {
+ Min,
+ Max,
+}
+
+impl<'a> RowGoupStatisticsConverter<'a> {
+    /// Create a new RowGoupStatisticsConverter that can extract
+ /// statistics for the specified field
+ pub fn new(field: &'a Field) -> Self {
+ Self { field }
+ }
+
+    /// Returns the min values for the column, one per row group, as an ArrayRef.
+ pub fn min<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Min, row_group_meta_data)
+ }
+
+    /// Returns the max values for the column, one per row group, as an ArrayRef.
+ pub fn max<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Max, row_group_meta_data)
+ }
+
+    /// Extracts the min or max values for the column, one per row group.
+ fn min_max_impl<'b>(
+ &self,
+ mm: MinMax,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+        let mut row_group_meta_data = row_group_meta_data.into_iter().peekable();
+
+        // if there are no row groups, return an empty array
Review Comment:
This empty-input handling is new, to support the new `ArrayRef`-based interface.
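To make the new behavior concrete, here is a minimal sketch of what this branch guarantees (the `roundtrip_empty` test below exercises the same path; the `Int32` field is just an example):

```rust
use arrow::array::Array;
use arrow_schema::{DataType, Field};
use parquet::file::metadata::RowGroupMetaData;

#[test]
fn no_row_groups_yields_empty_array() {
    let field = Field::new("a", DataType::Int32, true);
    let no_row_groups: Vec<RowGroupMetaData> = vec![];
    // With nothing to iterate, the converter returns a zero-length
    // array of the field's type rather than an error.
    let min = RowGoupStatisticsConverter::new(&field)
        .min(&no_row_groups)
        .unwrap();
    assert_eq!(min.len(), 0);
    assert_eq!(min.data_type(), &DataType::Int32);
}
```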
##########
datafusion/core/src/datasource/physical_plan/parquet/statistics.rs:
##########
@@ -0,0 +1,805 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::{array::ArrayRef, datatypes::DataType};
+use arrow_array::new_empty_array;
+use arrow_schema::Field;
+use datafusion_common::{Result, ScalarValue};
+use parquet::file::{
+ metadata::RowGroupMetaData, statistics::Statistics as ParquetStatistics,
+};
+use std::sync::Arc;
+
+// Converts a big-endian byte slice to an i128.
+pub(crate) fn from_bytes_to_i128(b: &[u8]) -> i128 {
+    // The bytes come from a parquet file and are big-endian, as defined
+    // by the parquet format; see
+    // https://github.com/apache/parquet-format/blob/54e53e5d7794d383529dd30746378f19a12afd58/src/main/thrift/parquet.thrift#L66
+ i128::from_be_bytes(sign_extend_be(b))
+}
+
+// Copied from arrow-rs
+// https://github.com/apache/arrow-rs/blob/733b7e7fd1e8c43a404c3ce40ecf741d493c21b4/parquet/src/arrow/buffer/bit_util.rs#L55
+// Sign-extends the byte slice into a fixed-length array of 16 bytes
+fn sign_extend_be(b: &[u8]) -> [u8; 16] {
+    assert!(b.len() <= 16, "Array too large, expected 16 bytes or fewer");
+ let is_negative = (b[0] & 128u8) == 128u8;
+ let mut result = if is_negative { [255u8; 16] } else { [0u8; 16] };
+ for (d, s) in result.iter_mut().skip(16 - b.len()).zip(b) {
+ *d = *s;
+ }
+ result
+}
+
+/// Converts parquet RowGroup statistics (stored in
+/// [`RowGroupMetaData`]) into an arrow [`ArrayRef`]
+///
+/// For example, given a parquet file with 3 Row Groups, when asked for
+/// statistics for column "A" it will return a single array with 3 elements.
+///
+pub(crate) struct RowGoupStatisticsConverter<'a> {
+ field: &'a Field,
+}
+
+/// Extracts a single min/max statistic from a [`ParquetStatistics`] object
+///
+/// * `$column_statistics` is the `ParquetStatistics` object
+/// * `$func` is the function (`min`/`max`) to call to get the value
+/// * `$bytes_func` is the function (`min_bytes`/`max_bytes`) to call to get the value as bytes
+/// * `$target_arrow_type` is the [`DataType`] of the target statistics
+macro_rules! get_statistic {
Review Comment:
This implementation leaves a lot to be desired, but I want to get tests in
place before I start changing it
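For the record, one possible direction after the tests land (a hedged sketch, not this PR's code): replace the macro with a plain function keyed off the `MinMax` enum already defined in this file. Only two arms are shown; the rest (Int64, Float, Double, the byte-array and decimal conversions) would follow the macro above:

```rust
use arrow::datatypes::DataType;
use datafusion_common::ScalarValue;
use parquet::file::statistics::Statistics as ParquetStatistics;

// Hypothetical replacement for the macro: ordinary match arms instead of
// macro expansion, selecting min vs max via the MinMax enum.
fn get_statistic(
    stats: &ParquetStatistics,
    mm: MinMax,
    _target_type: Option<&DataType>,
) -> Option<ScalarValue> {
    if !stats.has_min_max_set() {
        return None;
    }
    match (stats, mm) {
        (ParquetStatistics::Int32(s), MinMax::Min) => {
            Some(ScalarValue::Int32(Some(*s.min())))
        }
        (ParquetStatistics::Int32(s), MinMax::Max) => {
            Some(ScalarValue::Int32(Some(*s.max())))
        }
        // ... remaining arms elided
        _ => None,
    }
}
```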
##########
datafusion/core/src/datasource/physical_plan/parquet/statistics.rs:
##########
@@ -0,0 +1,805 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::{array::ArrayRef, datatypes::DataType};
+use arrow_array::new_empty_array;
+use arrow_schema::Field;
+use datafusion_common::{Result, ScalarValue};
+use parquet::file::{
+ metadata::RowGroupMetaData, statistics::Statistics as ParquetStatistics,
+};
+use std::sync::Arc;
+
+// Converts a big-endian byte slice to an i128.
+pub(crate) fn from_bytes_to_i128(b: &[u8]) -> i128 {
+    // The bytes come from a parquet file and are big-endian, as defined
+    // by the parquet format; see
+    // https://github.com/apache/parquet-format/blob/54e53e5d7794d383529dd30746378f19a12afd58/src/main/thrift/parquet.thrift#L66
+ i128::from_be_bytes(sign_extend_be(b))
+}
+
+// Copied from arrow-rs
+// https://github.com/apache/arrow-rs/blob/733b7e7fd1e8c43a404c3ce40ecf741d493c21b4/parquet/src/arrow/buffer/bit_util.rs#L55
+// Sign-extends the byte slice into a fixed-length array of 16 bytes
+fn sign_extend_be(b: &[u8]) -> [u8; 16] {
+    assert!(b.len() <= 16, "Array too large, expected 16 bytes or fewer");
+ let is_negative = (b[0] & 128u8) == 128u8;
+ let mut result = if is_negative { [255u8; 16] } else { [0u8; 16] };
+ for (d, s) in result.iter_mut().skip(16 - b.len()).zip(b) {
+ *d = *s;
+ }
+ result
+}
+
+/// Converts parquet RowGroup statistics (stored in
+/// [`RowGroupMetaData`]) into an arrow [`ArrayRef`]
+///
+/// For example, given a parquet file with 3 Row Groups, when asked for
+/// statistics for column "A" it will return a single array with 3 elements.
+///
+pub(crate) struct RowGoupStatisticsConverter<'a> {
+ field: &'a Field,
+}
+
+/// Extracts a single min/max statistic from a [`ParquetStatistics`] object
+///
+/// * `$column_statistics` is the `ParquetStatistics` object
+/// * `$func` is the function (`min`/`max`) to call to get the value
+/// * `$bytes_func` is the function (`min_bytes`/`max_bytes`) to call to get the value as bytes
+/// * `$target_arrow_type` is the [`DataType`] of the target statistics
+macro_rules! get_statistic {
+    ($column_statistics:expr, $func:ident, $bytes_func:ident, $target_arrow_type:expr) => {{
+ if !$column_statistics.has_min_max_set() {
+ return None;
+ }
+ match $column_statistics {
+            ParquetStatistics::Boolean(s) => Some(ScalarValue::Boolean(Some(*s.$func()))),
+ ParquetStatistics::Int32(s) => {
+ match $target_arrow_type {
+ // int32 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int32(Some(*s.$func()))),
+ }
+ }
+ ParquetStatistics::Int64(s) => {
+ match $target_arrow_type {
+ // int64 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int64(Some(*s.$func()))),
+ }
+ }
+ // 96 bit ints not supported
+ ParquetStatistics::Int96(_) => None,
+            ParquetStatistics::Float(s) => Some(ScalarValue::Float32(Some(*s.$func()))),
+            ParquetStatistics::Double(s) => Some(ScalarValue::Float64(Some(*s.$func()))),
+ ParquetStatistics::ByteArray(s) => {
+ match $target_arrow_type {
+ // decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => {
+ let s = std::str::from_utf8(s.$bytes_func())
+ .map(|s| s.to_string())
+ .ok();
+ Some(ScalarValue::Utf8(s))
+ }
+ }
+ }
+            // only the decimal type is supported for FixedLenByteArray so far
+ ParquetStatistics::FixedLenByteArray(s) => {
+ match $target_arrow_type {
+ // just support the decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => None,
+ }
+ }
+ }
+ }};
+}
+
+#[derive(Debug, Clone, Copy)]
+enum MinMax {
+ Min,
+ Max,
+}
+
+impl<'a> RowGoupStatisticsConverter<'a> {
+    /// Create a new RowGoupStatisticsConverter that can extract
+ /// statistics for the specified field
+ pub fn new(field: &'a Field) -> Self {
+ Self { field }
+ }
+
+    /// Returns the min values for the column, one per row group, as an ArrayRef.
+ pub fn min<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Min, row_group_meta_data)
+ }
+
+    /// Returns the max values for the column, one per row group, as an ArrayRef.
+ pub fn max<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Max, row_group_meta_data)
+ }
+
+    /// Extracts the min or max values for the column, one per row group.
+ fn min_max_impl<'b>(
+ &self,
+ mm: MinMax,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+        let mut row_group_meta_data = row_group_meta_data.into_iter().peekable();
+
+        // if there are no row groups, return an empty array
+ if row_group_meta_data.peek().is_none() {
+ return Ok(new_empty_array(self.field.data_type()));
+ }
+
+ let maybe_index = row_group_meta_data.peek().and_then(|rg_meta| {
+ rg_meta
+ .columns()
+ .iter()
+ .enumerate()
+ .find(|(_idx, c)| c.column_descr().name() == self.field.name())
+ .map(|(idx, _c)| idx)
+ });
+
+        // the file doesn't have this column; return an array of all NULLs
+ let Some(column_index) = maybe_index else {
+ let num_row_groups = row_group_meta_data.count();
+ let sv = ScalarValue::try_from(self.field.data_type())?;
+ return sv.to_array_of_size(num_row_groups);
+ };
+
+ let stats_iter = row_group_meta_data.map(move |row_group_meta_data| {
+ row_group_meta_data.column(column_index).statistics()
+ });
+
+ // this is the value to use when the statistics are not set
+ let null_value = ScalarValue::try_from(self.field.data_type())?;
+ match mm {
+ MinMax::Min => {
+ let values = stats_iter.map(|column_statistics| {
+ column_statistics
+ .and_then(|column_statistics| {
+ get_statistic!(
+ column_statistics,
+ min,
+ min_bytes,
+ Some(self.field.data_type().clone())
+ )
+ })
+ .unwrap_or_else(|| null_value.clone())
+ });
+ ScalarValue::iter_to_array(values)
+ }
+ MinMax::Max => {
+ let values = stats_iter.map(|column_statistics| {
+ column_statistics
+ .and_then(|column_statistics| {
+ get_statistic!(
+ column_statistics,
+ max,
+ max_bytes,
+ Some(self.field.data_type().clone())
+ )
+ })
+ .unwrap_or_else(|| null_value.clone())
+ });
+ ScalarValue::iter_to_array(values)
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use arrow_array::{
+ BinaryArray, BooleanArray, Decimal128Array, Float32Array, Float64Array,
+        Int32Array, Int64Array, RecordBatch, StringArray, TimestampNanosecondArray,
+ };
+ use arrow_schema::SchemaRef;
+ use bytes::Bytes;
+ use datafusion_common::test_util::parquet_test_data;
+ use parquet::arrow::arrow_reader::ArrowReaderBuilder;
+ use parquet::arrow::arrow_writer::ArrowWriter;
+ use parquet::file::metadata::ParquetMetaData;
+ use parquet::file::properties::{EnabledStatistics, WriterProperties};
+ use std::path::PathBuf;
+
+    // TODO error cases (with parquet statistics that are mismatched in expected type)
+
+ #[test]
+ fn roundtrip_empty() {
+ let empty_bool_array = new_empty_array(&DataType::Boolean);
+ Test {
+ input: empty_bool_array.clone(),
+ expected_min: empty_bool_array.clone(),
+ expected_max: empty_bool_array.clone(),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_bool() {
+ Test {
+ input: Arc::new(BooleanArray::from(vec![
+ // row group 1
+ Some(true),
+ None,
+ Some(true),
+ // row group 2
+ Some(true),
+ Some(false),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+ expected_min: Arc::new(BooleanArray::from(vec![
+ Some(true),
+ Some(false),
+ None,
+ ])),
+ expected_max: Arc::new(BooleanArray::from(vec![
+ Some(true),
+ Some(true),
+ None,
+ ])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_int32() {
+ Test {
+ input: Arc::new(Int32Array::from(vec![
+ // row group 1
+ Some(1),
+ None,
+ Some(3),
+ // row group 2
+ Some(0),
+ Some(5),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Int32Array::from(vec![Some(1), Some(0), None])),
+            expected_max: Arc::new(Int32Array::from(vec![Some(3), Some(5), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_int64() {
+ Test {
+ input: Arc::new(Int64Array::from(vec![
+ // row group 1
+ Some(1),
+ None,
+ Some(3),
+ // row group 2
+ Some(0),
+ Some(5),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Int64Array::from(vec![Some(1), Some(0), None])),
+            expected_max: Arc::new(Int64Array::from(vec![Some(3), Some(5), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_f32() {
+ Test {
+ input: Arc::new(Float32Array::from(vec![
+ // row group 1
+ Some(1.0),
+ None,
+ Some(3.0),
+ // row group 2
+ Some(-1.0),
+ Some(5.0),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Float32Array::from(vec![Some(1.0), Some(-1.0), None])),
+            expected_max: Arc::new(Float32Array::from(vec![Some(3.0), Some(5.0), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_f64() {
+ Test {
+ input: Arc::new(Float64Array::from(vec![
+ // row group 1
+ Some(1.0),
+ None,
+ Some(3.0),
+ // row group 2
+ Some(-1.0),
+ Some(5.0),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Float64Array::from(vec![Some(1.0), Some(-1.0), None])),
+            expected_max: Arc::new(Float64Array::from(vec![Some(3.0), Some(5.0), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ #[should_panic(
Review Comment:
It is not good that the statistics don't round trip -- I will write up a
ticket prior to merging this PR
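For reference, the fix will probably have roughly this shape (a hypothetical sketch to be worked out in that ticket): extract the raw physical statistics first, then cast the resulting array to the field's arrow type so `TimestampNanosecond` columns round trip:

```rust
use arrow::array::ArrayRef;
use arrow::compute::cast;
use arrow_schema::Field;
use datafusion_common::Result;

// Hypothetical post-processing step: parquet stores timestamps as Int64
// physical values, so casting the extracted statistics restores the
// logical arrow type recorded in the schema.
fn statistics_as_field_type(raw: ArrayRef, field: &Field) -> Result<ArrayRef> {
    Ok(cast(&raw, field.data_type())?)
}
```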
##########
datafusion/core/src/datasource/physical_plan/parquet/statistics.rs:
##########
@@ -0,0 +1,805 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::{array::ArrayRef, datatypes::DataType};
+use arrow_array::new_empty_array;
+use arrow_schema::Field;
+use datafusion_common::{Result, ScalarValue};
+use parquet::file::{
+ metadata::RowGroupMetaData, statistics::Statistics as ParquetStatistics,
+};
+use std::sync::Arc;
+
+// Converts a big-endian byte slice to an i128.
+pub(crate) fn from_bytes_to_i128(b: &[u8]) -> i128 {
+    // The bytes come from a parquet file and are big-endian, as defined
+    // by the parquet format; see
+    // https://github.com/apache/parquet-format/blob/54e53e5d7794d383529dd30746378f19a12afd58/src/main/thrift/parquet.thrift#L66
+ i128::from_be_bytes(sign_extend_be(b))
+}
+
+// Copied from arrow-rs
+// https://github.com/apache/arrow-rs/blob/733b7e7fd1e8c43a404c3ce40ecf741d493c21b4/parquet/src/arrow/buffer/bit_util.rs#L55
+// Sign-extends the byte slice into a fixed-length array of 16 bytes
+fn sign_extend_be(b: &[u8]) -> [u8; 16] {
+    assert!(b.len() <= 16, "Array too large, expected 16 bytes or fewer");
+ let is_negative = (b[0] & 128u8) == 128u8;
+ let mut result = if is_negative { [255u8; 16] } else { [0u8; 16] };
+ for (d, s) in result.iter_mut().skip(16 - b.len()).zip(b) {
+ *d = *s;
+ }
+ result
+}
+
+/// Converts parquet RowGroup statistics (stored in
+/// [`RowGroupMetaData`]) into an arrow [`ArrayRef`]
+///
+/// For example, given a parquet file with 3 Row Groups, when asked for
+/// statistics for column "A" it will return a single array with 3 elements.
+///
+pub(crate) struct RowGoupStatisticsConverter<'a> {
+ field: &'a Field,
+}
+
+/// Extracts a single min/max statistic from a [`ParquetStatistics`] object
+///
+/// * `$column_statistics` is the `ParquetStatistics` object
+/// * `$func` is the function (`min`/`max`) to call to get the value
+/// * `$bytes_func` is the function (`min_bytes`/`max_bytes`) to call to get the value as bytes
+/// * `$target_arrow_type` is the [`DataType`] of the target statistics
+macro_rules! get_statistic {
+    ($column_statistics:expr, $func:ident, $bytes_func:ident, $target_arrow_type:expr) => {{
+ if !$column_statistics.has_min_max_set() {
+ return None;
+ }
+ match $column_statistics {
+            ParquetStatistics::Boolean(s) => Some(ScalarValue::Boolean(Some(*s.$func()))),
+ ParquetStatistics::Int32(s) => {
+ match $target_arrow_type {
+ // int32 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int32(Some(*s.$func()))),
+ }
+ }
+ ParquetStatistics::Int64(s) => {
+ match $target_arrow_type {
+ // int64 to decimal with the precision and scale
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(*s.$func() as i128),
+ precision,
+ scale,
+ ))
+ }
+ _ => Some(ScalarValue::Int64(Some(*s.$func()))),
+ }
+ }
+ // 96 bit ints not supported
+ ParquetStatistics::Int96(_) => None,
+            ParquetStatistics::Float(s) => Some(ScalarValue::Float32(Some(*s.$func()))),
+            ParquetStatistics::Double(s) => Some(ScalarValue::Float64(Some(*s.$func()))),
+ ParquetStatistics::ByteArray(s) => {
+ match $target_arrow_type {
+ // decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => {
+ let s = std::str::from_utf8(s.$bytes_func())
+ .map(|s| s.to_string())
+ .ok();
+ Some(ScalarValue::Utf8(s))
+ }
+ }
+ }
+            // only the decimal type is supported for FixedLenByteArray so far
+ ParquetStatistics::FixedLenByteArray(s) => {
+ match $target_arrow_type {
+ // just support the decimal data type
+ Some(DataType::Decimal128(precision, scale)) => {
+ Some(ScalarValue::Decimal128(
+ Some(from_bytes_to_i128(s.$bytes_func())),
+ precision,
+ scale,
+ ))
+ }
+ _ => None,
+ }
+ }
+ }
+ }};
+}
+
+#[derive(Debug, Clone, Copy)]
+enum MinMax {
+ Min,
+ Max,
+}
+
+impl<'a> RowGoupStatisticsConverter<'a> {
+    /// Create a new RowGoupStatisticsConverter that can extract
+ /// statistics for the specified field
+ pub fn new(field: &'a Field) -> Self {
+ Self { field }
+ }
+
+    /// Returns the min values for the column, one per row group, as an ArrayRef.
+ pub fn min<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Min, row_group_meta_data)
+ }
+
+    /// Returns the max values for the column, one per row group, as an ArrayRef.
+ pub fn max<'b>(
+ &self,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+ self.min_max_impl(MinMax::Max, row_group_meta_data)
+ }
+
+    /// Extracts the min or max values for the column, one per row group.
+ fn min_max_impl<'b>(
+ &self,
+ mm: MinMax,
+ row_group_meta_data: impl IntoIterator<Item = &'b RowGroupMetaData>,
+ ) -> Result<ArrayRef> {
+        let mut row_group_meta_data = row_group_meta_data.into_iter().peekable();
+
+        // if there are no row groups, return an empty array
+ if row_group_meta_data.peek().is_none() {
+ return Ok(new_empty_array(self.field.data_type()));
+ }
+
+ let maybe_index = row_group_meta_data.peek().and_then(|rg_meta| {
+ rg_meta
+ .columns()
+ .iter()
+ .enumerate()
+ .find(|(_idx, c)| c.column_descr().name() == self.field.name())
+ .map(|(idx, _c)| idx)
+ });
+
+        // the file doesn't have this column; return an array of all NULLs
+ let Some(column_index) = maybe_index else {
+ let num_row_groups = row_group_meta_data.count();
+ let sv = ScalarValue::try_from(self.field.data_type())?;
+ return sv.to_array_of_size(num_row_groups);
+ };
+
+ let stats_iter = row_group_meta_data.map(move |row_group_meta_data| {
+ row_group_meta_data.column(column_index).statistics()
+ });
+
+ // this is the value to use when the statistics are not set
+ let null_value = ScalarValue::try_from(self.field.data_type())?;
+ match mm {
+ MinMax::Min => {
+ let values = stats_iter.map(|column_statistics| {
+ column_statistics
+ .and_then(|column_statistics| {
+ get_statistic!(
+ column_statistics,
+ min,
+ min_bytes,
+ Some(self.field.data_type().clone())
+ )
+ })
+ .unwrap_or_else(|| null_value.clone())
+ });
+ ScalarValue::iter_to_array(values)
+ }
+ MinMax::Max => {
+ let values = stats_iter.map(|column_statistics| {
+ column_statistics
+ .and_then(|column_statistics| {
+ get_statistic!(
+ column_statistics,
+ max,
+ max_bytes,
+ Some(self.field.data_type().clone())
+ )
+ })
+ .unwrap_or_else(|| null_value.clone())
+ });
+ ScalarValue::iter_to_array(values)
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use arrow_array::{
+ BinaryArray, BooleanArray, Decimal128Array, Float32Array, Float64Array,
+        Int32Array, Int64Array, RecordBatch, StringArray, TimestampNanosecondArray,
+ };
+ use arrow_schema::SchemaRef;
+ use bytes::Bytes;
+ use datafusion_common::test_util::parquet_test_data;
+ use parquet::arrow::arrow_reader::ArrowReaderBuilder;
+ use parquet::arrow::arrow_writer::ArrowWriter;
+ use parquet::file::metadata::ParquetMetaData;
+ use parquet::file::properties::{EnabledStatistics, WriterProperties};
+ use std::path::PathBuf;
+
+    // TODO error cases (with parquet statistics that are mismatched in expected type)
+
+ #[test]
+ fn roundtrip_empty() {
+ let empty_bool_array = new_empty_array(&DataType::Boolean);
+ Test {
+ input: empty_bool_array.clone(),
+ expected_min: empty_bool_array.clone(),
+ expected_max: empty_bool_array.clone(),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_bool() {
+ Test {
+ input: Arc::new(BooleanArray::from(vec![
+ // row group 1
+ Some(true),
+ None,
+ Some(true),
+ // row group 2
+ Some(true),
+ Some(false),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+ expected_min: Arc::new(BooleanArray::from(vec![
+ Some(true),
+ Some(false),
+ None,
+ ])),
+ expected_max: Arc::new(BooleanArray::from(vec![
+ Some(true),
+ Some(true),
+ None,
+ ])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_int32() {
+ Test {
+ input: Arc::new(Int32Array::from(vec![
+ // row group 1
+ Some(1),
+ None,
+ Some(3),
+ // row group 2
+ Some(0),
+ Some(5),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Int32Array::from(vec![Some(1), Some(0), None])),
+            expected_max: Arc::new(Int32Array::from(vec![Some(3), Some(5), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_int64() {
+ Test {
+ input: Arc::new(Int64Array::from(vec![
+ // row group 1
+ Some(1),
+ None,
+ Some(3),
+ // row group 2
+ Some(0),
+ Some(5),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Int64Array::from(vec![Some(1), Some(0), None])),
+            expected_max: Arc::new(Int64Array::from(vec![Some(3), Some(5), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_f32() {
+ Test {
+ input: Arc::new(Float32Array::from(vec![
+ // row group 1
+ Some(1.0),
+ None,
+ Some(3.0),
+ // row group 2
+ Some(-1.0),
+ Some(5.0),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Float32Array::from(vec![Some(1.0), Some(-1.0), None])),
+            expected_max: Arc::new(Float32Array::from(vec![Some(3.0), Some(5.0), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_f64() {
+ Test {
+ input: Arc::new(Float64Array::from(vec![
+ // row group 1
+ Some(1.0),
+ None,
+ Some(3.0),
+ // row group 2
+ Some(-1.0),
+ Some(5.0),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(Float64Array::from(vec![Some(1.0), Some(-1.0), None])),
+            expected_max: Arc::new(Float64Array::from(vec![Some(3.0), Some(5.0), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ #[should_panic(
+ expected = "Inconsistent types in ScalarValue::iter_to_array. Expected
Int64, got TimestampNanosecond(NULL, None)"
+ )]
+ fn roundtrip_timestamp() {
+ Test {
+ input: Arc::new(TimestampNanosecondArray::from(vec![
+ // row group 1
+ Some(1),
+ None,
+ Some(3),
+ // row group 2
+ Some(9),
+ Some(5),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+ expected_min: Arc::new(TimestampNanosecondArray::from(vec![
+ Some(1),
+ Some(5),
+ None,
+ ])),
+ expected_max: Arc::new(TimestampNanosecondArray::from(vec![
+ Some(3),
+ Some(9),
+ None,
+ ])),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_decimal() {
+ Test {
+ input: Arc::new(
+ Decimal128Array::from(vec![
+ // row group 1
+ Some(100),
+ None,
+ Some(22000),
+ // row group 2
+ Some(500000),
+ Some(330000),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])
+ .with_precision_and_scale(9, 2)
+ .unwrap(),
+ ),
+ expected_min: Arc::new(
+ Decimal128Array::from(vec![Some(100), Some(330000), None])
+ .with_precision_and_scale(9, 2)
+ .unwrap(),
+ ),
+ expected_max: Arc::new(
+ Decimal128Array::from(vec![Some(22000), Some(500000), None])
+ .with_precision_and_scale(9, 2)
+ .unwrap(),
+ ),
+ }
+ .run()
+ }
+
+ #[test]
+ fn roundtrip_utf8() {
+ Test {
+ input: Arc::new(StringArray::from(vec![
+ // row group 1
+ Some("A"),
+ None,
+ Some("Q"),
+ // row group 2
+ Some("ZZ"),
+ Some("AA"),
+ None,
+ // row group 3
+ None,
+ None,
+ None,
+ ])),
+            expected_min: Arc::new(StringArray::from(vec![Some("A"), Some("AA"), None])),
+            expected_max: Arc::new(StringArray::from(vec![Some("Q"), Some("ZZ"), None])),
+ }
+ .run()
+ }
+
+ #[test]
+ #[should_panic(
+ expected = "Inconsistent types in ScalarValue::iter_to_array. Expected
Utf8, got Binary(NULL)"
Review Comment:
Likewise, this should round trip but does not
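The likely fix has the same shape here (hypothetical sketch, not this PR's code): dispatch `ByteArray` statistics on the field's arrow type instead of always decoding to `Utf8`:

```rust
use arrow::datatypes::DataType;
use datafusion_common::ScalarValue;

// Hypothetical helper: keep Binary statistics as raw bytes; only valid
// UTF-8 becomes a Utf8 scalar.
fn byte_array_statistic(bytes: &[u8], target: &DataType) -> Option<ScalarValue> {
    match target {
        DataType::Binary => Some(ScalarValue::Binary(Some(bytes.to_vec()))),
        DataType::Utf8 => std::str::from_utf8(bytes)
            .ok()
            .map(|s| ScalarValue::Utf8(Some(s.to_string()))),
        _ => None,
    }
}
```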
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]