alamb commented on code in PR #15503:
URL: https://github.com/apache/datafusion/pull/15503#discussion_r2026948204


##########
datafusion/physical-plan/src/joins/cross_join.rs:
##########
@@ -344,6 +345,26 @@ impl ExecutionPlan for CrossJoinExec {
         ))
     }
 
+    fn statistics_by_partition(&self) -> Result<Vec<Statistics>> {

Review Comment:
   I don't think I saw a test for this code. Maybe I missed it
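   For illustration, a minimal sketch of such a test, reusing the helper and data layout from partition_statistics.rs (the exact per-partition row counts are an assumption about how CrossJoinExec combines its inputs):

   ```rust
   // Assumes `use datafusion_physical_plan::joins::CrossJoinExec;`
   #[tokio::test]
   async fn test_statistics_by_partition_of_cross_join() -> datafusion_common::Result<()> {
       // Left side as a single partition (4 rows), right side as two partitions (2 rows each).
       let left = generate_listing_table_with_statistics(Some(1)).await;
       let right = generate_listing_table_with_statistics(Some(2)).await;
       let cross_join: Arc<dyn ExecutionPlan> = Arc::new(CrossJoinExec::new(left, right));
       let statistics = cross_join.statistics_by_partition()?;
       // The cross join keeps the right side's partitioning, so two output partitions.
       assert_eq!(statistics.len(), 2);
       for stat in &statistics {
           // Assumed: 4 left rows x 2 rows per right partition = 8 rows.
           assert_eq!(stat.num_rows, Precision::Exact(8));
           // The output schema is the left columns followed by the right columns (2 + 2).
           assert_eq!(stat.column_statistics.len(), 4);
       }
       Ok(())
   }
   ```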



##########
datafusion/core/tests/physical_optimizer/partition_statistics.rs:
##########
@@ -0,0 +1,317 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#[cfg(test)]
+mod test {
+    use arrow_schema::{DataType, Field, Schema, SortOptions};
+    use datafusion::datasource::listing::ListingTable;
+    use datafusion::prelude::SessionContext;
+    use datafusion_catalog::TableProvider;
+    use datafusion_common::stats::Precision;
+    use datafusion_common::{ScalarValue, Statistics};
+    use datafusion_execution::config::SessionConfig;
+    use datafusion_expr_common::operator::Operator;
+    use datafusion_physical_expr::expressions::{binary, lit, Column};
+    use datafusion_physical_expr_common::physical_expr::PhysicalExpr;
+    use datafusion_physical_expr_common::sort_expr::{LexOrdering, PhysicalSortExpr};
+    use datafusion_physical_plan::filter::FilterExec;
+    use datafusion_physical_plan::projection::ProjectionExec;
+    use datafusion_physical_plan::sorts::sort::SortExec;
+    use datafusion_physical_plan::union::UnionExec;
+    use datafusion_physical_plan::ExecutionPlan;
+    use std::sync::Arc;
+
+    async fn generate_listing_table_with_statistics(

Review Comment:
   Could you please document a bit what is in this file and what the expected 
data is?
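   For example, a module-level comment along these lines would help (a sketch, with the layout inferred from the assertions further down in this file):

   ```rust
   //! Tests for `ExecutionPlan::statistics_by_partition`.
   //!
   //! The tests scan `tests/data/test_statistics_per_partition`, a Parquet table
   //! partitioned by `date` with a non-null `id` column. With two target
   //! partitions the scan produces two partitions of two rows each:
   //! ids 3-4 land in partition 0 and ids 1-2 in partition 1, and each
   //! partition reports an exact byte size of 110.
   //!
   //! The tests then wrap the scan in projection, sort, filter and union plans
   //! and check how the per-partition statistics propagate.
   ```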



##########
datafusion/core/tests/physical_optimizer/partition_statistics.rs:
##########
@@ -0,0 +1,317 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#[cfg(test)]
+mod test {
+    use arrow_schema::{DataType, Field, Schema, SortOptions};
+    use datafusion::datasource::listing::ListingTable;
+    use datafusion::prelude::SessionContext;
+    use datafusion_catalog::TableProvider;
+    use datafusion_common::stats::Precision;
+    use datafusion_common::{ScalarValue, Statistics};
+    use datafusion_execution::config::SessionConfig;
+    use datafusion_expr_common::operator::Operator;
+    use datafusion_physical_expr::expressions::{binary, lit, Column};
+    use datafusion_physical_expr_common::physical_expr::PhysicalExpr;
+    use datafusion_physical_expr_common::sort_expr::{LexOrdering, PhysicalSortExpr};
+    use datafusion_physical_plan::filter::FilterExec;
+    use datafusion_physical_plan::projection::ProjectionExec;
+    use datafusion_physical_plan::sorts::sort::SortExec;
+    use datafusion_physical_plan::union::UnionExec;
+    use datafusion_physical_plan::ExecutionPlan;
+    use std::sync::Arc;
+
+    async fn generate_listing_table_with_statistics(
+        target_partition: Option<usize>,
+    ) -> Arc<dyn ExecutionPlan> {
+        let mut session_config = SessionConfig::new().with_collect_statistics(true);
+        if let Some(partition) = target_partition {
+            session_config = session_config.with_target_partitions(partition);
+        }
+        let ctx = SessionContext::new_with_config(session_config);
+        // Create table with partition
+        let create_table_sql = "CREATE EXTERNAL TABLE t1 (id INT not null, date DATE) STORED AS PARQUET LOCATION './tests/data/test_statistics_per_partition' PARTITIONED BY (date) WITH ORDER (id ASC);";
+        ctx.sql(create_table_sql)
+            .await
+            .unwrap()
+            .collect()
+            .await
+            .unwrap();
+        let table = ctx.table_provider("t1").await.unwrap();
+        let listing_table = table
+            .as_any()
+            .downcast_ref::<ListingTable>()
+            .unwrap()
+            .clone();
+        listing_table
+            .scan(&ctx.state(), None, &[], None)
+            .await
+            .unwrap()
+    }
+
+    fn check_unchanged_statistics(statistics: Vec<Statistics>) {
+        // Check the statistics of each partition
+        for stat in &statistics {
+            assert_eq!(stat.num_rows, Precision::Exact(2));
+            // First column (id) should have non-null values
+            assert_eq!(stat.column_statistics[0].null_count, Precision::Exact(0));
+        }
+
+        // Verify specific id values for each partition
+        assert_eq!(
+            statistics[0].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(4)))
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(3)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(2)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(1)))
+        );
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_data_source() -> datafusion_common::Result<()>
+    {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        let statistics = scan.statistics_by_partition()?;
+        // Check the statistics of each partition
+        assert_eq!(statistics.len(), 2);
+        for stat in &statistics {
+            assert_eq!(stat.column_statistics.len(), 2);
+            assert_eq!(stat.total_byte_size, Precision::Exact(110));
+        }
+        check_unchanged_statistics(statistics);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_projection() -> datafusion_common::Result<()>
+    {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        // Add projection execution plan
+        let exprs: Vec<(Arc<dyn PhysicalExpr>, String)> =
+            vec![(Arc::new(Column::new("id", 0)), "id".to_string())];
+        let projection = ProjectionExec::try_new(exprs, scan)?;
+        let statistics = projection.statistics_by_partition()?;
+        for stat in &statistics {
+            assert_eq!(stat.column_statistics.len(), 1);
+            assert_eq!(stat.total_byte_size, Precision::Exact(8));
+        }
+        check_unchanged_statistics(statistics);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_sort() -> datafusion_common::Result<()> {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        // Add sort execution plan
+        let sort = SortExec::new(
+            LexOrdering::new(vec![PhysicalSortExpr {
+                expr: Arc::new(Column::new("id", 0)),
+                options: SortOptions {
+                    descending: false,
+                    nulls_first: false,
+                },
+            }]),
+            scan,
+        );
+        let mut sort_exec = Arc::new(sort.clone());
+        let statistics = sort_exec.statistics_by_partition()?;
+        assert_eq!(statistics.len(), 1);
+        assert_eq!(statistics[0].num_rows, Precision::Exact(4));
+        assert_eq!(statistics[0].column_statistics.len(), 2);
+        assert_eq!(statistics[0].total_byte_size, Precision::Exact(220));
+        assert_eq!(
+            statistics[0].column_statistics[0].null_count,
+            Precision::Exact(0)
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(4)))
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(1)))
+        );

Review Comment:
   I found it hard to read here what the expected statistics were
   
   What do you think about a pattern like this (to create the expected statistics)?
   
   ```rust
           let expected_statistics = Statistics {
               num_rows: Precision::Exact(4),
               total_byte_size: Precision::Exact(220),
               column_statistics: vec![
                   ColumnStatistics {...
                   }],
           };
           assert_eq!(statistics[0], expected_statistics);
   ```
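   Filling that in from the assertions in this test (the `date` column statistics and the sum/distinct counts are assumed to be unknown), it could look roughly like:

   ```rust
           let expected_statistics = Statistics {
               num_rows: Precision::Exact(4),
               total_byte_size: Precision::Exact(220),
               column_statistics: vec![
                   ColumnStatistics {
                       null_count: Precision::Exact(0),
                       max_value: Precision::Exact(ScalarValue::Int32(Some(4))),
                       min_value: Precision::Exact(ScalarValue::Int32(Some(1))),
                       sum_value: Precision::Absent,
                       distinct_count: Precision::Absent,
                   },
                   ColumnStatistics::new_unknown(),
               ],
           };
           assert_eq!(statistics[0], expected_statistics);
   ```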



##########
datafusion/physical-plan/src/execution_plan.rs:
##########
@@ -427,6 +427,16 @@ pub trait ExecutionPlan: Debug + DisplayAs + Send + Sync {
         Ok(Statistics::new_unknown(&self.schema()))
     }
 
+    /// Returns statistics for each partition of this `ExecutionPlan` node.
+    /// If statistics are not available, returns an array of
+    /// [`Statistics::new_unknown`] for each partition.
+    fn statistics_by_partition(&self) -> Result<Vec<Statistics>> {

Review Comment:
   Can we please make a structure rather than directly using `Vec<Statistics>` 
here? 
   
   I think doing so will make it easier / less breaking if we want to evolve 
how these statistics are handled. This was a lesson learned from our work with 
LexOrdering / EquivalenceProperties. 
   
   Something like the following
   
   ```rust
   /// Statistics for each partition
   struct PartitionedStatistics {
     inner: Vec<Statistics>
   }
   
   impl PartitionedStatistics {
     fn len(&self) -> usize {
      self.inner.len()
     }
   
     /// return the statistics for the specified partition
     fn statistics(&self, partition: usize) -> &Statistics {
       ...
     }
   }
   ```
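   Building on that sketch (the `Arc` wrapper is an assumption, motivated by the cloning concern noted further down), the struct could hold `Arc<Statistics>` so per-partition statistics can be shared without deep copies:

   ```rust
   use std::sync::Arc;

   use datafusion_common::Statistics;

   /// Statistics for each partition of an `ExecutionPlan`.
   #[derive(Debug, Clone)]
   pub struct PartitionedStatistics {
       inner: Vec<Arc<Statistics>>,
   }

   impl PartitionedStatistics {
       pub fn new(inner: Vec<Arc<Statistics>>) -> Self {
           Self { inner }
       }

       /// Number of partitions covered by these statistics.
       pub fn len(&self) -> usize {
           self.inner.len()
       }

       pub fn is_empty(&self) -> bool {
           self.inner.is_empty()
       }

       /// Return the statistics for the specified partition.
       ///
       /// Panics if `partition` is out of bounds.
       pub fn statistics(&self, partition: usize) -> &Statistics {
           &self.inner[partition]
       }

       /// Iterate over the statistics of each partition.
       pub fn iter(&self) -> impl Iterator<Item = &Statistics> + '_ {
           self.inner.iter().map(|s| s.as_ref())
       }
   }
   ```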



##########
datafusion/core/tests/physical_optimizer/partition_statistics.rs:
##########
@@ -0,0 +1,317 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#[cfg(test)]
+mod test {
+    use arrow_schema::{DataType, Field, Schema, SortOptions};
+    use datafusion::datasource::listing::ListingTable;
+    use datafusion::prelude::SessionContext;
+    use datafusion_catalog::TableProvider;
+    use datafusion_common::stats::Precision;
+    use datafusion_common::{ScalarValue, Statistics};
+    use datafusion_execution::config::SessionConfig;
+    use datafusion_expr_common::operator::Operator;
+    use datafusion_physical_expr::expressions::{binary, lit, Column};
+    use datafusion_physical_expr_common::physical_expr::PhysicalExpr;
+    use datafusion_physical_expr_common::sort_expr::{LexOrdering, PhysicalSortExpr};
+    use datafusion_physical_plan::filter::FilterExec;
+    use datafusion_physical_plan::projection::ProjectionExec;
+    use datafusion_physical_plan::sorts::sort::SortExec;
+    use datafusion_physical_plan::union::UnionExec;
+    use datafusion_physical_plan::ExecutionPlan;
+    use std::sync::Arc;
+
+    async fn generate_listing_table_with_statistics(
+        target_partition: Option<usize>,
+    ) -> Arc<dyn ExecutionPlan> {
+        let mut session_config = SessionConfig::new().with_collect_statistics(true);
+        if let Some(partition) = target_partition {
+            session_config = session_config.with_target_partitions(partition);
+        }
+        let ctx = SessionContext::new_with_config(session_config);
+        // Create table with partition
+        let create_table_sql = "CREATE EXTERNAL TABLE t1 (id INT not null, date DATE) STORED AS PARQUET LOCATION './tests/data/test_statistics_per_partition' PARTITIONED BY (date) WITH ORDER (id ASC);";
+        ctx.sql(create_table_sql)
+            .await
+            .unwrap()
+            .collect()
+            .await
+            .unwrap();
+        let table = ctx.table_provider("t1").await.unwrap();
+        let listing_table = table
+            .as_any()
+            .downcast_ref::<ListingTable>()
+            .unwrap()
+            .clone();
+        listing_table
+            .scan(&ctx.state(), None, &[], None)
+            .await
+            .unwrap()
+    }
+
+    fn check_unchanged_statistics(statistics: Vec<Statistics>) {
+        // Check the statistics of each partition
+        for stat in &statistics {
+            assert_eq!(stat.num_rows, Precision::Exact(2));
+            // First column (id) should have non-null values
+            assert_eq!(stat.column_statistics[0].null_count, Precision::Exact(0));
+        }
+
+        // Verify specific id values for each partition
+        assert_eq!(
+            statistics[0].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(4)))
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(3)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(2)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(1)))
+        );
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_data_source() -> datafusion_common::Result<()>
+    {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        let statistics = scan.statistics_by_partition()?;
+        // Check the statistics of each partition
+        assert_eq!(statistics.len(), 2);
+        for stat in &statistics {
+            assert_eq!(stat.column_statistics.len(), 2);
+            assert_eq!(stat.total_byte_size, Precision::Exact(110));
+        }
+        check_unchanged_statistics(statistics);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_projection() -> datafusion_common::Result<()>
+    {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        // Add projection execution plan
+        let exprs: Vec<(Arc<dyn PhysicalExpr>, String)> =
+            vec![(Arc::new(Column::new("id", 0)), "id".to_string())];
+        let projection = ProjectionExec::try_new(exprs, scan)?;
+        let statistics = projection.statistics_by_partition()?;
+        for stat in &statistics {
+            assert_eq!(stat.column_statistics.len(), 1);
+            assert_eq!(stat.total_byte_size, Precision::Exact(8));
+        }
+        check_unchanged_statistics(statistics);
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_sort() -> datafusion_common::Result<()> {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        // Add sort execution plan
+        let sort = SortExec::new(
+            LexOrdering::new(vec![PhysicalSortExpr {
+                expr: Arc::new(Column::new("id", 0)),
+                options: SortOptions {
+                    descending: false,
+                    nulls_first: false,
+                },
+            }]),
+            scan,
+        );
+        let mut sort_exec = Arc::new(sort.clone());
+        let statistics = sort_exec.statistics_by_partition()?;
+        assert_eq!(statistics.len(), 1);
+        assert_eq!(statistics[0].num_rows, Precision::Exact(4));
+        assert_eq!(statistics[0].column_statistics.len(), 2);
+        assert_eq!(statistics[0].total_byte_size, Precision::Exact(220));
+        assert_eq!(
+            statistics[0].column_statistics[0].null_count,
+            Precision::Exact(0)
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(4)))
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(1)))
+        );
+        sort_exec = Arc::new(sort.with_preserve_partitioning(true));
+        let statistics = sort_exec.statistics_by_partition()?;
+        assert_eq!(statistics.len(), 2);
+        assert_eq!(statistics[0].num_rows, Precision::Exact(2));
+        assert_eq!(statistics[1].num_rows, Precision::Exact(2));
+        assert_eq!(statistics[0].column_statistics.len(), 2);
+        assert_eq!(statistics[1].column_statistics.len(), 2);
+        assert_eq!(statistics[0].total_byte_size, Precision::Exact(110));
+        assert_eq!(statistics[1].total_byte_size, Precision::Exact(110));
+        assert_eq!(
+            statistics[0].column_statistics[0].null_count,
+            Precision::Exact(0)
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(4)))
+        );
+        assert_eq!(
+            statistics[0].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(3)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].null_count,
+            Precision::Exact(0)
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].max_value,
+            Precision::Exact(ScalarValue::Int32(Some(2)))
+        );
+        assert_eq!(
+            statistics[1].column_statistics[0].min_value,
+            Precision::Exact(ScalarValue::Int32(Some(1)))
+        );
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn test_statistics_by_partition_of_filter() -> datafusion_common::Result<()> {
+        let scan = generate_listing_table_with_statistics(Some(2)).await;
+        let schema = Schema::new(vec![Field::new("id", DataType::Int32, false)]);
+        let predicate = binary(
+            Arc::new(Column::new("id", 0)),
+            Operator::Lt,
+            lit(1i32),
+            &schema,
+        )?;
+        let filter: Arc<dyn ExecutionPlan> =
+            Arc::new(FilterExec::try_new(predicate, scan)?);
+        let _full_statistics = filter.statistics()?;
+        // The full statistics is invalid, at least, we can improve the selectivity estimation of the filter

Review Comment:
   I don't understand this comment. Should we file a ticket to track whatever 
the expected result is?
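   If a ticket is filed, the follow-up test could make the expectation explicit instead of discarding the value. A sketch (whether FilterExec reports the estimate as inexact or absent here is an assumption):

   ```rust
   let full_statistics = filter.statistics()?;
   // After a filter, the row count is an estimate at best, so it should not be exact.
   assert!(!matches!(full_statistics.num_rows, Precision::Exact(_)));
   ```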



##########
datafusion/physical-plan/src/statistics.rs:
##########
@@ -0,0 +1,151 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Utilities for computing and summarizing [`Statistics`] across
+//! multiple partitions or file groups
+
+use datafusion_common::stats::Precision;
+use datafusion_common::{ColumnStatistics, ScalarValue, Statistics};
+use std::mem;
+
+/// Generic function to compute statistics across multiple items that have statistics
+pub fn compute_summary_statistics<T, I>(
+    items: I,
+    column_count: usize,
+    stats_extractor: impl Fn(&T) -> Option<&Statistics>,
+) -> Statistics
+where
+    I: IntoIterator<Item = T>,
+{
+    let mut col_stats_set = vec![ColumnStatistics::default(); column_count];
+    let mut num_rows = Precision::<usize>::Absent;
+    let mut total_byte_size = Precision::<usize>::Absent;
+
+    for (idx, item) in items.into_iter().enumerate() {
+        if let Some(item_stats) = stats_extractor(&item) {
+            if idx == 0 {
+                // First item, set values directly
+                num_rows = item_stats.num_rows;
+                total_byte_size = item_stats.total_byte_size;
+                for (index, column_stats) in
+                    item_stats.column_statistics.iter().enumerate()
+                {
+                    col_stats_set[index].null_count = column_stats.null_count;
+                    col_stats_set[index].max_value = column_stats.max_value.clone();
+                    col_stats_set[index].min_value = column_stats.min_value.clone();
+                    col_stats_set[index].sum_value = column_stats.sum_value.clone();
+                }
+                continue;
+            }
+
+            // Accumulate statistics for subsequent items
+            num_rows = add_row_stats(item_stats.num_rows, num_rows);
+            total_byte_size = add_row_stats(item_stats.total_byte_size, total_byte_size);
+
+            for (item_col_stats, col_stats) in item_stats
+                .column_statistics
+                .iter()
+                .zip(col_stats_set.iter_mut())
+            {
+                col_stats.null_count =
+                    add_row_stats(item_col_stats.null_count, col_stats.null_count);
+                set_max_if_greater(&item_col_stats.max_value, &mut col_stats.max_value);
+                set_min_if_lesser(&item_col_stats.min_value, &mut col_stats.min_value);
+                col_stats.sum_value = item_col_stats.sum_value.add(&col_stats.sum_value);
+            }
+        }
+    }
+
+    Statistics {
+        num_rows,
+        total_byte_size,
+        column_statistics: col_stats_set,
+    }
+}
+
+/// If the given value is numerically greater than the original maximum value,

Review Comment:
   This seems somewhat duplicated with `Precision::max` 🤔 
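   If `Precision::max` / `Precision::min` already handle the absent and inexact cases the same way, the per-column accumulation above could presumably shrink to something like:

   ```rust
   // Inside the accumulation loop for subsequent items:
   col_stats.max_value = col_stats.max_value.max(&item_col_stats.max_value);
   col_stats.min_value = col_stats.min_value.min(&item_col_stats.min_value);
   ```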



##########
datafusion/datasource/src/source.rs:
##########
@@ -175,6 +175,23 @@ impl ExecutionPlan for DataSourceExec {
         self.data_source.statistics()
     }
 
+    fn statistics_by_partition(&self) -> datafusion_common::Result<Vec<Statistics>> {
+        let mut statistics = vec![
+            Statistics::new_unknown(&self.schema());
+            self.properties().partitioning.partition_count()
+        ];
+        if let Some(file_config) =
+            self.data_source.as_any().downcast_ref::<FileScanConfig>()
+        {
+            for (idx, file_group) in file_config.file_groups.iter().enumerate() {
+                if let Some(stat) = file_group.statistics() {
+                    statistics[idx] = stat.clone();

Review Comment:
   I am also growing worried about the amount of cloning happening for each 
Statistics object... they are deep clones at the moment
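   If the file groups stored their statistics behind `Arc` (and the method returned the `PartitionedStatistics` wrapper suggested above), these could become cheap pointer clones. A rough sketch, using a hypothetical `statistics_arc()` accessor that would return `Option<&Arc<Statistics>>`:

   ```rust
   fn statistics_by_partition(&self) -> datafusion_common::Result<PartitionedStatistics> {
       let partitions = self.properties().partitioning.partition_count();
       let mut statistics =
           vec![Arc::new(Statistics::new_unknown(&self.schema())); partitions];
       if let Some(file_config) =
           self.data_source.as_any().downcast_ref::<FileScanConfig>()
       {
           for (idx, file_group) in file_config.file_groups.iter().enumerate() {
               // Hypothetical accessor: cloning the Arc shares the underlying
               // Statistics instead of deep-copying it.
               if let Some(stat) = file_group.statistics_arc() {
                   statistics[idx] = Arc::clone(stat);
               }
           }
       }
       Ok(PartitionedStatistics::new(statistics))
   }
   ```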



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
