yahoNanJing commented on code in PR #4122: URL: https://github.com/apache/arrow-datafusion/pull/4122#discussion_r1017522456
########## datafusion/core/src/physical_optimizer/enforcement.rs: ########## @@ -0,0 +1,1739 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! Enforcement optimizer rules are used to make sure the plan's Distribution and Ordering +//! requirements are met by inserting necessary [[RepartitionExec]] and [[SortExec]]. +//! 
+use crate::error::Result; +use crate::physical_optimizer::PhysicalOptimizerRule; +use crate::physical_plan::aggregates::{AggregateExec, AggregateMode, PhysicalGroupBy}; +use crate::physical_plan::coalesce_partitions::CoalescePartitionsExec; +use crate::physical_plan::joins::{ + CrossJoinExec, HashJoinExec, PartitionMode, SortMergeJoinExec, +}; +use crate::physical_plan::projection::ProjectionExec; +use crate::physical_plan::repartition::RepartitionExec; +use crate::physical_plan::rewrite::TreeNodeRewritable; +use crate::physical_plan::sorts::sort::SortExec; +use crate::physical_plan::Partitioning; +use crate::physical_plan::{with_new_children_if_necessary, Distribution, ExecutionPlan}; +use crate::prelude::SessionConfig; +use datafusion_expr::logical_plan::JoinType; +use datafusion_physical_expr::equivalence::EquivalenceProperties; +use datafusion_physical_expr::expressions::Column; +use datafusion_physical_expr::expressions::NoOp; +use datafusion_physical_expr::{ + expr_list_eq_strict_order, normalize_expr_with_equivalence_properties, + normalize_sort_expr_with_equivalence_properties, PhysicalExpr, PhysicalSortExpr, +}; +use std::collections::HashMap; +use std::sync::Arc; + +/// BasicEnforcement rule: it ensures the Distribution and Ordering requirements are met +/// in the strictest way. It might add additional [[RepartitionExec]] to the plan tree +/// and give a non-optimal plan, but it can avoid the possible data skew in joins. +/// +/// For example, for a HashJoin with keys(a, b, c), the required Distribution(a, b, c) can be satisfied by +/// several alternative partitioning ways: [(a, b, c), (a, b), (a, c), (b, c), (a), (b), (c), ( )]. +/// +/// This rule only chooses the exact match and satisfies the Distribution(a, b, c) by a HashPartition(a, b, c). 
+#[derive(Default)] +pub struct BasicEnforcement {} + +impl BasicEnforcement { + #[allow(missing_docs)] + pub fn new() -> Self { + Self {} + } +} + +impl PhysicalOptimizerRule for BasicEnforcement { + fn optimize( + &self, + plan: Arc<dyn ExecutionPlan>, + config: &SessionConfig, + ) -> Result<Arc<dyn ExecutionPlan>> { + let target_partitions = config.target_partitions; + let top_down_join_key_reordering = config.top_down_join_key_reordering; + let new_plan = if top_down_join_key_reordering { + // Run a top-down process to adjust input key ordering recursively + adjust_input_keys_down_recursively(plan, vec![])? + } else { + plan + }; + // Distribution and Ordering enforcement need to be applied bottom-up. + new_plan.transform_up(&{ + |plan| { + let adjusted = if !top_down_join_key_reordering { + reorder_join_keys_to_inputs(plan) + } else { + plan + }; + Some(ensure_distribution_and_ordering( + adjusted, + target_partitions, + )) + } + }) + } + + fn name(&self) -> &str { + "BasicEnforcement" + } +} + +/// When the physical planner creates the Joins, the ordering of join keys is from the original query. +/// That might not match with the output partitioning of the join node's children +/// This method runs a top-down process and try to adjust the output partitioning of the children +/// if the children themselves are Joins or Aggregations. +fn adjust_input_keys_down_recursively( + plan: Arc<dyn crate::physical_plan::ExecutionPlan>, + parent_required: Vec<Arc<dyn PhysicalExpr>>, +) -> Result<Arc<dyn crate::physical_plan::ExecutionPlan>> { + let plan_any = plan.as_any(); + if let Some(HashJoinExec { + left, + right, + on, + filter, + join_type, + mode, + null_equals_null, + .. 
+ }) = plan_any.downcast_ref::<HashJoinExec>() + { + match mode { + PartitionMode::Partitioned => { + let join_key_pairs = extract_join_keys(on); + if let Some(( + JoinKeyPairs { + left_keys, + right_keys, + }, + new_positions, + )) = try_reorder( + join_key_pairs.clone(), + parent_required, + &plan.equivalence_properties(), + ) { + let new_join_on = if !new_positions.is_empty() { + new_join_conditions(&left_keys, &right_keys) + } else { + on.clone() + }; + let new_left = + adjust_input_keys_down_recursively(left.clone(), left_keys)?; + let new_right = + adjust_input_keys_down_recursively(right.clone(), right_keys)?; + Ok(Arc::new(HashJoinExec::try_new( + new_left, + new_right, + new_join_on, + filter.clone(), + join_type, + PartitionMode::Partitioned, + null_equals_null, + )?)) + } else { + let new_left = adjust_input_keys_down_recursively( + left.clone(), + join_key_pairs.left_keys, + )?; + let new_right = adjust_input_keys_down_recursively( + right.clone(), + join_key_pairs.right_keys, + )?; + Ok(Arc::new(HashJoinExec::try_new( + new_left, + new_right, + on.clone(), + filter.clone(), + join_type, + PartitionMode::Partitioned, + null_equals_null, + )?)) + } + } + PartitionMode::CollectLeft => { + let new_left = adjust_input_keys_down_recursively(left.clone(), vec![])?; + let new_right = match join_type { + JoinType::Inner | JoinType::Right => try_push_required_to_right( + parent_required, + right.clone(), + left.schema().fields().len(), + )?, + JoinType::RightSemi | JoinType::RightAnti => { + adjust_input_keys_down_recursively( + right.clone(), + parent_required.clone(), + )? + } + JoinType::Left + | JoinType::LeftSemi + | JoinType::LeftAnti + | JoinType::Full => { + adjust_input_keys_down_recursively(right.clone(), vec![])? + } + }; + + Ok(Arc::new(HashJoinExec::try_new( + new_left, + new_right, + on.clone(), + filter.clone(), + join_type, + PartitionMode::CollectLeft, + null_equals_null, + )?)) + } + } + } else if let Some(CrossJoinExec { left, right, .. 
}) = + plan_any.downcast_ref::<CrossJoinExec>() + { + let new_left = adjust_input_keys_down_recursively(left.clone(), vec![])?; + let new_right = try_push_required_to_right( + parent_required, + right.clone(), + left.schema().fields().len(), + )?; + Ok(Arc::new(CrossJoinExec::try_new(new_left, new_right)?)) + } else if let Some(SortMergeJoinExec { + left, + right, + on, + join_type, + sort_options, + null_equals_null, + .. + }) = plan_any.downcast_ref::<SortMergeJoinExec>() + { + let join_key_pairs = extract_join_keys(on); + if let Some(( + JoinKeyPairs { + left_keys, + right_keys, + }, + new_positions, + )) = try_reorder( + join_key_pairs.clone(), + parent_required, + &plan.equivalence_properties(), + ) { + let new_join_on = if !new_positions.is_empty() { + new_join_conditions(&left_keys, &right_keys) + } else { + on.clone() + }; + let new_options = if !new_positions.is_empty() { + let mut new_sort_options = vec![]; + for idx in 0..sort_options.len() { + new_sort_options.push(sort_options[new_positions[idx]]) + } + new_sort_options + } else { + sort_options.clone() + }; + + let new_left = adjust_input_keys_down_recursively(left.clone(), left_keys)?; + let new_right = + adjust_input_keys_down_recursively(right.clone(), right_keys)?; + + Ok(Arc::new(SortMergeJoinExec::try_new( + new_left, + new_right, + new_join_on, + *join_type, + new_options, + *null_equals_null, + )?)) + } else { + let new_left = adjust_input_keys_down_recursively( + left.clone(), + join_key_pairs.left_keys, + )?; + let new_right = adjust_input_keys_down_recursively( + right.clone(), + join_key_pairs.right_keys, + )?; + Ok(Arc::new(SortMergeJoinExec::try_new( + new_left, + new_right, + on.clone(), + *join_type, + sort_options.clone(), + *null_equals_null, + )?)) + } + } else if let Some(AggregateExec { + mode, + group_by, + aggr_expr, + input, + input_schema, + .. 
+ }) = plan_any.downcast_ref::<AggregateExec>() + { + if parent_required.is_empty() { + plan.map_children(|plan| adjust_input_keys_down_recursively(plan, vec![])) + } else { + match mode { + AggregateMode::Final => plan.map_children(|plan| { + adjust_input_keys_down_recursively(plan, vec![]) + }), + AggregateMode::FinalPartitioned | AggregateMode::Partial => { + let out_put_columns = group_by + .expr() + .iter() + .enumerate() + .map(|(index, (_col, name))| Column::new(name, index)) + .collect::<Vec<_>>(); + + let out_put_exprs = out_put_columns + .iter() + .map(|c| Arc::new(c.clone()) as Arc<dyn PhysicalExpr>) + .collect::<Vec<_>>(); + + // Check whether the requirements can be satisfied by the Aggregation + if parent_required.len() != out_put_exprs.len() + || expr_list_eq_strict_order(&out_put_exprs, &parent_required) + || !group_by.null_expr().is_empty() + { + plan.map_children(|plan| { + adjust_input_keys_down_recursively(plan, vec![]) + }) + } else { + let new_positions = + expected_expr_positions(&out_put_exprs, &parent_required); + match new_positions { + Some(positions) => { + let mut new_group_exprs = vec![]; + for idx in positions.into_iter() { + new_group_exprs.push(group_by.expr()[idx].clone()); + } + let new_group_by = + PhysicalGroupBy::new_single(new_group_exprs); + match mode { + AggregateMode::FinalPartitioned => { + let new_input = + adjust_input_keys_down_recursively( Review Comment: Since the input of `FinalPartitioned` should be the `Partial` and they should share the same column order, it's safe to call `adjust_input_keys_down_recursively` here. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
