fvaleye commented on code in PR #1620:
URL: https://github.com/apache/iceberg-rust/pull/1620#discussion_r2314208788


##########
crates/integrations/datafusion/src/physical_plan/repartition.rs:
##########
@@ -0,0 +1,805 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use datafusion::error::Result as DFResult;
+use datafusion::physical_plan::expressions::Column;
+use datafusion::physical_plan::repartition::RepartitionExec;
+use datafusion::physical_plan::{ExecutionPlan, Partitioning};
+use iceberg::spec::{SchemaRef, TableMetadata, TableMetadataRef, Transform};
+
+/// Creates an Iceberg-aware repartition execution plan that optimizes data distribution
+/// for parallel processing while respecting Iceberg table partitioning semantics.
+///
+/// This function automatically determines the optimal partitioning strategy based on
+/// the table's partition specification and sort order:
+///
+/// ## Partitioning Strategies
+///
+/// - **Unpartitioned tables**: Uses round-robin distribution to ensure balanced load
+///   across all workers, maximizing parallelism for write operations.
+///
+/// - **Hash partitioning**: Used for tables with identity transforms or bucket transforms:
+///   - Identity partition columns (e.g., `PARTITIONED BY (user_id, category)`)
+///   - Bucket columns from partition spec or sort order
+///   - This ensures data co-location within partitions and buckets for optimal file clustering
+///
+/// - **Round-robin partitioning**: Used for:
+///   - Range-only partitions (e.g., date/time partitions that concentrate data)
+///   - Tables with only temporal/range transforms that don't provide good distribution
+///   - Unpartitioned or non-bucketed tables
+///
+/// - **Mixed transforms**: Tables with both range and identity/bucket transforms use hash
+///   partitioning on the identity/bucket columns for optimal distribution.
+///
+/// ## Performance notes
+///
+/// - Only repartitions when the input partitioning scheme differs from the desired strategy
+/// - Only repartitions when the input partition count differs from the target
+/// - Requires explicit target partition count for deterministic behavior
+/// - Preserves column order (partitions first, then buckets) for consistent file layout
+///
+/// # Arguments
+///
+/// * `input` - The input execution plan providing data to be repartitioned
+/// * `table_schema` - The Iceberg table schema used to resolve column references
+/// * `table_metadata` - The Iceberg table metadata containing partition spec and sort order
+/// * `target_partitions` - Target number of partitions for parallel processing (must be > 0)
+///
+/// # Returns
+///
+/// An execution plan that will apply the optimal partitioning strategy during execution,
+/// or the original input plan unchanged if no repartitioning is needed.
+///
+/// # Example
+///
+/// ```ignore
+/// let repartitioned_plan = repartition(
+///     input_plan,
+///     table.schema_ref(),
+///     table.metadata_ref(),
+///     4, // Explicit partition count
+/// )?;
+/// ```
+pub fn repartition(
+    input: Arc<dyn ExecutionPlan>,
+    table_schema: SchemaRef,
+    table_metadata: TableMetadataRef,
+    target_partitions: usize,
+) -> DFResult<Arc<dyn ExecutionPlan>> {
+    if target_partitions == 0 {
+        return Err(datafusion::error::DataFusionError::Plan(
+            "repartition requires target_partitions > 0".to_string(),
+        ));
+    }
+
+    let partitioning_strategy =
+        determine_partitioning_strategy(&input, &table_schema, &table_metadata, target_partitions)?;
+
+    if !needs_repartitioning(&input, &partitioning_strategy) {
+        return Ok(input);
+    }
+
+    Ok(Arc::new(RepartitionExec::try_new(
+        input,
+        partitioning_strategy,
+    )?))
+}
+
+/// Returns whether repartitioning is actually needed by comparing input and desired partitioning
+fn needs_repartitioning(input: &Arc<dyn ExecutionPlan>, desired: &Partitioning) -> bool {
+    let input_partitioning = input.properties().output_partitioning();
+    match (input_partitioning, desired) {
+        (Partitioning::RoundRobinBatch(a), Partitioning::RoundRobinBatch(b)) => a != b,
+        (Partitioning::Hash(a_exprs, a_n), Partitioning::Hash(b_exprs, b_n)) => {
+            a_n != b_n || !same_columns(a_exprs, b_exprs)
+        }
+        _ => true,
+    }
+}
+
+/// Helper function to check if two sets of column expressions are the same
+fn same_columns(
+    a_exprs: &[Arc<dyn datafusion::physical_expr::PhysicalExpr>],
+    b_exprs: &[Arc<dyn datafusion::physical_expr::PhysicalExpr>],
+) -> bool {
+    if a_exprs.len() != b_exprs.len() {
+        return false;
+    }
+    a_exprs
+        .iter()
+        .zip(b_exprs.iter())
+        .all(|(a, b)| a.as_any().downcast_ref::<Column>() == b.as_any().downcast_ref::<Column>())
+}
+
+/// Determines the optimal partitioning strategy based on table metadata.
+///
+/// This function analyzes the table's partition specification and sort order to select
+/// the most appropriate DataFusion partitioning strategy for insert operations.
+///
+/// ## Partitioning Strategy Logic
+///
+/// The strategy is determined by analyzing the table's partition transforms:
+///
+/// - **Hash partitioning**: Used only when there are identity transforms (direct column partitioning)
+///   or bucket transforms that provide good data distribution:
+///   1. Identity partition columns (e.g., `PARTITIONED BY (user_id, category)`)
+///   2. Bucket columns from partition spec (e.g., `bucket(16, user_id)`)
+///   3. Bucket columns from sort order
+///   
+///   This ensures data co-location within partitions and buckets for optimal file clustering.
+///
+/// - **Round-robin partitioning**: Used for:
+///   - Unpartitioned tables
+///   - Range-only partitions (e.g., date/time partitions that concentrate data)
+///   - Tables with only temporal/range transforms that don't provide good distribution
+///   - Tables with no suitable hash columns
+///
+/// ## Column Priority and Deduplication
+///
+/// When multiple column sources are available, they are combined in this order:
+/// 1. Partition identity columns (highest priority)
+/// 2. Bucket columns from partition spec  
+/// 3. Bucket columns from sort order
+///
+/// Duplicate columns are automatically removed while preserving the priority order.
+///
+/// ## Fallback Behavior
+///
+/// If no suitable hash columns are found (e.g., unpartitioned, range-only, or non-bucketed table),
+/// falls back to round-robin batch partitioning for even load distribution.
+fn determine_partitioning_strategy(
+    input: &Arc<dyn ExecutionPlan>,
+    table_schema: &SchemaRef,
+    table_metadata: &TableMetadata,
+    target_partitions: usize,
+) -> DFResult<Partitioning> {
+    let partition_spec = table_metadata.default_partition_spec();
+    let sort_order = table_metadata.default_sort_order();
+
+    let names_iter: Box<dyn Iterator<Item = &str>> = {
+        // Partition identity columns
+        let part_names = partition_spec.fields().iter().filter_map(|pf| {
+            if matches!(pf.transform, Transform::Identity) {
+                table_schema
+                    .field_by_id(pf.source_id)
+                    .map(|sf| sf.name.as_str())
+            } else {
+                None
+            }
+        });
+        // Bucket columns from partition spec
+        let bucket_names_part = partition_spec.fields().iter().filter_map(|pf| {
+            if let Transform::Bucket(_) = pf.transform {
+                table_schema
+                    .field_by_id(pf.source_id)
+                    .map(|sf| sf.name.as_str())
+            } else {
+                None
+            }
+        });
+        // Bucket columns from sort order
+        let bucket_names_sort = sort_order.fields.iter().filter_map(|sf| {
+            if let Transform::Bucket(_) = sf.transform {
+                table_schema
+                    .field_by_id(sf.source_id)
+                    .map(|field| field.name.as_str())
+            } else {
+                None
+            }
+        });
+        Box::new(part_names.chain(bucket_names_part).chain(bucket_names_sort))
+    };
+
+    // Order: partitions first, then buckets
+    // Deduplicate while preserving order
+    let input_schema = input.schema();
+    let mut seen: HashSet<&str> = HashSet::new();

Review Comment:
   I used a `seen` `HashSet` here because I wanted to keep the order.
   If we collected the iterator directly into a `HashSet`, we would lose the order, resulting in incorrect partitioning.
   We could either add `indexmap::IndexSet` as a dependency or keep the `seen` hash set to deduplicate while preserving order (a sketch of that pattern follows below).
   
   In partitioning, order matters: partition identity columns must come before bucket columns.
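   
   For reference, a minimal self-contained sketch of the order-preserving dedup pattern discussed here (the function name and column names are illustrative only, not part of this PR):
   
   ```rust
   use std::collections::HashSet;
   
   /// Deduplicates column names while keeping first-seen order:
   /// `HashSet::insert` returns `true` only for newly seen values,
   /// so `filter` keeps each name the first time it appears.
   fn dedup_preserving_order<'a>(names: impl Iterator<Item = &'a str>) -> Vec<&'a str> {
       let mut seen: HashSet<&str> = HashSet::new();
       names.filter(|name| seen.insert(*name)).collect()
   }
   
   fn main() {
       // Partition identity columns first, then bucket columns; the duplicate
       // `user_id` is dropped without disturbing the priority order.
       let ordered = dedup_preserving_order(
           ["user_id", "category", "user_id", "bucket_col"].into_iter(),
       );
       assert_eq!(ordered, ["user_id", "category", "bucket_col"]);
   }
   ```
   
   `indexmap::IndexSet` would express the same thing with one collection instead of two, at the cost of an extra dependency.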


