fvaleye commented on code in PR #1620:
URL: https://github.com/apache/iceberg-rust/pull/1620#discussion_r2297894049


##########
crates/integrations/datafusion/src/physical_plan/repartition.rs:
##########
@@ -0,0 +1,906 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use datafusion::error::Result as DFResult;
+use datafusion::execution::{SendableRecordBatchStream, TaskContext};
+use datafusion::physical_expr::{EquivalenceProperties, PhysicalExpr};
+use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
+use datafusion::physical_plan::expressions::Column;
+use datafusion::physical_plan::repartition::RepartitionExec;
+use datafusion::physical_plan::{
+    DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties,
+};
+use iceberg::spec::{SchemaRef, TableMetadata, TableMetadataRef, Transform};
+
+/// Iceberg-specific repartition execution plan that optimizes data 
distribution
+/// for parallel processing while respecting Iceberg table partitioning 
semantics.
+///
+/// This execution plan automatically determines the optimal partitioning 
strategy based on
+/// the table's partition specification and the configured write distribution 
mode:
+///
+/// ## Partitioning Strategies
+///
+/// - **Unpartitioned tables**: Uses round-robin distribution to ensure 
balanced load
+///   across all workers, maximizing parallelism for write operations.
+///
+/// - **Partitioned tables**: Uses hash partitioning on partition columns 
(identity transforms)
+///   and bucket columns to maintain data co-location. This ensures:
+///   - Better file clustering within partitions
+///   - Improved query pruning performance
+///   - Optimal join performance on partitioned columns
+///
+/// - **Range-distributed tables**: Approximates range distribution by hashing 
on sort order
+///   columns since DataFusion lacks native range exchange. Falls back to 
partition/bucket
+///   column hashing when available.
+///
+/// ## Write Distribution Modes
+///
+/// Respects the table's `write.distribution-mode` property:
+/// - `hash` (default): Distributes by partition and bucket columns
+/// - `range`: Distributes by sort order columns
+/// - `none`: Uses round-robin distribution
+///
+/// ## Performance notes
+///
+/// - Only repartitions when the input partitioning scheme differs from the 
desired strategy
+/// - Only repartitions when the input partition count differs from the target
+/// - Automatically detects optimal partition count from DataFusion's 
SessionConfig
+/// - Preserves column order (partitions first, then buckets) for consistent 
file layout
+#[derive(Debug)]
+pub struct IcebergRepartitionExec {
+    /// Input execution plan
+    input: Arc<dyn ExecutionPlan>,
+    /// Iceberg table schema to determine partitioning strategy
+    table_schema: SchemaRef,
+    /// Iceberg table metadata to determine partitioning strategy
+    table_metadata: TableMetadataRef,
+    /// Target number of partitions for data distribution
+    target_partitions: usize,
+    /// Partitioning strategy
+    partitioning_strategy: Partitioning,
+    /// Plan properties for optimization
+    plan_properties: PlanProperties,
+}
+
+impl IcebergRepartitionExec {
+    /// Creates a new IcebergRepartitionExec with automatic partitioning 
strategy selection.
+    ///
+    /// This constructor analyzes the table's partition specification, sort 
order, and write
+    /// distribution mode to determine the optimal repartitioning strategy for 
insert operations.
+    ///
+    /// # Arguments
+    ///
+    /// * `input` - The input execution plan providing data to be repartitioned
+    /// * `table_schema` - The Iceberg table schema used to resolve column 
references
+    /// * `table_metadata` - The Iceberg table metadata containing partition 
spec, sort order,
+    ///   and table properties including write distribution mode
+    /// * `target_partitions` - Target number of partitions for parallel 
processing:
+    ///   - `0`: Auto-detect from DataFusion's SessionConfig target_partitions 
(recommended)
+    ///   - `> 0`: Use explicit partition count for specific performance tuning
+    ///
+    /// # Returns
+    ///
+    /// A configured repartition execution plan that will apply the optimal 
partitioning
+    /// strategy during execution, or pass through unchanged data if no 
repartitioning
+    /// is needed.
+    ///
+    /// # Example
+    ///
+    /// ```ignore
+    /// let repartition_exec = IcebergRepartitionExec::new(
+    ///     input_plan,
+    ///     table.schema_ref(),
+    ///     table.metadata_ref(),
+    ///     state.config().target_partitions(),
+    /// )?;
+    /// ```
+    pub fn new(
+        input: Arc<dyn ExecutionPlan>,
+        table_schema: SchemaRef,
+        table_metadata: TableMetadataRef,
+        target_partitions: usize,
+    ) -> DFResult<Self> {
+        if target_partitions == 0 {
+            return Err(datafusion::error::DataFusionError::Plan(
+                "IcebergRepartitionExec requires target_partitions > 
0".to_string(),
+            ));
+        }
+
+        let partitioning_strategy = Self::determine_partitioning_strategy(
+            &input,
+            &table_schema,
+            &table_metadata,
+            target_partitions,
+        )?;
+
+        let plan_properties = Self::compute_properties(&input, 
&partitioning_strategy)?;
+
+        Ok(Self {
+            input,
+            table_schema,
+            table_metadata,
+            target_partitions,
+            partitioning_strategy,
+            plan_properties,
+        })
+    }
+
+    /// Computes the plan properties based on the table partitioning strategy
+    /// Selects the partitioning strategy based on the table partitioning 
strategy
+    fn compute_properties(
+        input: &Arc<dyn ExecutionPlan>,
+        partitioning_strategy: &Partitioning,
+    ) -> DFResult<PlanProperties> {
+        let schema = input.schema();
+        let equivalence_properties = EquivalenceProperties::new(schema);
+
+        Ok(PlanProperties::new(
+            equivalence_properties,
+            partitioning_strategy.clone(),
+            EmissionType::Incremental,
+            Boundedness::Bounded,
+        ))
+    }
+
+    /// Determines the optimal partitioning strategy based on table metadata 
and distribution mode.
+    ///
+    /// This function analyzes the table's partition specification, sort 
order, and write distribution
+    /// mode to select the most appropriate DataFusion partitioning strategy 
for insert operations.
+    ///
+    /// ## Distribution Mode Logic
+    ///
+    /// The strategy is determined by the table's `write.distribution-mode` 
property:
+    ///
+    /// - **`hash` (default)**: Uses hash partitioning on:
+    ///   1. Identity partition columns (e.g., `PARTITIONED BY (year, month)`)
+    ///   2. Bucket columns from partition spec (e.g., `bucket(16, user_id)`)
+    ///   3. Bucket columns from sort order
+    ///   
+    ///   This ensures data co-location within partitions and buckets for 
optimal file clustering.
+    ///
+    /// - **`range`**: Approximates range distribution by hashing on sort 
order columns.
+    ///   Since DataFusion lacks native range exchange, this provides the 
closest alternative
+    ///   while maintaining some ordering characteristics.
+    ///
+    /// - **`none` or other**: Falls back to round-robin distribution for 
balanced load.
+    ///
+    /// ## Column Priority and Deduplication
+    ///
+    /// When multiple column sources are available, they are combined in this 
order:
+    /// 1. Partition identity columns (highest priority)
+    /// 2. Bucket columns from partition spec  
+    /// 3. Bucket columns from sort order
+    /// 4. Sort order columns (for range mode)
+    ///
+    /// Duplicate columns are automatically removed while preserving the 
priority order.
+    ///
+    /// ## Fallback Behavior
+    ///
+    /// If no suitable hash columns are found (e.g., unpartitioned, 
non-bucketed table),
+    /// falls back to round-robin batch partitioning for even load 
distribution.
+    fn determine_partitioning_strategy(

Review Comment:
   > However, this reminds me another case: range only partition, e.g. we only 
has partitions like date, time. I think in this case we should also use round 
robin partition since in this case most data are focused in several partitions.
   
   Hmm. You are right. The range partitions concentrate data in recent 
partitions, making hash partitioning counterproductive (considering a date with 
a temporal partition).
   Since DataFusion doesn't provide Range, the fallback is round-robin and not 
hashing.
   
   Briefly:
   - Hash partition: Only on bucket columns (partition spec + sort order)
   - Round-robin: Everything else (unpartitioned, range, identity, temporal 
transforms)
   
   > Also I don't think we should take into account write.distribution-mode for 
now. The example you use are for spark, but not applicable for datafusion.
   
   Oh, good point, I misunderstood this. I thought it was an iceberg-rust table 
property.



##########
crates/integrations/datafusion/src/physical_plan/repartition.rs:
##########
@@ -0,0 +1,906 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use datafusion::error::Result as DFResult;
+use datafusion::execution::{SendableRecordBatchStream, TaskContext};
+use datafusion::physical_expr::{EquivalenceProperties, PhysicalExpr};
+use datafusion::physical_plan::execution_plan::{Boundedness, EmissionType};
+use datafusion::physical_plan::expressions::Column;
+use datafusion::physical_plan::repartition::RepartitionExec;
+use datafusion::physical_plan::{
+    DisplayAs, DisplayFormatType, ExecutionPlan, Partitioning, PlanProperties,
+};
+use iceberg::spec::{SchemaRef, TableMetadata, TableMetadataRef, Transform};
+
+/// Iceberg-specific repartition execution plan that optimizes data 
distribution
+/// for parallel processing while respecting Iceberg table partitioning 
semantics.
+///
+/// This execution plan automatically determines the optimal partitioning 
strategy based on
+/// the table's partition specification and the configured write distribution 
mode:
+///
+/// ## Partitioning Strategies
+///
+/// - **Unpartitioned tables**: Uses round-robin distribution to ensure 
balanced load
+///   across all workers, maximizing parallelism for write operations.
+///
+/// - **Partitioned tables**: Uses hash partitioning on partition columns 
(identity transforms)
+///   and bucket columns to maintain data co-location. This ensures:
+///   - Better file clustering within partitions
+///   - Improved query pruning performance
+///   - Optimal join performance on partitioned columns
+///
+/// - **Range-distributed tables**: Approximates range distribution by hashing 
on sort order
+///   columns since DataFusion lacks native range exchange. Falls back to 
partition/bucket
+///   column hashing when available.
+///
+/// ## Write Distribution Modes
+///
+/// Respects the table's `write.distribution-mode` property:
+/// - `hash` (default): Distributes by partition and bucket columns
+/// - `range`: Distributes by sort order columns
+/// - `none`: Uses round-robin distribution
+///
+/// ## Performance notes
+///
+/// - Only repartitions when the input partitioning scheme differs from the 
desired strategy
+/// - Only repartitions when the input partition count differs from the target
+/// - Automatically detects optimal partition count from DataFusion's 
SessionConfig
+/// - Preserves column order (partitions first, then buckets) for consistent 
file layout
+#[derive(Debug)]
+pub struct IcebergRepartitionExec {
+    /// Input execution plan
+    input: Arc<dyn ExecutionPlan>,
+    /// Iceberg table schema to determine partitioning strategy
+    table_schema: SchemaRef,
+    /// Iceberg table metadata to determine partitioning strategy
+    table_metadata: TableMetadataRef,
+    /// Target number of partitions for data distribution
+    target_partitions: usize,
+    /// Partitioning strategy
+    partitioning_strategy: Partitioning,
+    /// Plan properties for optimization
+    plan_properties: PlanProperties,
+}
+
+impl IcebergRepartitionExec {
+    /// Creates a new IcebergRepartitionExec with automatic partitioning 
strategy selection.
+    ///
+    /// This constructor analyzes the table's partition specification, sort 
order, and write
+    /// distribution mode to determine the optimal repartitioning strategy for 
insert operations.
+    ///
+    /// # Arguments
+    ///
+    /// * `input` - The input execution plan providing data to be repartitioned
+    /// * `table_schema` - The Iceberg table schema used to resolve column 
references
+    /// * `table_metadata` - The Iceberg table metadata containing partition 
spec, sort order,
+    ///   and table properties including write distribution mode
+    /// * `target_partitions` - Target number of partitions for parallel 
processing:
+    ///   - `0`: Auto-detect from DataFusion's SessionConfig target_partitions 
(recommended)
+    ///   - `> 0`: Use explicit partition count for specific performance tuning
+    ///
+    /// # Returns
+    ///
+    /// A configured repartition execution plan that will apply the optimal 
partitioning
+    /// strategy during execution, or pass through unchanged data if no 
repartitioning
+    /// is needed.
+    ///
+    /// # Example
+    ///
+    /// ```ignore
+    /// let repartition_exec = IcebergRepartitionExec::new(
+    ///     input_plan,
+    ///     table.schema_ref(),
+    ///     table.metadata_ref(),
+    ///     state.config().target_partitions(),
+    /// )?;
+    /// ```
+    pub fn new(
+        input: Arc<dyn ExecutionPlan>,
+        table_schema: SchemaRef,
+        table_metadata: TableMetadataRef,
+        target_partitions: usize,
+    ) -> DFResult<Self> {
+        if target_partitions == 0 {
+            return Err(datafusion::error::DataFusionError::Plan(
+                "IcebergRepartitionExec requires target_partitions > 
0".to_string(),
+            ));
+        }
+
+        let partitioning_strategy = Self::determine_partitioning_strategy(
+            &input,
+            &table_schema,
+            &table_metadata,
+            target_partitions,
+        )?;
+
+        let plan_properties = Self::compute_properties(&input, 
&partitioning_strategy)?;
+
+        Ok(Self {
+            input,
+            table_schema,
+            table_metadata,
+            target_partitions,
+            partitioning_strategy,
+            plan_properties,
+        })
+    }
+
+    /// Computes the plan properties based on the table partitioning strategy
+    /// Selects the partitioning strategy based on the table partitioning 
strategy
+    fn compute_properties(
+        input: &Arc<dyn ExecutionPlan>,
+        partitioning_strategy: &Partitioning,
+    ) -> DFResult<PlanProperties> {
+        let schema = input.schema();
+        let equivalence_properties = EquivalenceProperties::new(schema);
+
+        Ok(PlanProperties::new(
+            equivalence_properties,
+            partitioning_strategy.clone(),
+            EmissionType::Incremental,
+            Boundedness::Bounded,
+        ))
+    }
+
+    /// Determines the optimal partitioning strategy based on table metadata 
and distribution mode.
+    ///
+    /// This function analyzes the table's partition specification, sort 
order, and write distribution
+    /// mode to select the most appropriate DataFusion partitioning strategy 
for insert operations.
+    ///
+    /// ## Distribution Mode Logic
+    ///
+    /// The strategy is determined by the table's `write.distribution-mode` 
property:
+    ///
+    /// - **`hash` (default)**: Uses hash partitioning on:
+    ///   1. Identity partition columns (e.g., `PARTITIONED BY (year, month)`)
+    ///   2. Bucket columns from partition spec (e.g., `bucket(16, user_id)`)
+    ///   3. Bucket columns from sort order
+    ///   
+    ///   This ensures data co-location within partitions and buckets for 
optimal file clustering.
+    ///
+    /// - **`range`**: Approximates range distribution by hashing on sort 
order columns.
+    ///   Since DataFusion lacks native range exchange, this provides the 
closest alternative
+    ///   while maintaining some ordering characteristics.
+    ///
+    /// - **`none` or other**: Falls back to round-robin distribution for 
balanced load.
+    ///
+    /// ## Column Priority and Deduplication
+    ///
+    /// When multiple column sources are available, they are combined in this 
order:
+    /// 1. Partition identity columns (highest priority)
+    /// 2. Bucket columns from partition spec  
+    /// 3. Bucket columns from sort order
+    /// 4. Sort order columns (for range mode)
+    ///
+    /// Duplicate columns are automatically removed while preserving the 
priority order.
+    ///
+    /// ## Fallback Behavior
+    ///
+    /// If no suitable hash columns are found (e.g., unpartitioned, 
non-bucketed table),
+    /// falls back to round-robin batch partitioning for even load 
distribution.
+    fn determine_partitioning_strategy(

Review Comment:
   > However, this reminds me another case: range only partition, e.g. we only 
has partitions like date, time. I think in this case we should also use round 
robin partition since in this case most data are focused in several partitions.
   
   Hmm. You are right. The range partitions concentrate data in recent 
partitions, making hash partitioning counterproductive (considering a date with 
a temporal partition).
   Since DataFusion doesn't provide Range, the fallback is round-robin and not 
hashing.
   
   Briefly:
   - Hash partition: Only on bucket columns (partition spec + sort order)
   - Round-robin: Everything else (unpartitioned, range, identity, temporal 
transforms)
   
   > Also I don't think we should take into account write.distribution-mode for 
now. The example you use are for spark, but not applicable for datafusion.
   
   Oh, good point, I misunderstood this. I thought it was an iceberg-rust table 
property.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@iceberg.apache.org
For additional commands, e-mail: issues-h...@iceberg.apache.org

Reply via email to