szehon-ho commented on code in PR #54330:
URL: https://github.com/apache/spark/pull/54330#discussion_r2874720178


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupPartitionsExec.scala:
##########
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.{Partition, SparkException}
+import org.apache.spark.rdd.{CoalescedRDD, PartitionCoalescer, PartitionGroup, RDD}
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.plans.physical.{KeyedPartitioning, Partitioning}
+import org.apache.spark.sql.catalyst.util.InternalRowComparableWrapper
+import org.apache.spark.sql.connector.catalog.functions.Reducer
+import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
+import org.apache.spark.sql.types.DataType
+
+/**
+ * Physical operator that groups input partitions by their partition keys.
+ *
+ * This operator is used to coalesce partitions from bucketed/partitioned data sources
+ * where multiple input partitions share the same partition key. It's commonly used in
+ * storage-partitioned joins to align partitions from different sides of the join.
+ *
+ * @param child The child plan providing bucketed/partitioned input
+ * @param joinKeyPositions Optional projection to select a subset of the partitioning key
+ *                         for join compatibility (e.g., when join keys are a subset of
+ *                         partition keys)
+ * @param commonPartitionKeys Optional sequence of expected partition key values and their

Review Comment:
   I think that's not true; it's used for all cases where partition values are pushed down, not just the partially clustered case.

   Maybe we can even take the chance to rename it to 'expectedPartitionKeys'.
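   For illustration, a rough sketch of how the declaration could read after such a rename (signature only, the new name is just a suggestion; everything else as already declared in this file):

   ```scala
   case class GroupPartitionsExec(
       child: SparkPlan,
       joinKeyPositions: Option[Seq[Int]] = None,
       // renamed from commonPartitionKeys: the partition keys this grouping is expected
       // to line up with, used whenever partition values are pushed down, not only in
       // the partially clustered case
       expectedPartitionKeys: Option[Seq[(InternalRow, Int)]] = None,
       reducers: Option[Seq[Option[Reducer[_, _]]]] = None,
       applyPartialClustering: Boolean = false,
       replicatePartitions: Boolean = false)
     extends UnaryExecNode
   ```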



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupPartitionsExec.scala:
##########
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.{Partition, SparkException}
+import org.apache.spark.rdd.{CoalescedRDD, PartitionCoalescer, PartitionGroup, RDD}
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.catalyst.plans.physical.{KeyedPartitioning, Partitioning}
+import org.apache.spark.sql.catalyst.util.InternalRowComparableWrapper
+import org.apache.spark.sql.connector.catalog.functions.Reducer
+import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
+import org.apache.spark.sql.types.DataType
+
+/**
+ * Physical operator that groups input partitions by their partition keys.
+ *
+ * This operator is used to coalesce partitions from bucketed/partitioned data sources
+ * where multiple input partitions share the same partition key. It's commonly used in
+ * storage-partitioned joins to align partitions from different sides of the join.
+ *
+ * @param child The child plan providing bucketed/partitioned input
+ * @param joinKeyPositions Optional projection to select a subset of the partitioning key
+ *                         for join compatibility (e.g., when join keys are a subset of
+ *                         partition keys)
+ * @param commonPartitionKeys Optional sequence of expected partition key values and their
+ *                              split counts, used for partially clustered data
+ * @param reducers Optional reducers to apply to partition keys for grouping compatibility
+ * @param applyPartialClustering Whether to apply partial clustering for skewed data
+ * @param replicatePartitions Whether to replicate partitions across multiple keys
+ */
+case class GroupPartitionsExec(
+    child: SparkPlan,
+    joinKeyPositions: Option[Seq[Int]] = None,
+    commonPartitionKeys: Option[Seq[(InternalRow, Int)]] = None,
+    reducers: Option[Seq[Option[Reducer[_, _]]]] = None,
+    applyPartialClustering: Boolean = false,
+    replicatePartitions: Boolean = false
+  ) extends UnaryExecNode {
+
+  override def outputPartitioning: Partitioning = {
+    child.outputPartitioning match {
+      case p: Partitioning with Expression =>
+        p.transform {
+          case k: KeyedPartitioning =>
+            // There can be multiple `KeyedPartitioning` in an output partitioning of a join, but
+            // they can only differ in `expressions`. `partitionKeys` must match so we can calculate
+            // it only once via `groupedPartitions`.
+            val projectedExpressions = joinKeyPositions.fold(k.expressions)(_.map(k.expressions))
+            k.copy(expressions = projectedExpressions, partitionKeys = groupedPartitions.map(_._1))
+        }.asInstanceOf[Partitioning]
+      case o => o
+    }
+  }
+
+  /**
+   * Distributes partitions based on `commonPartitionKeys` and clustering mode.
+   */
+  private def distributeByCommonKeys(

Review Comment:
   What do you think about changing it to something like 'alignToExpectedKeys', to indicate that it aligns the keys to the external expectation?
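   For the semantics that name would hint at, a toy standalone illustration (not the PR's code; types deliberately simplified): given partitions grouped by key and an externally supplied key list, emit one group per expected key, in the expected order, with an empty group where this side has no data for that key.

   ```scala
   // Toy illustration only: "align" grouped input to an externally expected key sequence.
   def alignToExpectedKeys[K, P](
       grouped: Map[K, Seq[P]],
       expectedKeys: Seq[K]): Seq[(K, Seq[P])] =
     expectedKeys.map(key => key -> grouped.getOrElse(key, Seq.empty))
   ```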



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/EnsureRequirements.scala:
##########
@@ -670,75 +657,57 @@ case class EnsureRequirements(
         joinType == LeftAnti || joinType == LeftOuter
   }
 
-  // Populate the common partition information down to the scan nodes
-  private def populateCommonPartitionInfo(
+  /**
+   * Unwraps a GroupPartitionsExec to get the underlying child plan.
+   */
+  private def unwrapGroupPartitions(plan: SparkPlan): SparkPlan = plan match {
+    case g: GroupPartitionsExec => g.child
+    case other => other
+  }
+
+  /**
+   * Applies or updates GroupPartitionsExec with the given parameters.
+   */
+  private def applyGroupPartitions(
       plan: SparkPlan,
-      values: Seq[(InternalRow, Int)],
       joinKeyPositions: Option[Seq[Int]],
+      mergedPartitionKeys: Seq[(InternalRow, Int)],
       reducers: Option[Seq[Option[Reducer[_, _]]]],
       applyPartialClustering: Boolean,
-      replicatePartitions: Boolean): SparkPlan = plan match {
-    case scan: BatchScanExec =>
-      val newScan = scan.copy(
-        spjParams = scan.spjParams.copy(
-          commonPartitionValues = Some(values),
+      replicatePartitions: Boolean): SparkPlan = {
+    plan match {
+      case g: GroupPartitionsExec =>
+        g.copy(

Review Comment:
   Do we need copyTagsFrom to preserve tags?
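   For reference, a minimal sketch of what that could look like at this spot (assuming the copy forwards this method's parameters as in the surrounding diff; only the `copyTagsFrom` call is new):

   ```scala
   case g: GroupPartitionsExec =>
     val updated = g.copy(
       joinKeyPositions = joinKeyPositions,
       commonPartitionKeys = Some(mergedPartitionKeys),
       reducers = reducers,
       applyPartialClustering = applyPartialClustering,
       replicatePartitions = replicatePartitions)
     // copy() creates a fresh tree node, so carry over any tags set on the original
     updated.copyTagsFrom(g)
     updated
   ```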



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

