Github user yucai commented on a diff in the pull request:
https://github.com/apache/spark/pull/21156#discussion_r200937190
--- Diff:
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala
---
@@ -76,8 +76,36 @@ case class SortMergeJoinExec(
s"${getClass.getSimpleName} should not take $x as the JoinType")
}
- override def requiredChildDistribution: Seq[Distribution] =
- HashClusteredDistribution(leftKeys) ::
HashClusteredDistribution(rightKeys) :: Nil
+ private def avoidShuffleIfPossible(
+ joinKeys: Seq[Expression],
+ expressions: Seq[Expression]): Seq[Distribution] = {
+ val indices = expressions.map(x =>
joinKeys.indexWhere(_.semanticEquals(x)))
+ HashClusteredDistribution(indices.map(leftKeys(_))) ::
+ HashClusteredDistribution(indices.map(rightKeys(_))) :: Nil
+ }
+
+ override def requiredChildDistribution: Seq[Distribution] = {
+ if (!conf.sortMergeJoinExecChildrenPartitioningDetection) {
+ return HashClusteredDistribution(leftKeys) ::
HashClusteredDistribution(rightKeys) :: Nil
+ }
+
+ val leftPartitioning = left.outputPartitioning
+ val rightPartitioning = right.outputPartitioning
+ leftPartitioning match {
+ case HashPartitioning(leftExpressions, _)
+ if leftPartitioning.satisfies(ClusteredDistribution(leftKeys)) =>
+ avoidShuffleIfPossible(leftKeys, leftExpressions)
+
+ case _ => rightPartitioning match {
--- End diff --
Yes, you are right. The main purpose of this feature is bucketed tables, so handling
`HashPartitioning` is enough.
Actually, in a similar way we could also skip the shuffle for one side when its output
partitioning is `RangePartitioning`, but I am not sure whether that is really useful.
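
To illustrate the key-reordering idea in `avoidShuffleIfPossible` above, here is a rough
standalone sketch (plain Scala, not the actual Spark code; plain equality stands in for
`semanticEquals`, and the column names are made up):

```scala
object AvoidShuffleSketch {
  // Reorder the join keys to follow the order of the child's existing
  // partitioning expressions (e.g. the bucket columns of a bucketed table),
  // so the required distribution lines up with what is already there.
  def reorder[A](joinKeys: Seq[A], partitioningExprs: Seq[A]): Seq[A] = {
    val indices = partitioningExprs.map(e => joinKeys.indexWhere(_ == e))
    indices.map(joinKeys(_))
  }

  def main(args: Array[String]): Unit = {
    val joinKeys = Seq("a", "b", "c")
    // Suppose the bucketed table was created with bucket columns ("b", "c", "a").
    val bucketCols = Seq("b", "c", "a")
    println(reorder(joinKeys, bucketCols)) // List(b, c, a)
  }
}
```

The intent is that, with the join keys reordered like this, the `HashClusteredDistribution`
requested from the children can match the bucketed side's existing `HashPartitioning`, so no
extra shuffle is needed for that side.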
---