Ngone51 commented on a change in pull request #31470:
URL: https://github.com/apache/spark/pull/31470#discussion_r571917078



##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/DeduplicateRelations.scala
##########
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeMap, 
AttributeSet, NamedExpression}
+import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Except, Expand, 
FlatMapCoGroupsInPandas, FlatMapGroupsInPandas, Generate, Intersect, Join, 
LogicalPlan, MapInPandas, Project, SerializeFromObject, Union, Window}
+import org.apache.spark.sql.catalyst.rules.Rule
+
+object DeduplicateRelations extends Rule[LogicalPlan] {
+
+  /**
+   * Entry point: first renews every relation that appears more than once in the plan
+   * (so no two relations share expression IDs), then de-duplicates the remaining
+   * conflicting operators (Join, Intersect, Except, Union) bottom-up.
+   */
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    renewDuplicatedRelations(Nil, plan)._1.resolveOperatorsUp {
+      // Wait until all children are resolved before attempting de-duplication.
+      case p: LogicalPlan if !p.childrenResolved => p
+      // To resolve duplicate expression IDs for Join.
+      case j @ Join(left, right, _, _, _) if !j.duplicateResolved =>
+        j.copy(right = dedupRight(left, right))
+      // Intersect/Except will be rewritten to Join at the beginning of the optimizer.
+      // Here we need to deduplicate the right side plan, so that we won't produce an
+      // invalid self-join later.
+      case i @ Intersect(left, right, _) if !i.duplicateResolved =>
+        i.copy(right = dedupRight(left, right))
+      case e @ Except(left, right, _) if !e.duplicateResolved =>
+        e.copy(right = dedupRight(left, right))
+      // Only run after by-name resolution for Union has finished.
+      case u: Union if !u.byName && !u.duplicateResolved =>
+        // Use projection-based de-duplication for Union to avoid breaking the
+        // checkpoint sharing feature in streaming.
+        val newChildren = u.children.foldRight(Seq.empty[LogicalPlan]) { (head, tail) =>
+          // Keep each child as-is, but wrap any LATER sibling whose output overlaps
+          // this child's output in a Project of fresh Aliases (new expression IDs).
+          head +: tail.map {
+            case child if head.outputSet.intersect(child.outputSet).isEmpty =>
+              child
+            case child =>
+              val projectList = child.output.map { attr =>
+                Alias(attr, attr.name)()
+              }
+              Project(projectList, child)
+          }
+        }
+        u.copy(children = newChildren)
+    }
+  }
+
+  /**
+   * Recursively walks the plan, replacing every [[MultiInstanceRelation]] that conflicts
+   * (shares output attributes) with a relation already collected, and rewriting the
+   * parents' attribute references accordingly.
+   *
+   * @param existingRelations relations collected so far from already-visited parts of the plan
+   * @param plan the (sub)plan to process
+   * @return the possibly-rewritten plan, plus the relations newly collected from this subtree
+   */
+  private def renewDuplicatedRelations(
+      existingRelations: Seq[MultiInstanceRelation],
+      plan: LogicalPlan): (LogicalPlan, Seq[MultiInstanceRelation]) = plan match {
+    // NOTE(review): streaming plans are skipped entirely here; conflicting relations
+    // inside them are expected to be handled by dedupRight instead — confirm.
+    case p: LogicalPlan if p.isStreaming => (plan, Nil)
+
+    case m: MultiInstanceRelation =>
+      if (isDuplicated(existingRelations, m)) {
+        // Conflict: create a copy with fresh expression IDs, preserving node tags.
+        val newNode = m.newInstance()
+        newNode.copyTagsFrom(m)
+        (newNode, Nil)
+      } else {
+        // First occurrence: keep it and report it upward for later conflict checks.
+        (m, Seq(m))
+      }
+
+    case _ if plan.children.nonEmpty =>
+      val newChildren = ArrayBuffer.empty[LogicalPlan]
+      val relations = ArrayBuffer.empty[MultiInstanceRelation]
+      // Visit children left-to-right so relations collected from earlier siblings are
+      // already visible when renewing later ones.
+      for (c <- plan.children) {
+        val (renewed, collected) = renewDuplicatedRelations(existingRelations ++ relations, c)
+        newChildren += renewed
+        relations ++= collected
+      }
+
+      if (plan.childrenResolved) {
+        // Pair old child output attributes with the renewed ones position-by-position,
+        // keep only pairs whose expression ID actually changed, and rewrite this node's
+        // attribute references through that map.
+        val attrMap = AttributeMap(plan.children.flatMap(_.output).zip(
+          newChildren.flatMap(_.output)).filter { case (a1, a2) => a1.exprId != a2.exprId })
+        val newPlan = plan.withNewChildren(newChildren).rewriteAttrs(attrMap)
+        (newPlan, relations)
+      } else {
+        // Children unresolved: outputs aren't reliable yet, so skip attribute rewriting.
+        (plan.withNewChildren(newChildren), relations)
+      }
+
+    case _ => (plan, Nil)
+  }
+
+  /**
+   * Returns true if `relation` shares at least one output attribute with any plan
+   * already collected in `existingRelations`.
+   */
+  private def isDuplicated(
+      existingRelations: Seq[MultiInstanceRelation],
+      relation: MultiInstanceRelation): Boolean = {
+    // Hoist the candidate's output set; it is the same for every comparison.
+    val candidateOutput = relation.asInstanceOf[LogicalPlan].outputSet
+    existingRelations.exists { existing =>
+      candidateOutput.intersect(existing.asInstanceOf[LogicalPlan].outputSet).nonEmpty
+    }
+  }
+
+  /**
+   * Generate a new logical plan for the right child with different expression 
IDs
+   * for all conflicting attributes.
+   */
+  private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan 
= {
+    val conflictingAttributes = left.outputSet.intersect(right.outputSet)
+    logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " 
+
+      s"between $left and $right")
+
+    /**
+     * For a LogicalPlan like MultiInstanceRelation, Project, or Aggregate, whose output
+     * doesn't inherit directly from its children, we can simply stop collecting at that
+     * node, because we can always replace all of the lower conflicting attributes with
+     * the new attributes from the new plan. Theoretically, we should also collect
+     * recursively for Generate and Window, but we leave that to the next batch to
+     * reduce possible overhead, since this should be a corner case.
+     */
+    def collectConflictPlans(plan: LogicalPlan): Seq[(LogicalPlan, 
LogicalPlan)] = plan match {
+      // Handle base relations that might appear more than once.
+      case oldVersion: MultiInstanceRelation

Review comment:
       This case is still required for streaming plans, because 
`renewDuplicatedRelations` skips them (it returns early when `plan.isStreaming`).




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to