Github user chenghao-intel commented on a diff in the pull request:
https://github.com/apache/spark/pull/5062#discussion_r26553403
--- Diff:
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
---
@@ -237,22 +237,35 @@ class Analyzer(catalog: Catalog,
// Special handling for cases when self-join introduce duplicate
expression ids.
case j @ Join(left, right, _, _) if
left.outputSet.intersect(right.outputSet).nonEmpty =>
val conflictingAttributes =
left.outputSet.intersect(right.outputSet)
+ logDebug(s"Conflicting attributes
${conflictingAttributes.mkString(",")} in $j")
- val (oldRelation, newRelation, attributeRewrites) = right.collect {
+ val (oldRelation, newRelation) = right.collect {
+ // Handle base relations that might appear more than once.
case oldVersion: MultiInstanceRelation
if
oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
val newVersion = oldVersion.newInstance()
- val newAttributes =
AttributeMap(oldVersion.output.zip(newVersion.output))
- (oldVersion, newVersion, newAttributes)
+ (oldVersion, newVersion)
+
+ // Handle projects that create conflicting aliases.
+ case oldVersion @ Project(projectList, child)
--- End diff --
@marmbrus Only handling the `Project` case is probably not enough. For example:
```
test("self join with aliases#1") {
Seq(1).map(i => (i, i.toString)).toDF("int",
"str").groupBy($"str").max("int").as("str").registerTempTable("df")
Seq(1).map(i => (i, i.toString)).toDF("int",
"str").groupBy($"str").max("int").as("str").explain(true)
checkAnswer(
sql(
"""
|SELECT x.str, y.str
|FROM df x JOIN df y ON x.str = y.str
""".stripMargin),
Row(1, 1) :: Nil)
}
```
The console output looks like:
```
'Subquery str
'Aggregate ['str], ['str,MAX(int#48) AS MAX(int)#50]
Project [_1#46 AS int#48,_2#47 AS str#49]
LocalRelation [_1#46,_2#47], [[1,1]]
Aggregate [str#49], [str#49,MAX(int#48) AS MAX(int)#50]
Project [_1#46 AS int#48,_2#47 AS str#49]
LocalRelation [_1#46,_2#47], [[1,1]]
Aggregate [str#49], [str#49,MAX(int#48) AS MAX(int)#50]
LocalRelation [int#48,str#49], [[1,1]]
Aggregate false, [str#49], [str#49,MAX(PartialMax#53) AS MAX(int)#50]
Exchange (HashPartitioning [str#49], 5)
Aggregate true, [str#49], [str#49,MAX(int#48) AS PartialMax#53]
LocalTableScan [int#48,str#49], [[1,1]]
Code Generation: false
== RDD ==
next on empty iterator
java.util.NoSuchElementException: next on empty iterator
at scala.collection.Iterator$$anon$2.next(Iterator.scala:39)
at scala.collection.Iterator$$anon$2.next(Iterator.scala:37)
at
scala.collection.IndexedSeqLike$Elements.next(IndexedSeqLike.scala:64)
at scala.collection.IterableLike$class.head(IterableLike.scala:91)
at
scala.collection.mutable.ArrayBuffer.scala$collection$IndexedSeqOptimized$$super$head(ArrayBuffer.scala:47)
at
scala.collection.IndexedSeqOptimized$class.head(IndexedSeqOptimized.scala:120)
```
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]