cloud-fan commented on a change in pull request #29342:
URL: https://github.com/apache/spark/pull/29342#discussion_r469819019
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/joins/ShuffledHashJoinExec.scala
##########
@@ -71,8 +85,210 @@ case class ShuffledHashJoinExec(
     val numOutputRows = longMetric("numOutputRows")
     streamedPlan.execute().zipPartitions(buildPlan.execute()) { (streamIter, buildIter) =>
       val hashed = buildHashedRelation(buildIter)
-      join(streamIter, hashed, numOutputRows)
+      joinType match {
+        case FullOuter => fullOuterJoin(streamIter, hashed, numOutputRows)
+        case _ => join(streamIter, hashed, numOutputRows)
+      }
+    }
+  }
+
+  private def fullOuterJoin(
+      streamIter: Iterator[InternalRow],
+      hashedRelation: HashedRelation,
+      numOutputRows: SQLMetric): Iterator[InternalRow] = {
+    val joinKeys = streamSideKeyGenerator()
+    val joinRow = new JoinedRow
+    val (joinRowWithStream, joinRowWithBuild) = {
+      buildSide match {
+        case BuildLeft => (joinRow.withRight _, joinRow.withLeft _)
+        case BuildRight => (joinRow.withLeft _, joinRow.withRight _)
+      }
+    }
+    val buildNullRow = new GenericInternalRow(buildOutput.length)
+    val streamNullRow = new GenericInternalRow(streamedOutput.length)
+    val streamNullJoinRow = new JoinedRow
Review comment:
why do we need a new `JoinedRow` instead of reusing `joinRow`?
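
For context on the question: `JoinedRow` is a mutable view over two child rows, so every output row produced from one instance aliases that instance. Below is a minimal standalone sketch of that aliasing behavior, not code from this PR; the rows are made up and the stream side is arbitrarily fixed as the left side. Whether the second instance is actually required here is for the PR author to answer.

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, JoinedRow}

// Hypothetical illustration only; not code from the PR.
object JoinedRowAliasingSketch {
  def main(args: Array[String]): Unit = {
    // One-column "stream" and "build" rows.
    val streamRow = new GenericInternalRow(Array[Any](1L))
    val buildRow = new GenericInternalRow(Array[Any](2L))
    val streamNullRow = new GenericInternalRow(1) // single null field

    // JoinedRow is a mutable view over two child rows; withLeft/withRight
    // swap one side in place and return the same instance.
    val joinRow = new JoinedRow(streamRow, buildRow)
    val matchedOutput: InternalRow = joinRow // alias, not a copy

    // Repointing the left side (e.g. for a build row with no stream match)
    // also changes what `matchedOutput` reads, since it is the same object.
    joinRow.withLeft(streamNullRow)
    println(matchedOutput.isNullAt(0)) // true: the earlier view now sees the null row

    // A second JoinedRow keeps the two output shapes independent of each other.
    val streamNullJoinRow = new JoinedRow(streamNullRow, buildRow)
    println(streamNullJoinRow.isNullAt(0)) // true: null stream side
    println(streamNullJoinRow.getLong(1))  // 2: build side untouched
  }
}
```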