c21 commented on a change in pull request #31736:
URL: https://github.com/apache/spark/pull/31736#discussion_r588083648
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
##########
@@ -92,6 +92,39 @@ class WholeStageCodegenSuite extends QueryTest with
SharedSparkSession
Seq(Row(0, 0, 0), Row(1, 1, 1), Row(2, 2, 2), Row(3, 3, 3), Row(4, 4,
4)))
}
+ test("Inner/Cross BroadcastNestedLoopJoinExec should be included in
WholeStageCodegen") {
+ val df1 = spark.range(4).select($"id".as("k1"))
+ val df2 = spark.range(3).select($"id".as("k2"))
+ val df3 = spark.range(2).select($"id".as("k3"))
+
+ // test broadcast nested loop join without condition
+ val oneJoinDF = df1.join(df2)
+ assert(oneJoinDF.queryExecution.executedPlan.collect {
+ case WholeStageCodegenExec(_ : BroadcastNestedLoopJoinExec) => true
+ }.size === 1)
+ checkAnswer(oneJoinDF,
Review comment:
@maropu - sure, updated.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala
##########
@@ -393,4 +394,65 @@ case class BroadcastNestedLoopJoinExec(
}
}
}
+
+ override def supportCodegen: Boolean = {
+ joinType.isInstanceOf[InnerLike]
+ }
+
+ override def inputRDDs(): Seq[RDD[InternalRow]] = {
+ streamed.asInstanceOf[CodegenSupport].inputRDDs()
+ }
+
+ override def needCopyResult: Boolean = true
Review comment:
@maropu - In the case of an inner/cross broadcast nested loop join, one input
row can potentially produce multiple output rows, so I am following the [comment
here](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala#L346-L347)
to set it to true. BTW, sort merge join has the [same
behavior](https://github.com/apache/spark/blob/master/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala#L570).
If we want to improve this, I can do it in another follow-up PR. I am not
very familiar with this part, so I will look into it more closely later.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala
##########
@@ -393,4 +394,65 @@ case class BroadcastNestedLoopJoinExec(
}
}
}
+
+ override def supportCodegen: Boolean = {
+ joinType.isInstanceOf[InnerLike]
+ }
+
+ override def inputRDDs(): Seq[RDD[InternalRow]] = {
+ streamed.asInstanceOf[CodegenSupport].inputRDDs()
+ }
+
+ override def needCopyResult: Boolean = true
+
+ override def doProduce(ctx: CodegenContext): String = {
+ streamed.asInstanceOf[CodegenSupport].produce(ctx, this)
+ }
+
+ override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row:
ExprCode): String = {
+ joinType match {
+ case _: InnerLike => codegenInner(ctx, input)
+ case x =>
+ throw new IllegalArgumentException(
+ s"BroadcastNestedLoopJoin code-gen should not take $x as the
JoinType")
+ }
+ }
+
+ /**
+ * Returns the variable name for [[Broadcast]] side.
+ */
+ private def prepareBroadcast(ctx: CodegenContext): String = {
+ // Create a name for broadcast side
+ val broadcastArray = broadcast.executeBroadcast[Array[InternalRow]]()
+ val broadcastTerm = ctx.addReferenceObj("broadcastTerm", broadcastArray)
+
+ // Inline mutable state since not many join operations in a task
+ ctx.addMutableState("InternalRow[]", "broadcastArray",
Review comment:
@maropu - updated to `buildRowArray`.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala
##########
@@ -393,4 +394,65 @@ case class BroadcastNestedLoopJoinExec(
}
}
}
+
+ override def supportCodegen: Boolean = {
+ joinType.isInstanceOf[InnerLike]
+ }
+
+ override def inputRDDs(): Seq[RDD[InternalRow]] = {
+ streamed.asInstanceOf[CodegenSupport].inputRDDs()
+ }
+
+ override def needCopyResult: Boolean = true
+
+ override def doProduce(ctx: CodegenContext): String = {
+ streamed.asInstanceOf[CodegenSupport].produce(ctx, this)
+ }
+
+ override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row:
ExprCode): String = {
+ joinType match {
+ case _: InnerLike => codegenInner(ctx, input)
+ case x =>
+ throw new IllegalArgumentException(
+ s"BroadcastNestedLoopJoin code-gen should not take $x as the
JoinType")
Review comment:
@maropu - updated.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala
##########
@@ -393,4 +394,65 @@ case class BroadcastNestedLoopJoinExec(
}
}
}
+
+ override def supportCodegen: Boolean = {
+ joinType.isInstanceOf[InnerLike]
+ }
+
+ override def inputRDDs(): Seq[RDD[InternalRow]] = {
+ streamed.asInstanceOf[CodegenSupport].inputRDDs()
+ }
+
+ override def needCopyResult: Boolean = true
+
+ override def doProduce(ctx: CodegenContext): String = {
+ streamed.asInstanceOf[CodegenSupport].produce(ctx, this)
+ }
+
+ override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row:
ExprCode): String = {
+ joinType match {
+ case _: InnerLike => codegenInner(ctx, input)
+ case x =>
+ throw new IllegalArgumentException(
+ s"BroadcastNestedLoopJoin code-gen should not take $x as the
JoinType")
+ }
+ }
+
+ /**
+ * Returns the variable name for [[Broadcast]] side.
+ */
+ private def prepareBroadcast(ctx: CodegenContext): String = {
+ // Create a name for broadcast side
+ val broadcastArray = broadcast.executeBroadcast[Array[InternalRow]]()
+ val broadcastTerm = ctx.addReferenceObj("broadcastTerm", broadcastArray)
+
+ // Inline mutable state since not many join operations in a task
+ ctx.addMutableState("InternalRow[]", "broadcastArray",
+ v => s"$v = (InternalRow[]) $broadcastTerm.value();", forceInline = true)
+ }
+
+ private def codegenInner(ctx: CodegenContext, input: Seq[ExprCode]): String
= {
+ val arrayTerm = prepareBroadcast(ctx)
Review comment:
@maropu - updated to `buildRowArrayTerm`.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]