xingchaozh commented on code in PR #37930: URL: https://github.com/apache/spark/pull/37930#discussion_r976267738
########## sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala: ########## @@ -1440,4 +1440,25 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan } } } + + test("SPARK-40487: Make defaultJoin in BroadcastNestedLoopJoinExec running in parallel") { + withTable("t1", "t2") { + spark.range(5, 15).toDF("k").write.saveAsTable("t1") + spark.range(4, 8).toDF("k").write.saveAsTable("t2") + + val queryBuildLeft = + s""" + |SELECT /*+ BROADCAST(t1) */ * FROM t1 LEFT JOIN t2 on t1.k < t2.k + """.stripMargin + val result1 = sql(queryBuildLeft) + + val queryBuildRight = + s""" + |SELECT /*+ BROADCAST(t2) */ * FROM t1 LEFT JOIN t2 on t1.k < t2.k + """.stripMargin Review Comment: Sure, fixed ########## sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastNestedLoopJoinExec.scala: ########## @@ -286,21 +286,26 @@ case class BroadcastNestedLoopJoinExec( */ private def defaultJoin(relation: Broadcast[Array[InternalRow]]): RDD[InternalRow] = { val streamRdd = streamed.execute() - val matchedBroadcastRows = getMatchedBroadcastRowsBitSet(streamRdd, relation) - val notMatchedBroadcastRows: Seq[InternalRow] = { - val nulls = new GenericInternalRow(streamed.output.size) - val buf: CompactBuffer[InternalRow] = new CompactBuffer() - val joinedRow = new JoinedRow - joinedRow.withLeft(nulls) - var i = 0 - val buildRows = relation.value - while (i < buildRows.length) { - if (!matchedBroadcastRows.get(i)) { - buf += joinedRow.withRight(buildRows(i)).copy() + def notMatchedBroadcastRows: RDD[InternalRow] = { + getMatchedBroadcastRowsBitSetRDD(streamRdd, relation) + .repartition(1) + .mapPartitions { iter => + Seq(iter.fold(new BitSet(relation.value.length))(_ | _)).toIterator + }.flatMap(matchedBroadcastRows => { Review Comment: Sure, fixed -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org