Github user rxin commented on a diff in the pull request:

    https://github.com/apache/spark/pull/250#discussion_r11012293
  
    --- Diff: 
sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala ---
    @@ -40,33 +50,86 @@ case class SparkEquiInnerJoin(
       override def requiredChildDistribution =
         ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: 
Nil
     
    +  val (buildPlan, streamedPlan) = buildSide match {
    +    case BuildLeft => (left, right)
    +    case BuildRight => (right, left)
    +  }
    +
    +  val (buildKeys, streamedKeys) = buildSide match {
    +    case BuildLeft => (leftKeys, rightKeys)
    +    case BuildRight => (rightKeys, leftKeys)
    +  }
    +
       def output = left.output ++ right.output
     
    -  def execute() = attachTree(this, "execute") {
    -    val leftWithKeys = left.execute().mapPartitions { iter =>
    -      val generateLeftKeys = new Projection(leftKeys, left.output)
    -      iter.map(row => (generateLeftKeys(row), row.copy()))
    -    }
    +  @transient lazy val buildSideKeyGenerator = new Projection(buildKeys, 
buildPlan.output)
    +  @transient lazy val streamSideKeyGenerator =
    +    () => new MutableProjection(streamedKeys, streamedPlan.output)
     
    -    val rightWithKeys = right.execute().mapPartitions { iter =>
    -      val generateRightKeys = new Projection(rightKeys, right.output)
    -      iter.map(row => (generateRightKeys(row), row.copy()))
    -    }
    +  def execute() = {
     
    -    // Do the join.
    -    val joined = 
filterNulls(leftWithKeys).joinLocally(filterNulls(rightWithKeys))
    -    // Drop join keys and merge input tuples.
    -    joined.map { case (_, (leftTuple, rightTuple)) => buildRow(leftTuple 
++ rightTuple) }
    -  }
    +    buildPlan.execute().zipPartitions(streamedPlan.execute()) { 
(buildIter, streamIter) =>
    +      val hashTable = new java.util.HashMap[Row, ArrayBuffer[Row]]()
    +      var currentRow: Row = null
    +
    +      // Create a mapping of buildKeys -> rows
    +      while(buildIter.hasNext) {
    +        currentRow = buildIter.next()
    +        val rowKey = buildSideKeyGenerator(currentRow)
    +        if(!rowKey.anyNull) {
    +          val existingMatchList = hashTable.get(rowKey)
    +          val matchList = if (existingMatchList == null) {
    +            val newMatchList = new ArrayBuffer[Row]()
    +            hashTable.put(rowKey, newMatchList)
    +            newMatchList
    +          } else {
    +            existingMatchList
    +          }
    +          matchList += currentRow.copy()
    +        }
    +      }
     
    -  /**
    -   * Filters any rows where the any of the join keys is null, ensuring 
three-valued
    -   * logic for the equi-join conditions.
    -   */
    -  protected def filterNulls(rdd: RDD[(Row, Row)]) =
    -    rdd.filter {
    -      case (key: Seq[_], _) => !key.exists(_ == null)
    +      new Iterator[Row] {
    +        private[this] var currentRow: Row = _
    --- End diff --
    
    Add an inline comment to explain these 3 variables. The naming is slightly 
confusing right now because they are all "current": "currentRow" refers to 
the streaming side, "currentMatches" refers to the hash side, and 
"currentPosition" refers to the position within currentMatches. I think adding 
some inline comments would make them clear.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

Reply via email to