GitHub user marmbrus commented on a diff in the pull request:

    https://github.com/apache/spark/pull/1147#discussion_r15710803
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala ---
    @@ -137,6 +137,185 @@ trait HashJoin {
     }
     
     /**
    + * Constants used by the hash outer join node.
    + */
    +object HashOuterJoin {
    +  val DUMMY_LIST = Seq[Row](null)
    +  val EMPTY_LIST = Seq[Row]()
    +}
    +
    +/**
    + * :: DeveloperApi ::
    + * Performs a hash based outer join for two child relations by shuffling the data using
    + * the join keys. This operator requires loading the associated partitions of both sides
    + * into memory.
    + */
    +@DeveloperApi
    +case class HashOuterJoin(
    +    leftKeys: Seq[Expression],
    +    rightKeys: Seq[Expression],
    +    joinType: JoinType,
    +    condition: Option[Expression],
    +    left: SparkPlan,
    +    right: SparkPlan) extends BinaryNode {
    +
    +  override def outputPartitioning: Partitioning = left.outputPartitioning
    +
    +  override def requiredChildDistribution =
    +    ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
    +
    +  def output = left.output ++ right.output
    +
    +  // TODO: we need to rewrite all of the iterators with our own implementation instead of
    +  // the Scala iterators, for performance reasons.
    +
    +  private[this] def leftOuterIterator(
    +      key: Row, leftIter: Iterable[Row], rightIter: Iterable[Row]): Iterator[Row] = {
    +    val joinedRow = new JoinedRow()
    +    val rightNullRow = new GenericRow(right.output.length)
    +    val boundCondition =
    +      condition.map(newPredicate(_, left.output ++ right.output)).getOrElse((row: Row) => true)
    +
    +    leftIter.iterator.flatMap { l => 
    +      joinedRow.withLeft(l)
    +      var matched = false
    +      (if (!key.anyNull) rightIter.collect { case r if boundCondition(joinedRow.withRight(r)) =>
    +        matched = true
    +        joinedRow.copy
    +      } else {
    +        Nil
    +      }) ++ HashOuterJoin.DUMMY_LIST.filter(_ => !matched).map { _ =>
    +        // HashOuterJoin.DUMMY_LIST.filter(_ => !matched) is a tricky way to append an extra
    +        // row, as we don't know whether we need to append it until we have finished iterating
    +        // over all of the records on the right side.
    +        // If we didn't find any matching row, append a single row with an empty right side.
    +        joinedRow.withRight(rightNullRow).copy
    +      }
    +    }
    +  }
    +
    +  private[this] def rightOuterIterator(
    +      key: Row, leftIter: Iterable[Row], rightIter: Iterable[Row]): Iterator[Row] = {
    +    val joinedRow = new JoinedRow()
    +    val leftNullRow = new GenericRow(left.output.length)
    +    val boundCondition =
    +      condition.map(newPredicate(_, left.output ++ right.output)).getOrElse((row: Row) => true)
    +
    +    rightIter.iterator.flatMap { r => 
    +      joinedRow.withRight(r)
    +      var matched = false
    +      (if (!key.anyNull) leftIter.collect { case l if boundCondition(joinedRow.withLeft(l)) =>
    +        matched = true
    +        joinedRow.copy
    +      } else {
    +        Nil
    +      }) ++ HashOuterJoin.DUMMY_LIST.filter(_ => !matched).map { _ =>
    +        // HashOuterJoin.DUMMY_LIST.filter(_ => !matched) is a tricky way to append an extra
    +        // row, as we don't know whether we need to append it until we have finished iterating
    +        // over all of the records on the left side.
    +        // If we didn't find any matching row, append a single row with an empty left side.
    +        joinedRow.withLeft(leftNullRow).copy
    +      }
    +    }
    +  }
    +
    +  private[this] def fullOuterIterator(
    +      key: Row, leftIter: Iterable[Row], rightIter: Iterable[Row]): Iterator[Row] = {
    +    val joinedRow = new JoinedRow()
    +    val leftNullRow = new GenericRow(left.output.length)
    +    val rightNullRow = new GenericRow(right.output.length)
    +    val boundCondition =
    +      condition.map(newPredicate(_, left.output ++ right.output)).getOrElse((row: Row) => true)
    +
    +    if (!key.anyNull) {
    +      // Store the positions of the records on the right side for which at least one
    +      // joined row satisfies the join condition.
    +      val rightMatchedSet = scala.collection.mutable.Set[Int]()
    +      leftIter.iterator.flatMap[Row] { l =>
    +        joinedRow.withLeft(l)
    +        var matched = false
    +        rightIter.zipWithIndex.collect {
    +          // 1. For the matched records (those satisfying the join condition) with both
    +          //    sides filled, append them directly.
    +          case (r, idx) if boundCondition(joinedRow.withRight(r)) =>
    +            matched = true
    +            // This row satisfies the join condition, so record its index in the matched set.
    +            rightMatchedSet.add(idx)
    +            joinedRow.copy
    +        } ++ HashOuterJoin.DUMMY_LIST.filter(_ => !matched).map { _ =>
    +          // 2. For the unmatched records on the left, append an extra row with an empty
    +          //    right side.
    +          // HashOuterJoin.DUMMY_LIST.filter(_ => !matched) is a tricky way to append an
    +          // extra row, as we don't know whether we need it until we have finished iterating
    +          // over all of the records on the right side.
    +          joinedRow.withRight(rightNullRow).copy
    +        }
    +      } ++ rightIter.zipWithIndex.collect {
    +        // 3. For the unmatched records on the right, append an extra row with an empty
    +        //    left side.
    +        // Re-visit the records on the right and, for each index that is not in the
    +        // matched set, append a row with an empty left side.
    +        case (r, idx) if !rightMatchedSet.contains(idx) =>
    +          joinedRow(leftNullRow, r).copy
    +      }
    +    } else {
    +      leftIter.iterator.map[Row] { l =>
    +        joinedRow(l, rightNullRow).copy
    +      } ++ rightIter.iterator.map[Row] { r =>
    +        joinedRow(leftNullRow, r).copy
    +      }
    +    }
    +  }
    +
    +  private[this] def buildHashTable(
    +      iter: Iterator[Row], keyGenerator: Projection): Map[Row, ArrayBuffer[Row]] = {
    +    // TODO: Use Spark's HashMap implementation.
    +    val hashTable = scala.collection.mutable.Map[Row, ArrayBuffer[Row]]()
    --- End diff --
    
    We should probably at least be using java.util here. The Scala collection
    library has shown unpredictable performance in hot code paths like this one.
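    
    For illustration, a minimal sketch of what the java.util variant of
    buildHashTable could look like (untested; it assumes the catalyst Row and
    Projection types already in scope, and that callers switch from the Scala
    Map API to java.util.HashMap's get/put, where get returns null on a miss):
    
        import java.util.{HashMap => JavaHashMap}
        import scala.collection.mutable.ArrayBuffer
    
        // Sketch only: Row and Projection are assumed to come from catalyst,
        // as in the surrounding file.
        private[this] def buildHashTable(
            iter: Iterator[Row], keyGenerator: Projection): JavaHashMap[Row, ArrayBuffer[Row]] = {
          val hashTable = new JavaHashMap[Row, ArrayBuffer[Row]]()
          while (iter.hasNext) {
            val currentRow = iter.next()
            val rowKey = keyGenerator(currentRow)
            // java.util.HashMap.get returns null when the key is absent.
            var existingMatchList = hashTable.get(rowKey)
            if (existingMatchList == null) {
              existingMatchList = new ArrayBuffer[Row]()
              hashTable.put(rowKey, existingMatchList)
            }
            // Copy the row: the upstream iterator may reuse the same mutable Row object.
            existingMatchList += currentRow.copy()
          }
          hashTable
        }
    
    The while loop over iter avoids the per-element closure of foreach, and the
    null-on-miss contract of java.util.HashMap.get avoids the Option allocation
    of the Scala Map API.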

