Github user marmbrus commented on a diff in the pull request:

    https://github.com/apache/spark/pull/837#discussion_r13262730
  
    --- Diff: 
sql/core/src/main/scala/org/apache/spark/sql/execution/joins.scala ---
    @@ -144,6 +144,150 @@ case class HashJoin(
      * :: DeveloperApi ::
      */
     @DeveloperApi
    +case class LeftSemiJoinHash(
    +                     leftKeys: Seq[Expression],
    +                     rightKeys: Seq[Expression],
    +                     buildSide: BuildSide,
    +                     left: SparkPlan,
    +                     right: SparkPlan) extends BinaryNode {
    +
    +  override def outputPartitioning: Partitioning = left.outputPartitioning
    +
    +  override def requiredChildDistribution =
    +    ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: 
Nil
    +
    +  val (buildPlan, streamedPlan) = buildSide match {
    +    case BuildLeft => (left, right)
    +    case BuildRight => (right, left)
    +  }
    +
    +  val (buildKeys, streamedKeys) = buildSide match {
    +    case BuildLeft => (leftKeys, rightKeys)
    +    case BuildRight => (rightKeys, leftKeys)
    +  }
    +
    +  def output = left.output
    +
    +  @transient lazy val buildSideKeyGenerator = new Projection(buildKeys, 
buildPlan.output)
    +  @transient lazy val streamSideKeyGenerator =
    +    () => new MutableProjection(streamedKeys, streamedPlan.output)
    +
    +  def execute() = {
    +
    +    buildPlan.execute().zipPartitions(streamedPlan.execute()) { 
(buildIter, streamIter) =>
    +    // TODO: Use Spark's HashMap implementation.
    +      val hashTable = new java.util.HashMap[Row, ArrayBuffer[Row]]()
    +      var currentRow: Row = null
    +
    +      // Create a mapping of buildKeys -> rows
    +      while (buildIter.hasNext) {
    +        currentRow = buildIter.next()
    +        val rowKey = buildSideKeyGenerator(currentRow)
    +        if(!rowKey.anyNull) {
    +          val existingMatchList = hashTable.get(rowKey)
    +          val matchList = if (existingMatchList == null) {
    +            val newMatchList = new ArrayBuffer[Row]()
    +            hashTable.put(rowKey, newMatchList)
    +            newMatchList
    +          } else {
    +            existingMatchList
    +          }
    +          matchList += currentRow.copy()
    +        }
    +      }
    +
    +      new Iterator[Row] {
    +        private[this] var currentStreamedRow: Row = _
    +        private[this] var currentHashMatched: Boolean = false
    +
    +        private[this] val joinKeys = streamSideKeyGenerator()
    +
    +        override final def hasNext: Boolean =
    +          streamIter.hasNext && fetchNext()
    +
    +        override final def next() = {
    +          currentStreamedRow
    +        }
    +
    +        /**
    +         * Searches the streamed iterator for the next row that has at 
least one match in hashtable.
    +         *
    +         * @return true if the search is successful, and false if the 
streamed iterator runs out of
    +         *         tuples.
    +         */
    +        private final def fetchNext(): Boolean = {
    +          currentHashMatched = false
    +          while (!currentHashMatched && streamIter.hasNext) {
    +            currentStreamedRow = streamIter.next()
    +            if (!joinKeys(currentStreamedRow).anyNull) {
    +              currentHashMatched = true
    +            }
    +          }
    +          currentHashMatched
    +        }
    +      }
    +    }
    +  }
    +}
    +
    +/**
    + * :: DeveloperApi ::
    + */
    +@DeveloperApi
    +case class LeftSemiJoinBNL(
    --- End diff --
    
    I don't think this operator is exercised by the included test cases.  We 
should add a test where the join condition can be calculated with hash keys.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

Reply via email to