GitHub user sujith71955 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16677#discussion_r100033968
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/limit.scala ---
    @@ -90,25 +95,101 @@ trait BaseLimitExec extends UnaryExecNode with CodegenSupport {
     }
     
     /**
    - * Take the first `limit` elements of each child partition, but do not collect or shuffle them.
    + * Take the `limit` elements of the child output.
      */
    -case class LocalLimitExec(limit: Int, child: SparkPlan) extends BaseLimitExec {
    +case class GlobalLimitExec(limit: Int, child: SparkPlan) extends UnaryExecNode {
     
    -  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
    +  override def output: Seq[Attribute] = child.output
     
       override def outputPartitioning: Partitioning = child.outputPartitioning
    -}
     
    -/**
    - * Take the first `limit` elements of the child's single output partition.
    - */
    -case class GlobalLimitExec(limit: Int, child: SparkPlan) extends BaseLimitExec {
    +  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
    +
    +  private val serializer: Serializer = new UnsafeRowSerializer(child.output.size)
    +
    +  protected override def doExecute(): RDD[InternalRow] = {
    +    val childRDD = child.execute()
    +    val partitioner = LocalPartitioning(child.outputPartitioning,
    +      childRDD.getNumPartitions)
    +    val shuffleDependency = ShuffleExchange.prepareShuffleDependency(
    +      childRDD, child.output, partitioner, serializer)
    +    val numberOfOutput: Seq[Int] = if (shuffleDependency.rdd.getNumPartitions != 0) {
    +      // submitMapStage does not accept RDD with 0 partition.
    +      // So, we will not submit this dependency.
    +      val submittedStageFuture = sparkContext.submitMapStage(shuffleDependency)
    +      submittedStageFuture.get().numberOfOutput.toSeq
    +    } else {
    +      Nil
    +    }
     
    -  override def requiredChildDistribution: List[Distribution] = AllTuples :: Nil
    +    // Try to keep child plan's original data parallelism or not. It is enabled by default.
    +    val respectChildParallelism = sqlContext.conf.enableParallelGlobalLimit
     
    -  override def outputPartitioning: Partitioning = child.outputPartitioning
    +    val shuffled = new ShuffledRowRDD(shuffleDependency)
     
    -  override def outputOrdering: Seq[SortOrder] = child.outputOrdering
    +    val sumOfOutput = numberOfOutput.sum
    +    if (sumOfOutput <= limit) {
    +      shuffled
    +    } else if (!respectChildParallelism) {
    +      // This is mainly for tests.
    +      // We take the rows of each partition until we reach the required limit number.
    +      var countForRows = 0
    +      val takeAmounts = new mutable.HashMap[Int, Int]()
    +      numberOfOutput.zipWithIndex.foreach { case (num, index) =>
    +        if (countForRows + num < limit) {
    +          countForRows += num
    +          takeAmounts += ((index, num))
    +        } else {
    +          val toTake = limit - countForRows
    +          countForRows += toTake
    +          takeAmounts += ((index, toTake))
    +        }
    +      }
    +      val broadMap = sparkContext.broadcast(takeAmounts)
    +      shuffled.mapPartitionsWithIndexInternal { case (index, iter) =>
    +        broadMap.value.get(index).map { size =>
    +          iter.take(size)
    +        }.get
    +      }
    +    } else {
    +      // We try to distribute the required limit number of rows across all child rdd's partitions.
    +      var numToReduce = (sumOfOutput - limit)
    +      val reduceAmounts = new mutable.HashMap[Int, Int]()
    +      val nonEmptyParts = numberOfOutput.filter(_ > 0).size
    +      val reducePerPart = numToReduce / nonEmptyParts
    +      numberOfOutput.zipWithIndex.foreach { case (num, index) =>
    +        if (num >= reducePerPart) {
    +          numToReduce -= reducePerPart
    +          reduceAmounts += ((index, reducePerPart))
    +        } else {
    +          numToReduce -= num
    +          reduceAmounts += ((index, num))
    +        }
    +      }
    +      while (numToReduce > 0) {
    +        numberOfOutput.zipWithIndex.foreach { case (num, index) =>
    +          val toReduce = if (numToReduce / nonEmptyParts > 0) {
    +            numToReduce / nonEmptyParts
    +          } else {
    +            numToReduce
    +          }
    +          if (num - reduceAmounts(index) >= toReduce) {
    +            reduceAmounts(index) = reduceAmounts(index) + toReduce
    +            numToReduce -= toReduce
    +          } else if (num - reduceAmounts(index) > 0) {
    +            reduceAmounts(index) = reduceAmounts(index) + 1
    +            numToReduce -= 1
    +          }
    +        }
    +      }
    +      val broadMap = sparkContext.broadcast(reduceAmounts)
    +      shuffled.mapPartitionsWithIndexInternal { case (index, iter) =>
    +        broadMap.value.get(index).map { size =>
    +          iter.drop(size)
    --- End diff --
    
    @viirya Just one clarification: per the above logic, we always compute a reduce amount and drop that many rows from the respective iterators. But if the limit value is small and the record count is large, we end up dropping most of the rows from each iterator; in that scenario I think take() would be better. What is your view on this?
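
    For illustration only (not part of the PR's diff), a minimal standalone sketch of the trade-off raised above, with made-up row counts and an even split of the limit across partitions: take() advances each iterator only by the number of rows kept, while drop() has to step over every discarded row first. Both keep the same number of rows per partition, though take() keeps the first rows of each partition and drop() the last ones.

        // Standalone sketch, not the PR's code. `numberOfOutput` mirrors the name in the
        // diff above; the row counts and the even split of the limit are assumptions.
        object TakeVsDropSketch {
          def main(args: Array[String]): Unit = {
            val numberOfOutput = Seq(1000000, 1000000, 1000000) // assumed rows per partition
            val limit = 30                                      // small global limit

            // Spread the limit evenly over non-empty partitions (remainder ignored here).
            val keepPerPart = limit / numberOfOutput.count(_ > 0)

            // take(): each iterator is advanced at most keepPerPart times,
            // keeping the first keepPerPart rows of each partition.
            val viaTake = numberOfOutput.map(n => Iterator.range(0, n).take(keepPerPart).size).sum

            // drop(): each iterator must first walk past (n - keepPerPart) discarded rows,
            // keeping the last keepPerPart rows of each partition.
            val viaDrop = numberOfOutput.map(n => Iterator.range(0, n).drop(n - keepPerPart).size).sum

            println(s"rows kept via take(): $viaTake, via drop(): $viaDrop") // both print 30
          }
        }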


