Github user maropu commented on a diff in the pull request:

    https://github.com/apache/spark/pull/22219#discussion_r214825542

    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/SparkPlan.scala ---
    @@ -329,17 +337,26 @@ abstract class SparkPlan extends QueryPlan[SparkPlan] with Logging with Serializ
        *
        * This is modeled after `RDD.take` but never runs any job locally on the driver.
        */
    -  def executeTake(n: Int): Array[InternalRow] = {
    +  def executeTake(n: Int): Array[InternalRow] = executeTakeSeqView(n)._2.force
    +
    +  /**
    +   * Runs this query returning the tuple of the row count and the SeqView of first `n` rows.
    +   *
    +   * This is modeled to execute decodeUnsafeRows lazily to reduce peak memory usage of
    +   * decoding rows. Only compressed byte arrays consume memory after return.
    +   */
    +  private[spark] def executeTakeSeqView(
    --- End diff --

    For example, do `collectCountAndSeqView` and `executeTakeSeqView` depend on each other? If not, please split them into separate PRs.
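    As context for the discussion, here is a minimal, self-contained sketch of the lazy-decoding idea behind `executeTakeSeqView` (not the PR's actual code): only compressed byte arrays are held in memory, and rows are decoded on demand when the returned view is traversed. The names `compressedBatches`, `decodeBatch`, and the `Row` alias are hypothetical stand-ins for Spark's internal helpers and `InternalRow`.

    ```scala
    object LazyTakeSketch {
      type Row = Array[Int] // stand-in for InternalRow

      // Pretend these are the compressed, serialized partitions collected to the driver.
      val compressedBatches: Seq[Array[Byte]] = Seq(Array[Byte](1, 2), Array[Byte](3, 4, 5))

      // Hypothetical decoder: one compressed batch -> an iterator of decoded rows.
      def decodeBatch(bytes: Array[Byte]): Iterator[Row] =
        bytes.iterator.map(b => Array(b.toInt))

      // Analogue of executeTakeSeqView: return (row count, lazy view of the first n rows).
      // Nothing is decoded until the view is traversed or materialized.
      def takeSeqView(n: Int) = {
        val count = compressedBatches.map(_.length.toLong).sum
        val view = compressedBatches.view.flatMap(decodeBatch).take(n)
        (count, view)
      }

      // Analogue of executeTake: materialize the view only when an array is needed.
      def take(n: Int): Array[Row] = takeSeqView(n)._2.toArray

      def main(args: Array[String]): Unit = {
        val (count, view) = takeSeqView(3)
        println(s"total rows = $count")              // counting does not decode any row
        view.foreach(r => println(r.mkString(",")))  // rows are decoded lazily here
      }
    }
    ```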