Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/20933#discussion_r179514435
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala ---
@@ -396,107 +395,34 @@ case class FileSourceScanExec(
       readFile: (PartitionedFile) => Iterator[InternalRow],
       selectedPartitions: Seq[PartitionDirectory],
       fsRelation: HadoopFsRelation): RDD[InternalRow] = {
-    val defaultMaxSplitBytes =
-      fsRelation.sparkSession.sessionState.conf.filesMaxPartitionBytes
+    val maxSplitBytes =
+      PartitionedFileUtil.maxSplitBytes(relation.sparkSession, selectedPartitions)
     val openCostInBytes = fsRelation.sparkSession.sessionState.conf.filesOpenCostInBytes
-    val defaultParallelism = fsRelation.sparkSession.sparkContext.defaultParallelism
-    val totalBytes = selectedPartitions.flatMap(_.files.map(_.getLen + openCostInBytes)).sum
-    val bytesPerCore = totalBytes / defaultParallelism
-
-    val maxSplitBytes = Math.min(defaultMaxSplitBytes, Math.max(openCostInBytes, bytesPerCore))
     logInfo(s"Planning scan with bin packing, max size: $maxSplitBytes bytes, " +
--- End diff ---

We can also move this log statement into `PartitionedFileUtil.maxSplitBytes`, since that is where `maxSplitBytes` is now computed.
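For reference, a minimal sketch of what that could look like with the `logInfo` moved into the helper. The signature follows the call site in the diff above, and the body and log message are reconstructed from the removed lines; the `object` wrapper, the imports, and the `Logging` mixin are assumptions for illustration, not the actual implementation in this PR:

```scala
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.execution.datasources.PartitionDirectory

object PartitionedFileUtil extends Logging {

  // Same bin-packing math as the removed lines in createNonBucketedReadRDD,
  // with the log kept next to the computation it describes.
  def maxSplitBytes(
      sparkSession: SparkSession,
      selectedPartitions: Seq[PartitionDirectory]): Long = {
    val defaultMaxSplitBytes = sparkSession.sessionState.conf.filesMaxPartitionBytes
    val openCostInBytes = sparkSession.sessionState.conf.filesOpenCostInBytes
    val defaultParallelism = sparkSession.sparkContext.defaultParallelism
    // Each file is charged an open cost so that many small files do not
    // get packed into one oversized partition.
    val totalBytes = selectedPartitions.flatMap(_.files.map(_.getLen + openCostInBytes)).sum
    val bytesPerCore = totalBytes / defaultParallelism

    val maxSplitBytes = Math.min(defaultMaxSplitBytes, Math.max(openCostInBytes, bytesPerCore))
    logInfo(s"Planning scan with bin packing, max size: $maxSplitBytes bytes, " +
      s"open cost is considered as scanning $openCostInBytes bytes.")
    maxSplitBytes
  }
}
```

With the log inside the helper, every caller gets the same diagnostic for free instead of each scan node having to re-log the chosen split size.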
---