attilapiros commented on a change in pull request #26016: [SPARK-24914][SQL]
New statistic to improve data size estimate for columnar storage formats
URL: https://github.com/apache/spark/pull/26016#discussion_r388363290
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/command/CommandUtils.scala
##########
@@ -67,28 +76,72 @@ object CommandUtils extends Logging {
     }
   }
 
-  def calculateTotalSize(spark: SparkSession, catalogTable: CatalogTable): BigInt = {
+  def calculateTotalSize(
+      spark: SparkSession,
+      catalogTable: CatalogTable): SizeInBytesWithDeserFactor = {
     val sessionState = spark.sessionState
     val startTime = System.nanoTime()
     val totalSize = if (catalogTable.partitionColumnNames.isEmpty) {
-      calculateSingleLocationSize(sessionState, catalogTable.identifier,
-        catalogTable.storage.locationUri)
+      calculateSingleLocationSize(
+        sessionState,
+        catalogTable.identifier,
+        catalogTable.storage.locationUri,
+        catalogTable.storage.serde)
     } else {
       // Calculate table size as a sum of the visible partitions. See SPARK-21079
       val partitions = sessionState.catalog.listPartitions(catalogTable.identifier)
       logInfo(s"Starting to calculate sizes for ${partitions.length} partitions.")
       val paths = partitions.map(_.storage.locationUri)
-      calculateMultipleLocationSizes(spark, catalogTable.identifier, paths).sum
+      sumSizeWithMaxDeserializationFactor(
+        calculateMultipleLocationSizes(
+          spark,
+          catalogTable.identifier,
+          paths,
+          catalogTable.storage.serde))
     }
     logInfo(s"It took ${(System.nanoTime() - startTime) / (1000 * 1000)} ms to calculate" +
       s" the total size for table ${catalogTable.identifier}.")
     totalSize
   }
 
+  def sumSizeWithMaxDeserializationFactor(
+      sizesWithFactors: Seq[SizeInBytesWithDeserFactor]): SizeInBytesWithDeserFactor = {
+    val definedFactors = sizesWithFactors.flatMap(_.deserFactor)
+    SizeInBytesWithDeserFactor(
+      sizesWithFactors.map(_.sizeInBytes).sum,
+      if (definedFactors.isEmpty) None else Some(definedFactors.max))
+  }
+
+  def sizeInBytesWithDeserFactor(
+      calcDeserFact: Boolean,
+      hadoopConf: Configuration,
+      fStatus: FileStatus,
+      serde: Option[String]): SizeInBytesWithDeserFactor = {
+    assert(fStatus.isFile)
+    val factor = if (calcDeserFact) {
+      val isOrc = serde.contains(orcSerDeCanonicalClass) || fStatus.getPath.getName.endsWith(".orc")
+      val rawSize = if (isOrc) Some(OrcUtils.rawSize(hadoopConf, fStatus.getPath)) else None
Review comment:
Unfortunately no. I tried to come up with a calculation, but during testing it proved to be wrong. I asked the Parquet developers for help, but currently we have no good formula to estimate it.
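
For readers following along, here is a minimal, self-contained sketch of the summing semantics in the hunk above. The case class is a stand-in for the `SizeInBytesWithDeserFactor` this PR introduces (its real field types may differ; an `Int` factor is assumed here purely for illustration), and the partition sizes are made up:

```scala
// Stand-in for the SizeInBytesWithDeserFactor added by this PR; the field
// names come from the hunk above, the Int factor type is an assumption.
case class SizeInBytesWithDeserFactor(sizeInBytes: BigInt, deserFactor: Option[Int])

// Mirrors sumSizeWithMaxDeserializationFactor above: on-disk sizes are
// summed, and the largest (most pessimistic) defined factor is kept.
def sumSizeWithMaxDeserializationFactor(
    sizesWithFactors: Seq[SizeInBytesWithDeserFactor]): SizeInBytesWithDeserFactor = {
  val definedFactors = sizesWithFactors.flatMap(_.deserFactor)
  SizeInBytesWithDeserFactor(
    sizesWithFactors.map(_.sizeInBytes).sum,
    if (definedFactors.isEmpty) None else Some(definedFactors.max))
}

// Three partitions: two with known factors, one (e.g. non-ORC files) without.
val perPartition = Seq(
  SizeInBytesWithDeserFactor(BigInt(100L * 1024 * 1024), Some(3)),
  SizeInBytesWithDeserFactor(BigInt(150L * 1024 * 1024), Some(5)),
  SizeInBytesWithDeserFactor(BigInt(50L * 1024 * 1024), None))

// The total is the sum (300 MiB) and the factor is max(3, 5) = 5, so a
// single well-compressed partition cannot drag the whole estimate down.
assert(sumSizeWithMaxDeserializationFactor(perPartition) ==
  SizeInBytesWithDeserFactor(BigInt(300L * 1024 * 1024), Some(5)))
```

Taking the maximum rather than an average looks like the conservative choice: presumably overestimating the deserialized size is safer for the planner (e.g. around broadcast-join thresholds) than underestimating it.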