wangyum commented on a change in pull request #24715: [SPARK-25474][SQL] Data source tables support fallback to HDFS for size estimation
URL: https://github.com/apache/spark/pull/24715#discussion_r314587042
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -619,3 +620,35 @@ object DataSourceStrategy {
     (nonconvertiblePredicates ++ unhandledPredicates, pushedFilters, handledFilters)
   }
 }
+
+
+/**
+ * Support for recalculating table statistics if table statistics are not available.
+ */
+class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] {
+
+  private val sessionConf = session.sessionState.conf
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    // For the data source table, we only recalculate the table statistics when it creates
+    // the CatalogFileIndex using defaultSizeInBytes. See SPARK-25474 for more details.
+    case logical @ LogicalRelation(_, _, Some(table), _)
+        if sessionConf.fallBackToHdfsForStatsEnabled && table.stats.isEmpty &&
+          sessionConf.manageFilesourcePartitions &&
+          table.tracksPartitionsInCatalog && table.partitionColumnNames.nonEmpty =>
+      val sizeInBytes = CommandUtils.getSizeInBytesFallBackToHdfs(session, table)
+      val withStats = table.copy(stats = Some(CatalogStatistics(sizeInBytes = BigInt(sizeInBytes))))
+      logical.copy(catalogTable = Some(withStats))
+
+    case relation: HiveTableRelation
Review comment:
@advancedxy I'm already working on this: https://github.com/apache/spark/pull/25306/commits/c86a27b2a5e286733ad305de1d7e42d1373b3a3b
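
For reference, a minimal sketch (not part of this PR) of how the new rule would be exercised. The config keys are the existing SQLConf entries behind `sessionConf.fallBackToHdfsForStatsEnabled` and `sessionConf.manageFilesourcePartitions`; the table name and data are hypothetical.

```scala
import org.apache.spark.sql.SparkSession

// Enable the HDFS fallback for statistics and catalog-managed file source partitions.
val spark = SparkSession.builder()
  .appName("fallback-to-hdfs-stats")
  .config("spark.sql.statistics.fallBackToHdfs", "true")
  .config("spark.sql.hive.manageFilesourcePartitions", "true")
  .enableHiveSupport()
  .getOrCreate()

// A partitioned data source table tracked in the catalog. No ANALYZE TABLE is run,
// so table.stats is empty and the new rule falls back to the size on HDFS.
spark.sql("CREATE TABLE t (id BIGINT) USING parquet PARTITIONED BY (p INT)")
spark.sql("INSERT INTO t PARTITION (p = 1) SELECT id FROM range(1000)")

// Without the rule, sizeInBytes stays at defaultSizeInBytes (Long.MaxValue by default),
// which effectively disables broadcast-join planning for this table; with the rule,
// the optimized plan reports the actual size of the files on HDFS.
spark.table("t").queryExecution.optimizedPlan.stats.sizeInBytes
```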