wangyum commented on a change in pull request #22502: [SPARK-25474][SQL] Support `spark.sql.statistics.fallBackToHdfs` in data source tables
URL: https://github.com/apache/spark/pull/22502#discussion_r314179379
 
 

 ##########
 File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelation.scala
 ##########
 @@ -71,7 +70,13 @@ case class HadoopFsRelation(
 
   override def sizeInBytes: Long = {
     val compressionFactor = sqlContext.conf.fileCompressionFactor
-    (location.sizeInBytes * compressionFactor).toLong
+    val defaultSize = (location.sizeInBytes * compressionFactor).toLong
+    location match {
 +      case cfi: CatalogFileIndex if sparkSession.sessionState.conf.fallBackToHdfsForStatsEnabled =>
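
 For context, a minimal sketch of how the full `sizeInBytes` override could look with this fallback, assuming Hadoop's `FileSystem.getContentSummary` for the size lookup and access to the table location via `cfi.table.location` (illustrative only, not necessarily the exact code in this PR):

 ```scala
   override def sizeInBytes: Long = {
     val compressionFactor = sqlContext.conf.fileCompressionFactor
     val defaultSize = (location.sizeInBytes * compressionFactor).toLong
     location match {
       case cfi: CatalogFileIndex
           if sparkSession.sessionState.conf.fallBackToHdfsForStatsEnabled =>
         try {
           // Ask the file system directly for the table's total size on disk.
           val path = new org.apache.hadoop.fs.Path(cfi.table.location)
           val fs = path.getFileSystem(sparkSession.sessionState.newHadoopConf())
           (fs.getContentSummary(path).getLength * compressionFactor).toLong
         } catch {
           // If listing the location fails, keep the original estimate.
           case _: java.io.IOException => defaultSize
         }
       case _ => defaultSize
     }
   }
 ```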
 
 Review comment:
   Yes. I have prepared some tests to illustrate this issue. These tests pass before this commit:
   ```scala
     test("Non-partitioned data source table") {
       withTempDir { dir =>
         withTable("spark_25474") {
           sql(s"CREATE TABLE spark_25474 (c1 BIGINT) USING PARQUET LOCATION '${dir.toURI}'")
           spark.range(5).write.mode(SaveMode.Overwrite).parquet(dir.getCanonicalPath)

           assert(getCatalogTable("spark_25474").stats.isEmpty)
           val relation = spark.table("spark_25474").queryExecution.analyzed.children.head
           assert(relation.stats.sizeInBytes === 935)
         }
       }
     }

     test("Partitioned data source table default") {
       withTempDir { dir =>
         withTable("spark_25474") {
           spark.sql("CREATE TABLE spark_25474(a int, b int) USING parquet " +
             s"PARTITIONED BY(a) LOCATION '${dir.toURI}'")
           spark.sql("INSERT INTO TABLE spark_25474 PARTITION(a=1) SELECT 2")

           assert(getCatalogTable("spark_25474").stats.isEmpty)
           val relation = spark.table("spark_25474").queryExecution.analyzed.children.head
           // scalastyle:off line.size.limit
           // It's 8.0EB in this case. The 8.0EB comes from:
           // https://github.com/apache/spark/blob/c30b5297bc607ae33cc2fcf624b127942154e559/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala#L383-L387
           // scalastyle:on line.size.limit
           assert(relation.stats.sizeInBytes === conf.defaultSizeInBytes)
         }
       }
     }

     test("Partitioned data source table and disable HIVE_MANAGE_FILESOURCE_PARTITIONS") {
       withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") {
         withTempDir { dir =>
           withTable("spark_25474") {
             spark.sql("CREATE TABLE spark_25474(a int, b int) USING parquet " +
               s"PARTITIONED BY(a) LOCATION '${dir.toURI}'")
             spark.sql("INSERT INTO TABLE spark_25474 PARTITION(a=1) SELECT 2")

             assert(getCatalogTable("spark_25474").stats.isEmpty)
             val relation = spark.table("spark_25474").queryExecution.analyzed.children.head
             assert(relation.stats.sizeInBytes === 418)
           }
         }
       }
     }
   ```
   
   
https://github.com/apache/spark/compare/master...wangyum:SPARK-25474-DEV?expand=1
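
   As a hypothetical follow-up (not one of the tests above), enabling `spark.sql.statistics.fallBackToHdfs` should replace the 8.0EB default with the actual on-disk size for the partitioned case. The exact byte count depends on the Parquet writer, so this sketch only asserts that the estimate drops below the default:

   ```scala
     test("Partitioned data source table with fallBackToHdfs enabled (sketch)") {
       withSQLConf("spark.sql.statistics.fallBackToHdfs" -> "true") {
         withTempDir { dir =>
           withTable("spark_25474") {
             spark.sql("CREATE TABLE spark_25474(a int, b int) USING parquet " +
               s"PARTITIONED BY(a) LOCATION '${dir.toURI}'")
             spark.sql("INSERT INTO TABLE spark_25474 PARTITION(a=1) SELECT 2")

             val relation = spark.table("spark_25474").queryExecution.analyzed.children.head
             // With the fallback, the estimate should come from the files on
             // disk rather than the 8.0EB default.
             assert(relation.stats.sizeInBytes < conf.defaultSizeInBytes)
           }
         }
       }
     }
   ```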
   
