MaxGekk commented on a change in pull request #31423:
URL: https://github.com/apache/spark/pull/31423#discussion_r568362026
##########
File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala
##########
@@ -370,7 +370,7 @@ class PartitionedTablePerfStatsSuite
assert(HiveCatalogMetrics.METRIC_PARTITIONS_FETCHED.getCount() == 0)
// reads and caches all the files initially
- assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 5)
+ assert(HiveCatalogMetrics.METRIC_FILES_DISCOVERED.getCount() == 10)
Review comment:
The second partition discovery should re-use file statuses from the cache unless the cache is a NoOp cache; with a NoOp cache the 5 files are listed again, which is why the expected count doubles to 10 here.
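For illustration, here is a minimal, self-contained sketch of why a no-op cache leads to the files being listed on every discovery pass. The `FileStatusCache`, `NoopCache`, `discover`, and `listFromFileSystem` names below are simplified stand-ins for the purpose of this comment, not Spark's actual classes:

```scala
// Toy model: a cache interface, a no-op implementation, and a counter that
// plays the role of HiveCatalogMetrics.METRIC_FILES_DISCOVERED.
case class FileStatus(path: String)  // stand-in for a listed file

trait FileStatusCache {
  def getLeafFiles(path: String): Option[Seq[FileStatus]]
  def putLeafFiles(path: String, files: Seq[FileStatus]): Unit
}

// A cache that never stores anything: every lookup misses.
object NoopCache extends FileStatusCache {
  def getLeafFiles(path: String): Option[Seq[FileStatus]] = None
  def putLeafFiles(path: String, files: Seq[FileStatus]): Unit = ()
}

object NoopCacheDemo extends App {
  var filesDiscovered = 0  // plays the role of METRIC_FILES_DISCOVERED

  // Pretend file-system listing of 5 partition files.
  def listFromFileSystem(path: String): Seq[FileStatus] = {
    val files = (0 until 5).map(i => FileStatus(s"$path/part-$i"))
    filesDiscovered += files.size
    files
  }

  // Consult the cache first; fall back to listing the file system on a miss.
  def discover(path: String, cache: FileStatusCache): Seq[FileStatus] =
    cache.getLeafFiles(path).getOrElse {
      val files = listFromFileSystem(path)
      cache.putLeafFiles(path, files)
      files
    }

  // Two discovery passes (schema inference + final index) with a no-op cache:
  discover("/tbl", NoopCache)
  discover("/tbl", NoopCache)
  assert(filesDiscovered == 10)  // 5 files listed twice
  println(s"files discovered: $filesDiscovered")
}
```

Running it prints `files discovered: 10`, which is the behaviour the updated assertion above expects.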
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
##########
@@ -413,9 +413,13 @@ case class DataSource(
} else {
val globbedPaths = checkAndGlobPathIfNecessary(
checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
- val index = createInMemoryFileIndex(globbedPaths)
+ val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
+ val indexInSchemaInferring = new InMemoryFileIndex(
+   sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache)
val (resultDataSchema, resultPartitionSchema) =
- getOrInferFileFormatSchema(format, () => index)
+ getOrInferFileFormatSchema(format, () => indexInSchemaInferring)
+ val index = new InMemoryFileIndex(
+   sparkSession, globbedPaths, options, Some(resultPartitionSchema), fileStatusCache)
Review comment:
The new index re-uses the file status cache `fileStatusCache`, so it should avoid additional accesses to the file system.
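To make the intent concrete, here is a hedged, self-contained sketch of the pattern in the change above, with simplified stand-in types (`SharedFileStatusCache` and `ToyFileIndex` are illustrative only; the real Spark signatures differ). Building both indexes against one shared cache means the paths are listed on the file system only once:

```scala
import scala.collection.mutable

case class FileStatus(path: String)  // stand-in for a listed file

// Hypothetical shared cache, created once per resolution
// (conceptually like FileStatusCache.getOrCreate in the diff above).
class SharedFileStatusCache {
  private val cache = mutable.Map.empty[String, Seq[FileStatus]]
  var fileSystemListings = 0

  def listLeafFiles(path: String): Seq[FileStatus] =
    cache.getOrElseUpdate(path, {
      fileSystemListings += 1
      (0 until 5).map(i => FileStatus(s"$path/part-$i"))  // pretend FS listing
    })
}

// Hypothetical stand-in for an index that resolves its files on construction.
class ToyFileIndex(paths: Seq[String], cache: SharedFileStatusCache) {
  val allFiles: Seq[FileStatus] = paths.flatMap(cache.listLeafFiles)
}

object SharedCacheDemo extends App {
  val globbedPaths = Seq("/data/tbl")
  val fileStatusCache = new SharedFileStatusCache

  // First index drives schema/partition inference ...
  val indexInSchemaInferring = new ToyFileIndex(globbedPaths, fileStatusCache)
  // ... second index is built afterwards, but its file statuses come from the
  // shared cache rather than another file-system listing.
  val index = new ToyFileIndex(globbedPaths, fileStatusCache)

  assert(index.allFiles == indexInSchemaInferring.allFiles)
  assert(fileStatusCache.fileSystemListings == 1)
  println(s"file system listings: ${fileStatusCache.fileSystemListings}")
}
```

The point is only that the second index construction resolves file statuses from the shared cache instead of re-listing the globbed paths.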