gaborgsomogyi commented on a change in pull request #27664: [SPARK-30915][SS]
FileStreamSink: Avoid reading the metadata log file when finding the latest
batch ID
URL: https://github.com/apache/spark/pull/27664#discussion_r402800750
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala
##########
@@ -240,6 +247,44 @@ class FileStreamSinkLogSuite extends SparkFunSuite with
SharedSparkSession {
))
}
+ test("getLatestBatchId") {
+ withCountOpenLocalFileSystemAsLocalFileSystem {
+ val scheme = CountOpenLocalFileSystem.scheme
+ withSQLConf(SQLConf.FILE_SINK_LOG_COMPACT_INTERVAL.key -> "3") {
+ withTempDir { dir =>
+ val sinkLog = new FileStreamSinkLog(FileStreamSinkLog.VERSION, spark,
+ s"$scheme:///${dir.getCanonicalPath}")
+ for (batchId <- 0 to 2) {
+ sinkLog.add(
+ batchId,
+ Array(newFakeSinkFileStatus("/a/b/" + batchId,
FileStreamSinkLog.ADD_ACTION)))
+ }
+
+ def getCountForOpenOnMetadataFile(batchId: Long): Long = {
+ val path = sinkLog.batchIdToPath(batchId).toUri.getPath
+
CountOpenLocalFileSystem.pathToNumOpenCalled.get(path).map(_.get()).getOrElse(0)
+ }
+
+ CountOpenLocalFileSystem.resetCount()
+
+ assert(sinkLog.getLatestBatchId() === Some(2L))
+ // getLatestBatchId doesn't open the latest metadata log file
+ (0L to 2L).foreach { batchId =>
+ assert(getCountForOpenOnMetadataFile(batchId) === 0)
Review comment:
Nit: Just to be consistent with the other parts, `s/0/0L/`. The same applies to
the other 2 occurrences...
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]