HeartSaVioR commented on code in PR #37935:
URL: https://github.com/apache/spark/pull/37935#discussion_r978280322


##########
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala:
##########
@@ -357,6 +357,75 @@ class StateStoreSuite extends 
StateStoreSuiteBase[HDFSBackedStateStoreProvider]
     }
   }
 
+  test("SPARK-40492: maintenance before unload") {
+    val conf = new SparkConf()
+      .setMaster("local")
+      .setAppName("SPARK-40492")
+    val opId = 0
+    val dir1 = newDir()
+    val storeProviderId1 = StateStoreProviderId(StateStoreId(dir1, opId, 0), 
UUID.randomUUID)
+    val sqlConf = 
getDefaultSQLConf(SQLConf.STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT.defaultValue.get,
+      SQLConf.MAX_BATCHES_TO_RETAIN_IN_MEMORY.defaultValue.get)
+    sqlConf.setConf(SQLConf.MIN_BATCHES_TO_RETAIN, 2)
+    val storeConf = StateStoreConf(sqlConf)
+    val hadoopConf = new Configuration()
+
+    var latestStoreVersion = 0
+
+    def generateStoreVersions(): Unit = {
+      for (i <- 1 to 20) {
+        val store = StateStore.get(storeProviderId1, keySchema, valueSchema, 
numColsPrefixKey = 0,
+          latestStoreVersion, storeConf, hadoopConf)
+        put(store, "a", 0, i)
+        store.commit()
+        latestStoreVersion += 1
+      }
+    }
+
+    val timeoutDuration = 1.minute
+
+    quietly {
+      withSpark(new SparkContext(conf)) { sc =>
+        withCoordinatorRef(sc) { coordinatorRef =>
+          require(!StateStore.isMaintenanceRunning, "StateStore is 
unexpectedly running")
+
+          // Generate sufficient versions of store for snapshots
+          generateStoreVersions()
+          eventually(timeout(timeoutDuration)) {
+            // Store should have been reported to the coordinator
+            assert(coordinatorRef.getLocation(storeProviderId1).nonEmpty,
+              "active instance was not reported")
+            // Background maintenance should clean up and generate snapshots
+            assert(StateStore.isMaintenanceRunning, "Maintenance task is not 
running")
+            // Some snapshots should have been generated
+            
tryWithProviderResource(newStoreProvider(storeProviderId1.storeId)) { provider 
=>
+              val snapshotVersions = (1 to latestStoreVersion).filter { 
version =>
+                fileExists(provider, version, isSnapshot = true)
+              }
+              assert(snapshotVersions.nonEmpty, "no snapshot file found")
+            }
+          }
+          // Generate more versions such that there is another snapshot.
+          generateStoreVersions()
+
+          // If driver decides to deactivate all stores related to a query run,
+          // then this instance should be unloaded.
+          coordinatorRef.deactivateInstances(storeProviderId1.queryRunId)
+          eventually(timeout(timeoutDuration)) {
+            assert(!StateStore.isLoaded(storeProviderId1))
+          }
+
+          // Earliest delta file should be scheduled a cleanup during unload.

Review Comment:
   I see. If we are relying on the timing of the maintenance interval, please 
explicitly set the interval so that the test won't fail if the default changes. 
Also, please leave a code comment describing the condition on the maintenance 
interval that is required for this test to succeed.
   
   Btw, would you mind running this test case 100 times to verify that we 
don't have any flakiness?
   
   Thanks in advance!



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to