siying commented on code in PR #48355:
URL: https://github.com/apache/spark/pull/48355#discussion_r1866618101


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBFileManager.scala:
##########
@@ -299,20 +313,33 @@ class RocksDBFileManager(
   }
 
   // Get latest snapshot version <= version
-  def getLatestSnapshotVersion(version: Long): Long = {
+  def getLatestSnapshotVersionAndUniqueId(
+      version: Long, checkpointUniqueId: Option[String] = None): Array[(Long, Option[String])] = {
     val path = new Path(dfsRootDir)
     if (fm.exists(path)) {
       // If the latest version snapshot exists, we avoid listing.
-      if (fm.exists(dfsBatchZipFile(version))) {
-        return version
+      if (fm.exists(dfsBatchZipFile(version, checkpointUniqueId))) {
+        return Array((version, checkpointUniqueId))
+      } else if (fm.exists(dfsBatchZipFile(version))) {
+        // This is possible when the state was previously run under checkpoint format v1
+        // and restarted with v2. Then even if a checkpointUniqueId is passed in, the file
+        // does not have that uniqueId in its filename.
+        return Array((version, None))
       }
-      fm.list(path, onlyZipFiles)
-        .map(_.getPath.getName.stripSuffix(".zip"))
-        .map(_.toLong)
-        .filter(_ <= version)
-        .foldLeft(0L)(math.max)
+      val versionAndUniqueIds = fm.list(path, onlyZipFiles)
+        .map(_.getPath.getName.stripSuffix(".zip").split("_"))
+        .filter {
+          case Array(ver, _) => ver.toLong <= version
+          case Array(ver) => ver.toLong <= version
+        }
+        .map {
+          case Array(version, uniqueId) => (version.toLong, Option(uniqueId))
+          case Array(version) => (version.toLong, None)
+        }
+      val maxVersion = versionAndUniqueIds.map(_._1).foldLeft(0L)(math.max)
+      versionAndUniqueIds.filter(_._1 == maxVersion)

Review Comment:
   @WweiL as I said, we should not do that, because the latest snapshot in the checkpoint directory may not be the one in the lineage. We should not return this snapshot at all. The approach I can think of that would work is to pass the lineage to this function and find the latest snapshot file that matches a checkpoint ID from the lineage. That way, we are guaranteed to find a snapshot if one exists.
   
   The way it works now, if we find the latest version with snapshot file(s) but its ID doesn't match the lineage, we end up using it, which is wrong.
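   
   A minimal sketch of the lineage-driven lookup being suggested, assuming the `LineageItem(version, uniqueId)` shape from this PR and a hypothetical `snapshotExists` predicate standing in for the file-existence check:
   
   ```scala
   // Sketch only: resolve the snapshot strictly through the lineage instead
   // of picking the newest zip file in the checkpoint directory.
   def findSnapshotFromLineage(
       lineage: Array[LineageItem],
       snapshotExists: (Long, Option[String]) => Boolean): Option[(Long, Option[String])] = {
     // Walk the lineage from the newest version down; the first entry whose
     // (version, uniqueId) pair has a snapshot file is the one to load.
     lineage.sortBy(-_.version).collectFirst {
       case LineageItem(v, id) if snapshotExists(v, Some(id)) => (v, Some(id))
     }
   }
   ```
   
   Returning `None` would then mean no snapshot on the lineage has been uploaded yet, which the caller can treat as "replay changelogs from the beginning" rather than silently loading a snapshot outside the lineage.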



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDB.scala:
##########
@@ -278,77 +280,185 @@ class RocksDB(
   // We send snapshots that need to be uploaded by the maintenance thread to this queue
   private val snapshotsToUploadQueue = new ConcurrentLinkedQueue[RocksDBSnapshot]()
 
+  /**
+   * Based on the ground truth lineage loaded from the changelog file (lineage) and
+   * the latest snapshot (version, uniqueId) pairs from file listing, this function finds
+   * the ground truth latest snapshot (version, uniqueId) the DB instance needs to load.
+   *
+   * @param lineage the ground truth lineage loaded from the changelog file
+   * @param latestSnapshotVersionsAndUniqueIds
+   *   (version, uniqueId) pairs of the latest snapshot version. In case of task re-execution,
+   *   the array could have more than one element.
+   * @return the ground truth latest snapshot (version, uniqueId) the db instance needs to load
+   */
+  private def getLatestSnapshotVersionAndUniqueIdFromLineage(
+      lineage: Array[LineageItem],
+      latestSnapshotVersionsAndUniqueIds: Array[(Long, Option[String])]):
+      (Long, Option[String]) = {
+    lineage.foreach {
+      case LineageItem(version, uniqueId) =>
+        if (latestSnapshotVersionsAndUniqueIds.contains((version, Some(uniqueId)))) {
+          return (version, Some(uniqueId))
+        }
+    }
+    throw QueryExecutionErrors.cannotGetLatestSnapshotVersionAndUniqueIdFromLineage(
+      printLineageItems(lineage), printLineage(latestSnapshotVersionsAndUniqueIds)
+    )
+  }
+
+  /**
+   * Read the lineage from the changelog files. It first get the changelog 
reader
+   * of the correct changelog version and then read the lineage information 
from the file.
+   * The changelog file is named as version_stateStoreCkptId.changelog
+   * @param version version of the changelog file, used to load changelog file.
+   * @param stateStoreCkptId uniqueId of the changelog file, used to load 
changelog file.
+   * @return
+   */
+  private def getLineageFromChangelogFile(
+      version: Long,
+      stateStoreCkptId: Option[String]): Array[LineageItem] = {
+    var changelogReader: StateStoreChangelogReader = null
+    var currLineage: Array[LineageItem] = Array.empty
+    try {
+      changelogReader = fileManager.getChangelogReader(version, 
stateStoreCkptId)
+      currLineage = changelogReader.lineage
+      logInfo(log"Loading lineage: " +
+        log"${MDC(LogKeys.LINEAGE, lineageManager)} from " +
+        log"changelog version: ${MDC(LogKeys.VERSION_NUM, version)} " +
+        log"uniqueId: ${MDC(LogKeys.UUID, stateStoreCkptId.getOrElse(""))}.")
+    } finally {
+      if (changelogReader != null) {
+        changelogReader.closeIfNeeded()
+      }
+    }
+    currLineage
+  }
+
+
   /**
    * Load the given version of data in a native RocksDB instance.
    * Note that this will copy all the necessary file from DFS to local disk as 
needed,
    * and possibly restart the native RocksDB instance.
    */
-  def load(
+  private def loadV2(
+        version: Long,
+        stateStoreCkptId: Option[String],
+        readOnly: Boolean = false): RocksDB = {
+  // An array containing lineage information for [snapShotVersion, version] (both ends inclusive)
+  var currVersionLineage: Array[LineageItem] = lineageManager.getLineage()
+  try {
+    if (loadedVersion != version || (loadedStateStoreCkptId.isEmpty ||
+        stateStoreCkptId.get != loadedStateStoreCkptId.get)) {
+      closeDB(ignoreException = false)
+
+      val (latestSnapshotVersion, latestSnapshotUniqueId) = {
+        // Special handling when version is 0.
+        // When loading the very first version (0), stateStoreCkptId does not need to be defined
+        // because there won't be a 0.changelog / 0.zip file created in RocksDB.
+        if (version == 0 && stateStoreCkptId.isEmpty) {
+          (0L, None)
+        // When there is a snapshot file, it is the ground truth, so we can skip
+        // reconstructing the lineage from the changelog file.
+        } else if (fileManager.existsSnapshotFile(version, stateStoreCkptId)) {

Review Comment:
   @WweiL I thought the local lineage manager should always be valid. In what situation can the local lineage manager not be trusted?
   I understand that the local lineage might not contain the lineage for the version to load if that version is too new, but if it does contain it, we should always be able to use it without reloading.
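   
   A minimal sketch of the reuse being described, using `lineageManager.getLineage()` and `getLineageFromChangelogFile` from this diff plus an assumed `coversVersion` helper:
   
   ```scala
   // Sketch only: trust the in-memory lineage when it already covers the
   // version being loaded; fall back to reading the changelog file only
   // when the requested version is newer than what the manager has seen.
   def resolveLineage(version: Long, stateStoreCkptId: Option[String]): Array[LineageItem] = {
     val local = lineageManager.getLineage()
     def coversVersion(l: Array[LineageItem]): Boolean = l.exists(_.version == version)
     if (coversVersion(local)) local // local lineage is valid: no changelog read needed
     else getLineageFromChangelogFile(version, stateStoreCkptId) // version too new for local state
   }
   ```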



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala:
##########
@@ -965,7 +966,8 @@ class MicroBatchExecution(
       updateStateStoreCkptId(execCtx, latestExecPlan)
     }
     execCtx.reportTimeTaken("commitOffsets") {
-      if (!commitLog.add(execCtx.batchId, CommitMetadata(watermarkTracker.currentWatermark))) {
+      if (!commitLog.add(execCtx.batchId,
+        CommitMetadata(watermarkTracker.currentWatermark, currentStateStoreCkptId.toMap))) {

Review Comment:
   Same as commented above. It is better to fill in a None if it is V1.
   Did you validate that, for V1, the generated commit log can be read by an older Spark version?
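   
   A minimal sketch of the write side with a None fill, assuming `stateUniqueIds` becomes an `Option` (the exact value type here is a guess based on this diff, and `isCheckpointFormatV2` is a hypothetical flag):
   
   ```scala
   // Sketch only: an Option field that defaults to None means a V1 run
   // serializes no new key, keeping the commit log readable by older Spark.
   case class CommitMetadata(
       nextBatchWatermarkMs: Long,
       stateUniqueIds: Option[Map[Long, Array[String]]] = None)
   
   // Hypothetical call site: only checkpoint format v2 fills the field.
   val meta =
     if (isCheckpointFormatV2) {
       CommitMetadata(watermarkTracker.currentWatermark, Some(currentStateStoreCkptId.toMap))
     } else {
       CommitMetadata(watermarkTracker.currentWatermark) // V1: field stays None
     }
   ```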



##########
common/utils/src/main/resources/error/error-conditions.json:
##########
@@ -275,7 +280,7 @@
       },
       "INVALID_CHANGE_LOG_READER_VERSION" : {
         "message" : [
-          "The change log reader version cannot be <version>."
+          "The change log reader version cannot be <version>. The checkpoint 
probably is from a future Spark version, please upgrade your Spark."

Review Comment:
   @WweiL do you mean the change is already merged and the PR is not fully rebased?



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala:
##########
@@ -513,6 +513,7 @@ class MicroBatchExecution(
               execCtx.startOffsets ++= execCtx.endOffsets
               watermarkTracker.setWatermark(
                 math.max(watermarkTracker.currentWatermark, commitMetadata.nextBatchWatermarkMs))
+              currentStateStoreCkptId ++= commitMetadata.stateUniqueIds

Review Comment:
   It's better to make it explicitly None, rather than an empty map.
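   
   A minimal sketch of the matching read side, assuming `stateUniqueIds` becomes an `Option` as suggested in the comment above:
   
   ```scala
   // Sketch only: an explicit None distinguishes "V1 log, no ids recorded"
   // from "V2 log with an empty map" when restoring from the commit log.
   commitMetadata.stateUniqueIds match {
     case Some(ids) => currentStateStoreCkptId ++= ids // V2: restore the recorded ids
     case None => // V1: leave currentStateStoreCkptId untouched
   }
   ```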


