WweiL commented on code in PR #48355:
URL: https://github.com/apache/spark/pull/48355#discussion_r1859533022
##########
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDB.scala:
##########
@@ -278,77 +280,185 @@ class RocksDB(
// We send snapshots that need to be uploaded by the maintenance thread to
this queue
private val snapshotsToUploadQueue = new
ConcurrentLinkedQueue[RocksDBSnapshot]()
+ /**
+ * Based on the ground truth lineage loaded from changelog file (lineage) and
+ * the latest snapshot (version, uniqueId) pair from file listing, this
function finds
+ * the ground truth latest snapshot (version, uniqueId) the db instance
needs to load.
+ *
+ * @param lineage the ground truth lineage loaded from changelog file
+ * @param latestSnapshotVersionsAndUniqueIds
+ * (version, uniqueId) pair of the latest snapshot version. In case of
task re-execution,
+ * the array could have more than one element.
+ * @return the ground truth latest snapshot (version, uniqueId) the db
instance needs to load
+ */
+ private def getLatestSnapshotVersionAndUniqueIdFromLineage(
+ lineage: Array[LineageItem],
+ latestSnapshotVersionsAndUniqueIds: Array[(Long, Option[String])]):
+ (Long, Option[String]) = {
+ lineage.foreach {
+ case LineageItem(version, uniqueId) =>
+ if (latestSnapshotVersionsAndUniqueIds.contains((version,
Some(uniqueId)))) {
+ return (version, Some(uniqueId))
+ }
+ }
+ throw
QueryExecutionErrors.cannotGetLatestSnapshotVersionAndUniqueIdFromLineage(
+ printLineageItems(lineage),
printLineage(latestSnapshotVersionsAndUniqueIds)
+ )
+ }
+
+ /**
+ * Read the lineage from the changelog files. It first gets the changelog
reader
+ * of the correct changelog version and then reads the lineage information
from the file.
+ * The changelog file is named as version_stateStoreCkptId.changelog
+ * @param version version of the changelog file, used to load changelog file.
+ * @param stateStoreCkptId uniqueId of the changelog file, used to load
changelog file.
+ * @return the lineage information read from the changelog file
+ */
+ private def getLineageFromChangelogFile(
+ version: Long,
+ stateStoreCkptId: Option[String]): Array[LineageItem] = {
+ var changelogReader: StateStoreChangelogReader = null
+ var currLineage: Array[LineageItem] = Array.empty
+ try {
+ changelogReader = fileManager.getChangelogReader(version,
stateStoreCkptId)
+ currLineage = changelogReader.lineage
+ logInfo(log"Loading lineage: " +
+ log"${MDC(LogKeys.LINEAGE, lineageManager)} from " +
+ log"changelog version: ${MDC(LogKeys.VERSION_NUM, version)} " +
+ log"uniqueId: ${MDC(LogKeys.UUID, stateStoreCkptId.getOrElse(""))}.")
+ } finally {
+ if (changelogReader != null) {
+ changelogReader.closeIfNeeded()
+ }
+ }
+ currLineage
+ }
+
+
/**
* Load the given version of data in a native RocksDB instance.
* Note that this will copy all the necessary file from DFS to local disk as
needed,
* and possibly restart the native RocksDB instance.
*/
- def load(
+ private def loadV2(
+ version: Long,
+ stateStoreCkptId: Option[String],
+ readOnly: Boolean = false): RocksDB = {
+ // An array containing lineage information from [snapshotVersion, version]
(both ends inclusive)
+ var currVersionLineage: Array[LineageItem] = lineageManager.getLineage()
+ try {
+ if (loadedVersion != version || (loadedStateStoreCkptId.isEmpty ||
+ stateStoreCkptId.get != loadedStateStoreCkptId.get)) {
+ closeDB(ignoreException = false)
+
+ val (latestSnapshotVersion, latestSnapshotUniqueId) = {
+ // Special handling when version is 0.
+ // When loading the very first version (0), stateStoreCkptId does not
need to be defined
+ // because there won't be 0.changelog / 0.zip file created in RocksDB.
+ if (version == 0 && stateStoreCkptId.isEmpty) {
+ (0L, None)
+ // When there is a snapshot file, it is the ground truth, we can skip
+ // reconstructing the lineage from changelog file.
+ } else if (fileManager.existsSnapshotFile(version, stateStoreCkptId)) {
Review Comment:
The check from local lineage manager already happens here above:
https://github.com/apache/spark/blob/3245b6275d2c55ab948dcf88c99525d6120fdaa0/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDB.scala#L348
When version != loadedVersion, it means the values in memory are not valid
anymore and we need to reload from DFS. So I believe checking the local lineage
manager is not a valid option here.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]