Copilot commented on code in PR #9150:
URL: https://github.com/apache/ozone/pull/9150#discussion_r2483058419


##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -275,6 +311,65 @@ private void init() throws IOException {
       }
       addVersionNodeWithDependents(snapshotLocalData);
     }
+    for (UUID snapshotId : versionNodeMap.keySet()) {
+      incrementOrphanCheckCount(snapshotId);
+    }
+    long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration(
+        OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    if (snapshotLocalDataManagerServiceInterval > 0) {
+      this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1);
+      this.scheduler.scheduleWithFixedDelay(
+          () -> {
+            try {
+              checkOrphanSnapshotVersions(omMetadataManager, chainManager);
+            } catch (IOException e) {
+              LOG.error("Exception while checking orphan snapshot versions", e);
+            }
+          }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS);
+    }
+
+  }
+
+  private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager)
+      throws IOException {
+    for (Map.Entry<UUID, Integer> entry : snapshotToBeCheckedForOrphans.entrySet()) {
+      UUID snapshotId = entry.getKey();
+      int countBeforeCheck = entry.getValue();
+      checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId);
+      decrementOrphanCheckCount(snapshotId, countBeforeCheck);
+    }
+  }
+
+  @VisibleForTesting
+  void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager,
+      UUID snapshotId) throws IOException {
+    try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider(
+        snapshotId)) {
+      OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData();
+      boolean isSnapshotPurged = OmSnapshotManager.isSnapshotPurged(chainManager, metadataManager, snapshotId,
+          snapshotLocalData.getTransactionInfo());
+      for (Map.Entry<Integer, LocalDataVersionNode> integerLocalDataVersionNodeEntry : getVersionNodeMap()
+          .get(snapshotId).getSnapshotVersions().entrySet()) {

Review Comment:
   Potential `NullPointerException` if `getVersionNodeMap().get(snapshotId)` 
returns null. This can occur if the snapshot is removed from the map between 
the time it's added to `snapshotToBeCheckedForOrphans` and when this method 
executes. Add a null check before iterating.
   ```suggestion
      SnapshotVersionsMeta snapshotVersionsMeta = getVersionNodeMap().get(snapshotId);
      if (snapshotVersionsMeta == null) {
        // The snapshotId is no longer present; skip the orphan check for this snapshot.
        return;
      }
      for (Map.Entry<Integer, LocalDataVersionNode> integerLocalDataVersionNodeEntry :
          snapshotVersionsMeta.getSnapshotVersions().entrySet()) {
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -694,47 +795,76 @@ public synchronized void commit() throws IOException {
       // Need to update the disk state if and only if the dirty bit is set.
       if (isDirty()) {
         String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId);
-        String tmpFilePath = filePath + ".tmp";
-        File tmpFile = new File(tmpFilePath);
-        boolean tmpFileExists = tmpFile.exists();
-        if (tmpFileExists) {
-          tmpFileExists = !tmpFile.delete();
-        }
-        if (tmpFileExists) {
-          throw new IOException("Unable to delete tmp file " + tmpFilePath);
+        File snapshotLocalDataFile = new File(filePath);
+        if (!localDataVersionNodes.getSnapshotVersions().isEmpty()) {
+          String tmpFilePath = filePath + ".tmp";
+          File tmpFile = new File(tmpFilePath);
+          boolean tmpFileExists = tmpFile.exists();
+          if (tmpFileExists) {
+            tmpFileExists = !tmpFile.delete();
+          }
+          if (tmpFileExists) {
+            throw new IOException("Unable to delete tmp file " + tmpFilePath);
+          }
+          snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData);
+          Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE,
+              StandardCopyOption.REPLACE_EXISTING);
+        } else if (snapshotLocalDataFile.exists()) {
+          LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}",
+              super.snapshotId, snapshotLocalDataFile.getAbsolutePath());
+          if (!snapshotLocalDataFile.delete()) {
+            throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath());
+          }
         }
-        snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData);
-        Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE,
-            StandardCopyOption.REPLACE_EXISTING);
-        upsertNode(super.snapshotId, localDataVersionNodes);
+        upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null);
         // Reset dirty bit
         resetDirty();
       }
     }
 
-    private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException {
+    private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions,
+        boolean transactionInfoSet) throws IOException {
       internalLock.writeLock().lock();
       try {
         SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId);
         Map<Integer, LocalDataVersionNode> existingVersions = existingSnapVersions == null ? Collections.emptyMap() :
             existingSnapVersions.getSnapshotVersions();
+        Map<Integer, LocalDataVersionNode> newVersions = snapshotVersions.getSnapshotVersions();
         Map<Integer, List<LocalDataVersionNode>> predecessors = new HashMap<>();
+        boolean versionsRemoved = false;
         // Track all predecessors of the existing versions and remove the node from the graph.
         for (Map.Entry<Integer, LocalDataVersionNode> existingVersion : existingVersions.entrySet()) {
           LocalDataVersionNode existingVersionNode = existingVersion.getValue();
           // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the
           // nodes in the graph would change.
           predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode)));
+          versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey());
           localDataGraph.removeNode(existingVersionNode);
         }
+
         // Add the nodes to be added in the graph and map.
         addSnapshotVersionMeta(snapshotId, snapshotVersions);
         // Reconnect all the predecessors for existing nodes.
-        for (Map.Entry<Integer, LocalDataVersionNode> entry : snapshotVersions.getSnapshotVersions().entrySet()) {
+        for (Map.Entry<Integer, LocalDataVersionNode> entry : newVersions.entrySet()) {
           for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) {
             localDataGraph.putEdge(predecessor, entry.getValue());
           }
         }
+        if (existingSnapVersions != null) {
+          // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of
+          // version removals)
+          if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(),
+              snapshotVersions.getPreviousSnapshotId())) {
+            incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId());
+          }
+          // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have
+          // become an orphan if the version is also updated it
+          // could mean that there could be some orphan version present within the
+          // same snapshot.

Review Comment:
   The comment has unclear grammar and structure. Consider rewording to: 'If 
the transactionInfo is set, this means the snapshot has been purged and the 
entire YAML file could have become an orphan. If the version is also updated, 
it could mean that there are orphan versions present within the same snapshot.'
   ```suggestion
            // If the transactionInfo is set, this means the snapshot has been purged and the entire YAML file could have become an orphan.
            // If the version is also updated, it could mean that there are orphan versions present within the same snapshot.
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -275,6 +311,65 @@ private void init() throws IOException {
       }
       addVersionNodeWithDependents(snapshotLocalData);
     }
+    for (UUID snapshotId : versionNodeMap.keySet()) {
+      incrementOrphanCheckCount(snapshotId);
+    }
+    long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration(
+        OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    if (snapshotLocalDataManagerServiceInterval > 0) {
+      this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1);
+      this.scheduler.scheduleWithFixedDelay(
+          () -> {
+            try {
+              checkOrphanSnapshotVersions(omMetadataManager, chainManager);
+            } catch (IOException e) {
+              LOG.error("Exception while checking orphan snapshot versions", e);
+            }
+          }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS);
+    }
+
+  }
+
+  private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager)
+      throws IOException {
+    for (Map.Entry<UUID, Integer> entry : snapshotToBeCheckedForOrphans.entrySet()) {
+      UUID snapshotId = entry.getKey();
+      int countBeforeCheck = entry.getValue();
+      checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId);
+      decrementOrphanCheckCount(snapshotId, countBeforeCheck);
+    }

Review Comment:
   Iterating over `snapshotToBeCheckedForOrphans.entrySet()` without 
synchronization can lead to `ConcurrentModificationException` if other threads 
modify the map during iteration (via `incrementOrphanCheckCount` or 
`decrementOrphanCheckCount`). Consider creating a copy of the entry set before 
iterating or use appropriate synchronization.
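   A minimal sketch of the copy-before-iterate approach, reusing the fields and helpers already present in this class (assumption: if `snapshotToBeCheckedForOrphans` is a `ConcurrentHashMap`, its weakly consistent iterator would not throw, but a copy still gives the loop a stable view of the pending counts):
   ```java
   private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager)
       throws IOException {
     // Iterate over a copy of the entries so concurrent incrementOrphanCheckCount /
     // decrementOrphanCheckCount calls cannot invalidate the iterator mid-loop.
     for (Map.Entry<UUID, Integer> entry : new HashMap<>(snapshotToBeCheckedForOrphans).entrySet()) {
       UUID snapshotId = entry.getKey();
       int countBeforeCheck = entry.getValue();
       checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId);
       decrementOrphanCheckCount(snapshotId, countBeforeCheck);
     }
   }
   ```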



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -253,8 +265,32 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws
     }
   }
 
-  private void init() throws IOException {
+  private void incrementOrphanCheckCount(UUID snapshotId) {
+    if (snapshotId != null) {
+      this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 1 : (v + 1));
+    }
+  }
+
+  private void decrementOrphanCheckCount(UUID snapshotId, int decrementBy) {
+    this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> {
+      if (v == null) {
+        return null;
+      }
+      int newValue = v - decrementBy;
+      if (newValue <= 0) {
+        return null;
+      }
+      return newValue;
+    });
+  }
+
+  Map<UUID, Integer> getSnapshotToBeCheckedForOrphans() {
+    return snapshotToBeCheckedForOrphans;

Review Comment:
   getSnapshotToBeCheckedForOrphans exposes the internal representation stored in the field snapshotToBeCheckedForOrphans. The value may be modified after this call to getSnapshotToBeCheckedForOrphans.
   ```suggestion
       return Collections.unmodifiableMap(snapshotToBeCheckedForOrphans);
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -694,47 +795,76 @@ public synchronized void commit() throws IOException {
       // Need to update the disk state if and only if the dirty bit is set.
       if (isDirty()) {
         String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId);
-        String tmpFilePath = filePath + ".tmp";
-        File tmpFile = new File(tmpFilePath);
-        boolean tmpFileExists = tmpFile.exists();
-        if (tmpFileExists) {
-          tmpFileExists = !tmpFile.delete();
-        }
-        if (tmpFileExists) {
-          throw new IOException("Unable to delete tmp file " + tmpFilePath);
+        File snapshotLocalDataFile = new File(filePath);
+        if (!localDataVersionNodes.getSnapshotVersions().isEmpty()) {
+          String tmpFilePath = filePath + ".tmp";
+          File tmpFile = new File(tmpFilePath);
+          boolean tmpFileExists = tmpFile.exists();
+          if (tmpFileExists) {
+            tmpFileExists = !tmpFile.delete();
+          }
+          if (tmpFileExists) {
+            throw new IOException("Unable to delete tmp file " + tmpFilePath);
+          }
+          snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData);
+          Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE,
+              StandardCopyOption.REPLACE_EXISTING);
+        } else if (snapshotLocalDataFile.exists()) {
+          LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}",

Review Comment:
   Corrected capitalization of 'YAML' (all uppercase as it's an acronym).
   ```suggestion
             LOG.info("Deleting YAML file corresponding to snapshotId: {} in 
path : {}",
   ```



##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java:
##########
@@ -694,47 +795,76 @@ public synchronized void commit() throws IOException {
       // Need to update the disk state if and only if the dirty bit is set.
       if (isDirty()) {
         String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId);
-        String tmpFilePath = filePath + ".tmp";
-        File tmpFile = new File(tmpFilePath);
-        boolean tmpFileExists = tmpFile.exists();
-        if (tmpFileExists) {
-          tmpFileExists = !tmpFile.delete();
-        }
-        if (tmpFileExists) {
-          throw new IOException("Unable to delete tmp file " + tmpFilePath);
+        File snapshotLocalDataFile = new File(filePath);
+        if (!localDataVersionNodes.getSnapshotVersions().isEmpty()) {
+          String tmpFilePath = filePath + ".tmp";
+          File tmpFile = new File(tmpFilePath);
+          boolean tmpFileExists = tmpFile.exists();
+          if (tmpFileExists) {
+            tmpFileExists = !tmpFile.delete();
+          }
+          if (tmpFileExists) {
+            throw new IOException("Unable to delete tmp file " + tmpFilePath);
+          }
+          snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData);
+          Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE,
+              StandardCopyOption.REPLACE_EXISTING);
+        } else if (snapshotLocalDataFile.exists()) {
+          LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}",
+              super.snapshotId, snapshotLocalDataFile.getAbsolutePath());
+          if (!snapshotLocalDataFile.delete()) {
+            throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath());
+          }
         }
-        snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData);
-        Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE,
-            StandardCopyOption.REPLACE_EXISTING);
-        upsertNode(super.snapshotId, localDataVersionNodes);
+        upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null);
         // Reset dirty bit
         resetDirty();
       }
     }
 
-    private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException {
+    private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions,
+        boolean transactionInfoSet) throws IOException {
       internalLock.writeLock().lock();
       try {
         SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId);
         Map<Integer, LocalDataVersionNode> existingVersions = existingSnapVersions == null ? Collections.emptyMap() :
             existingSnapVersions.getSnapshotVersions();
+        Map<Integer, LocalDataVersionNode> newVersions = snapshotVersions.getSnapshotVersions();
         Map<Integer, List<LocalDataVersionNode>> predecessors = new HashMap<>();
+        boolean versionsRemoved = false;
         // Track all predecessors of the existing versions and remove the node from the graph.
         for (Map.Entry<Integer, LocalDataVersionNode> existingVersion : existingVersions.entrySet()) {
           LocalDataVersionNode existingVersionNode = existingVersion.getValue();
           // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the
           // nodes in the graph would change.
           predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode)));
+          versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey());
           localDataGraph.removeNode(existingVersionNode);
         }
+
         // Add the nodes to be added in the graph and map.
         addSnapshotVersionMeta(snapshotId, snapshotVersions);
         // Reconnect all the predecessors for existing nodes.
-        for (Map.Entry<Integer, LocalDataVersionNode> entry : snapshotVersions.getSnapshotVersions().entrySet()) {
+        for (Map.Entry<Integer, LocalDataVersionNode> entry : newVersions.entrySet()) {
           for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) {
             localDataGraph.putEdge(predecessor, entry.getValue());
           }
         }
+        if (existingSnapVersions != null) {
+          // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of
+          // version removals)
+          if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(),
+              snapshotVersions.getPreviousSnapshotId())) {
+            incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId());
+          }
+          // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have

Review Comment:
   Corrected capitalization of 'YAML' (all uppercase as it's an acronym).
   ```suggestion
          // If the transactionInfo set this means the snapshot has been purged and the entire YAML file could have
   ```



##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java:
##########
@@ -435,10 +454,79 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage
     }
   }
 
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testOrphanVersionDeletionWithVersionDeletion(boolean purgeSnapshot) throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 3);
+    UUID firstSnapId = snapshotIds.get(0);
+    UUID secondSnapId = snapshotIds.get(1);
+    UUID thirdSnapId = snapshotIds.get(2);
+
+    addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3));
+    addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3));
+    addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10));
+    assertEquals(new HashSet<>(snapshotIds), localDataManager.getSnapshotToBeCheckedForOrphans().keySet());
+    localDataManager.getSnapshotToBeCheckedForOrphans().clear();
+    purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot);
+    localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, thirdSnapId);
+    try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      assertEquals(Sets.newHashSet(0, 13), snapshotLocalData.getVersionSstFileInfos().keySet());
+    }
+    assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId));
+    localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId);
+    try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(secondSnapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      if (purgeSnapshot) {
+        assertEquals(Sets.newHashSet(0, 10), snapshotLocalData.getVersionSstFileInfos().keySet());
+      } else {
+        assertEquals(Sets.newHashSet(0, 10, 11), snapshotLocalData.getVersionSstFileInfos().keySet());
+      }
+    }
+  }
+
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) throws IOException {
+    localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf);
+    List<UUID> snapshotIds = createSnapshotLocalData(localDataManager, 3);
+    UUID firstSnapId = snapshotIds.get(0);
+    UUID secondSnapId = snapshotIds.get(1);
+    UUID thirdSnapId = snapshotIds.get(2);
+
+    addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3));
+    addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3));
+    addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10));
+    purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot);
+    try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider =
+             localDataManager.getWritableOmSnapshotLocalData(thirdSnapId, firstSnapId)) {
+      snapshotLocalDataProvider.commit();
+    }
+    try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) {
+      OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData();
+      assertEquals(Sets.newHashSet(0, 5, 13), snapshotLocalData.getVersionSstFileInfos().keySet());
+      assertEquals(firstSnapId, snapshotLocalData.getPreviousSnapshotId());
+    }
+
+    assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId));
+    localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId);
+    if (purgeSnapshot) {
+      NoSuchFileException e = assertThrows(NoSuchFileException.class,

Review Comment:
   Variable 'NoSuchFileException e' is never read.
   ```suggestion
         assertThrows(NoSuchFileException.class,
   ```
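   If the intent is instead to verify the failure detail, the variable could be kept and asserted on. A hedged sketch; both the wrapped call and the expected message are assumptions, since the remainder of the test is not shown in this hunk:
   ```java
   NoSuchFileException e = assertThrows(NoSuchFileException.class,
       () -> localDataManager.getOmSnapshotLocalData(secondSnapId)); // assumed call under test
   assertTrue(e.getFile().contains(secondSnapId.toString()));        // assumed expectation on the missing YAML path
   ```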


