sumitagrawl commented on code in PR #4838:
URL: https://github.com/apache/ozone/pull/4838#discussion_r1229371268


##########
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/StorageVolumeUtil.java:
##########
@@ -210,44 +211,59 @@ public static boolean checkVolume(StorageVolume volume, String scmId,
     }
 
     File[] rootFiles = volumeRoot.listFiles();
+    // This will either be cluster ID or SCM ID, depending on if SCM HA is
+    // finalized yet or not.
+    String workingDirName = clusterId;
+    boolean success = true;
 
     if (rootFiles == null) {
       // This is the case for IOException, where listFiles returns null.
       // So, we fail the volume.
-      return false;
+      success = false;
     } else if (rootFiles.length == 1) {
-      // DN started for first time or this is a newly added volume.
       // The one file is the version file.
-      // So we create cluster ID directory, or SCM ID directory if
-      // pre-finalized for SCM HA.
+      // DN started for first time or this is a newly added volume.
       // Either the SCM ID or cluster ID will be used in naming the
-      // volume's subdirectory, depending on the datanode's layout version.
-      String id = VersionedDatanodeFeatures.ScmHA.chooseContainerPathID(conf,
-          scmId, clusterId);
-      try {
-        volume.createWorkingDir(id, dbVolumeSet);
-      } catch (IOException e) {
-        logger.error("Prepare working dir failed for volume {}.",
-            volumeRootPath, e);
-        return false;
-      }
-      return true;
+      // volume's working directory, depending on the datanode's layout version.
+      workingDirName = VersionedDatanodeFeatures.ScmHA
+          .chooseContainerPathID(conf, scmId, clusterId);
     } else if (rootFiles.length == 2) {
-      // If we are finalized for SCM HA and there is no cluster ID directory,
-      // the volume may have been unhealthy during finalization and been
-      // skipped. Create cluster ID symlink now.
-      // Else, We are still pre-finalized.
-      // The existing directory should be left for backwards compatibility.
-      return VersionedDatanodeFeatures.ScmHA.
+      // The two files are the version file and an existing working directory.
+      // If the working directory matches the cluster ID, we do not need to
+      // do extra steps.
+      // If the working directory matches the SCM ID and SCM HA has been
+      // finalized, the volume may have been unhealthy during finalization and
+      // been skipped. In that case create the cluster ID symlink now.
+      success = VersionedDatanodeFeatures.ScmHA.
           upgradeVolumeIfNeeded(volume, clusterId);
+      workingDirName = clusterId;
     } else {
-      if (!clusterDir.exists()) {
+      // If there are more files in this directory, we only care that a
+      // working directory named after our cluster ID is present for us to use.
+      // Any existing SCM ID directory should be left for backwards
+      // compatibility.
+      if (clusterIDDir.exists()) {
+        workingDirName = clusterId;
+      } else {
         logger.error("Volume {} is in an inconsistent state. {} files found " +
-            "but cluster ID directory {} does not exist.", volumeRootPath,
-            rootFiles.length, clusterDir);
-        return false;
+                "but cluster ID directory {} does not exist.", volumeRootPath,
+            rootFiles.length, clusterIDDir);
+        success = false;
       }
-      return true;
     }
+
+    // Once the correct working directory name is identified, create it and
+    // its subdirectories.
+    if (success) {
+      try {
+        volume.createWorkingDirs(workingDirName, dbVolumeSet);

Review Comment:
   For the rootFiles.length == 2 case and the else branch, do we need to call createWorkingDirs? It may fail if the directory already exists.
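   
   For illustration only, a minimal sketch of the idempotent creation this question is getting at, using a hypothetical ensureWorkingDir helper rather than the PR's actual createWorkingDirs:
   
   ```java
   import java.io.File;
   import java.io.IOException;
   import java.nio.file.Files;
   
   public final class WorkingDirSketch {
     private WorkingDirSketch() { }
   
     /**
      * Create the volume's working directory if it is missing, without failing
      * when it already exists (hypothetical helper, not createWorkingDirs).
      */
     public static void ensureWorkingDir(File volumeRoot, String workingDirName)
         throws IOException {
       File workingDir = new File(volumeRoot, workingDirName);
       // Files.createDirectories is a no-op when the directory already exists,
       // so repeating the call for the length == 2 and else branches is safe.
       Files.createDirectories(workingDir.toPath());
     }
   }
   ```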



##########
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java:
##########
@@ -155,20 +154,23 @@ public void testTmpDirOnShutdown(String schemaVersion) throws IOException {
         clusterId, conf, LOG, null);
     // Create a container and move it under the tmp delete dir.
     KeyValueContainer container = ContainerTestUtils
-        .setUpTestContainerUnderTmpDir(
+        .addContainerToDeletedDir(
             hddsVolume, clusterId, conf, schemaVersion);
     assertTrue(container.getContainerFile().exists());
     assertTrue(container.getContainerDBFile().exists());
+    File[] deletedContainersAfterShutdown =
+        hddsVolume.getDeletedContainerDir().listFiles();
+    assertNotNull(deletedContainersAfterShutdown);
+    assertEquals(1, deletedContainersAfterShutdown.length);
 
     service.stop();
     service.join();
     service.close();
 
-    ListIterator<File> deleteLeftoverIt = KeyValueContainerUtil
-        .ContainerDeleteDirectory.getDeleteLeftovers(hddsVolume);
-    assertFalse(deleteLeftoverIt.hasNext());
-
-    volumeSet.shutdown();
+    deletedContainersAfterShutdown =

Review Comment:
   From the code changes I am unable to tell when cleanup is triggered, since the cleanup during HddsVolume shutdown has been removed.
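   
   For context only, a rough sketch of what a startup-time sweep of the deleted-containers directory could look like; the names are hypothetical and this is not the PR's actual cleanup path, which is exactly what is unclear here:
   
   ```java
   import java.io.File;
   import org.apache.commons.io.FileUtils;
   
   public final class DeletedContainerSweepSketch {
     private DeletedContainerSweepSketch() { }
   
     /**
      * Hypothetical sweep run at datanode startup instead of HddsVolume
      * shutdown: remove any leftovers under the deleted-containers tmp dir.
      */
     public static void sweepDeletedContainers(File deletedContainerDir) {
       File[] leftovers = deletedContainerDir.listFiles();
       if (leftovers == null) {
         return; // Directory missing or unreadable; nothing to sweep.
       }
       for (File leftover : leftovers) {
         // deleteQuietly removes files or directories recursively and never
         // throws, which suits best-effort cleanup of leftover containers.
         FileUtils.deleteQuietly(leftover);
       }
     }
   }
   ```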



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

