This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new d8a391558f HDDS-12413. Move field declarations to start of class in hdds-container-service (#7968)
d8a391558f is described below

commit d8a391558f239355ed2f53580f7cc7fe86becfac
Author: Ivan Zlenko <[email protected]>
AuthorDate: Fri Apr 18 11:04:20 2025 +0500

    HDDS-12413. Move field declarations to start of class in hdds-container-service (#7968)
---
 .../apache/hadoop/ozone/HddsPolicyProvider.java    |  13 +-
 .../common/helpers/CommandHandlerMetrics.java      |  47 ++--
 .../common/impl/OpenContainerBlockMap.java         |  21 +-
 .../container/common/interfaces/Container.java     |  36 +--
 .../common/statemachine/DatanodeConfiguration.java | 273 +++++++++------------
 .../server/ratis/ContainerStateMachine.java        |  61 +++--
 .../transport/server/ratis/DispatcherContext.java  |  30 +--
 .../transport/server/ratis/XceiverServerRatis.java |  54 ++--
 .../container/common/utils/DiskCheckUtil.java      |   5 +-
 .../container/common/volume/StorageVolume.java     |  66 +++--
 .../KeyValueContainerMetadataInspector.java        |  15 +-
 .../keyvalue/helpers/KeyValueContainerUtil.java    |   6 +-
 .../keyvalue/impl/KeyValueStreamDataChannel.java   |  13 +-
 .../metadata/DatanodeSchemaTwoDBDefinition.java    |  12 +-
 .../replication/AbstractReplicationTask.java       |  24 +-
 .../replication/CopyContainerCompression.java      |  10 +-
 .../ozone/container/common/ContainerTestUtils.java |  17 +-
 .../hadoop/ozone/container/common/ScmTestMock.java |  17 +-
 .../container/common/helpers/TestBlockData.java    |   3 +-
 .../keyvalue/ContainerTestVersionInfo.java         |  40 +--
 .../TestKeyValueContainerMetadataInspector.java    |   9 +-
 .../ozone/container/upgrade/UpgradeTestHelper.java |   3 +-
 22 files changed, 359 insertions(+), 416 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java
index bf4b693332..e4893800b4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java
@@ -39,6 +39,13 @@ public final class HddsPolicyProvider extends PolicyProvider {
   private static final Supplier<HddsPolicyProvider> SUPPLIER =
       MemoizedSupplier.valueOf(HddsPolicyProvider::new);
 
+  private static final List<Service> DN_SERVICES =
+      Collections.singletonList(
+          new Service(
+              OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
+              ReconfigureProtocol.class)
+      );
+
   private HddsPolicyProvider() {
   }
 
@@ -48,12 +55,6 @@ public static HddsPolicyProvider getInstance() {
     return SUPPLIER.get();
   }
 
-  private static final List<Service> DN_SERVICES =
-      Collections.singletonList(
-          new Service(
-              OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL,
-              ReconfigureProtocol.class)
-      );
 
   @Override
   public Service[] getServices() {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java
index b56a2bffcb..d9271803fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/CommandHandlerMetrics.java
@@ -35,32 +35,11 @@
  */
 @InterfaceAudience.Private
 public final class CommandHandlerMetrics implements MetricsSource {
-  enum CommandMetricsMetricsInfo implements MetricsInfo {
-    Command("The type of the SCM command"),
-    TotalRunTimeMs("The total runtime of the command handler in milliseconds"),
-    AvgRunTimeMs("Average run time of the command handler in milliseconds"),
-    QueueWaitingTaskCount("The number of queued tasks waiting for execution"),
-    InvocationCount("The number of times the command handler has been invoked"),
-    ThreadPoolActivePoolSize("The number of active threads in the thread pool"),
-    ThreadPoolMaxPoolSize("The maximum number of threads in the thread pool"),
-    CommandReceivedCount(
-        "The number of received SCM commands for each command type");
-
-    private final String desc;
-    CommandMetricsMetricsInfo(String desc) {
-      this.desc = desc;
-    }
+  public static final String SOURCE_NAME = CommandHandlerMetrics.class.getSimpleName();
 
-    @Override
-    public String description() {
-      return desc;
-    }
-  }
-
-  public static final String SOURCE_NAME =
-      CommandHandlerMetrics.class.getSimpleName();
   private final Map<Type, CommandHandler> handlerMap;
   private final Map<Type, AtomicInteger> commandCount;
+
   private CommandHandlerMetrics(Map<Type, CommandHandler> handlerMap) {
     this.handlerMap = handlerMap;
     this.commandCount = new HashMap<>();
@@ -121,4 +100,26 @@ public void unRegister() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     ms.unregisterSource(SOURCE_NAME);
   }
+
+  enum CommandMetricsMetricsInfo implements MetricsInfo {
+    Command("The type of the SCM command"),
+    TotalRunTimeMs("The total runtime of the command handler in milliseconds"),
+    AvgRunTimeMs("Average run time of the command handler in milliseconds"),
+    QueueWaitingTaskCount("The number of queued tasks waiting for execution"),
+    InvocationCount("The number of times the command handler has been invoked"),
+    ThreadPoolActivePoolSize("The number of active threads in the thread pool"),
+    ThreadPoolMaxPoolSize("The maximum number of threads in the thread pool"),
+    CommandReceivedCount(
+        "The number of received SCM commands for each command type");
+
+    private final String desc;
+    CommandMetricsMetricsInfo(String desc) {
+      this.desc = desc;
+    }
+
+    @Override
+    public String description() {
+      return desc;
+    }
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 03e7887091..6066199be7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -40,6 +40,16 @@
  * marking the container as closed.
  */
 public class OpenContainerBlockMap {
+  /**
+   * TODO : We may construct the openBlockMap by reading the Block Layout
+   * for each block inside a container listing all chunk files and reading the
+   * sizes. This will help to recreate the openKeys Map once the DataNode
+   * restarts.
+   *
+   * For now, we will track all open blocks of a container in the blockMap.
+   */
+  private final ConcurrentMap<Long, BlockDataMap> containers = new ConcurrentHashMap<>();
+
   /**
    * Map: localId {@literal ->} BlockData.
    *
@@ -69,17 +79,6 @@ synchronized List<BlockData> getAll() {
     }
   }
 
-  /**
-   * TODO : We may construct the openBlockMap by reading the Block Layout
-   * for each block inside a container listing all chunk files and reading the
-   * sizes. This will help to recreate the openKeys Map once the DataNode
-   * restarts.
-   *
-   * For now, we will track all open blocks of a container in the blockMap.
-   */
-  private final ConcurrentMap<Long, BlockDataMap> containers =
-      new ConcurrentHashMap<>();
-
   /**
    * Removes the Container matching with specified containerId.
    * @param containerId containerId
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 85f8f31649..08be91af95 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -39,24 +39,6 @@ public interface Container<CONTAINERDATA extends ContainerData> {
    * Encapsulates the result of a container scan.
    */
   class ScanResult {
-    /**
-     * Represents the reason a container scan failed and a container should
-     * be marked unhealthy.
-     */
-    public enum FailureType {
-      MISSING_CONTAINER_DIR,
-      MISSING_METADATA_DIR,
-      MISSING_CONTAINER_FILE,
-      MISSING_CHUNKS_DIR,
-      MISSING_CHUNK_FILE,
-      CORRUPT_CONTAINER_FILE,
-      CORRUPT_CHUNK,
-      INCONSISTENT_CHUNK_LENGTH,
-      INACCESSIBLE_DB,
-      WRITE_FAILURE,
-      DELETED_CONTAINER
-    }
-
     private final boolean healthy;
     private final File unhealthyFile;
     private final FailureType failureType;
@@ -94,6 +76,24 @@ public FailureType getFailureType() {
     public Throwable getException() {
       return exception;
     }
+
+    /**
+     * Represents the reason a container scan failed and a container should
+     * be marked unhealthy.
+     */
+    public enum FailureType {
+      MISSING_CONTAINER_DIR,
+      MISSING_METADATA_DIR,
+      MISSING_CONTAINER_FILE,
+      MISSING_CHUNKS_DIR,
+      MISSING_CHUNK_FILE,
+      CORRUPT_CONTAINER_FILE,
+      CORRUPT_CHUNK,
+      INCONSISTENT_CHUNK_LENGTH,
+      INACCESSIBLE_DB,
+      WRITE_FAILURE,
+      DELETED_CONTAINER
+    }
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 68edf3c50d..371aed1ae0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -43,44 +43,30 @@
 public class DatanodeConfiguration extends ReconfigurableConfig {
   public static final String CONFIG_PREFIX = "hdds.datanode";
 
-  private static final String BLOCK_DELETE_THREAD_MAX
-      = "block.delete.threads.max";
+  private static final String BLOCK_DELETE_THREAD_MAX = "block.delete.threads.max";
+
   public static final String HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX =
       CONFIG_PREFIX + "." + BLOCK_DELETE_THREAD_MAX;
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DatanodeConfiguration.class);
-
-  static final String CONTAINER_DELETE_THREADS_MAX_KEY =
-      "hdds.datanode.container.delete.threads.max";
-  static final String CONTAINER_CLOSE_THREADS_MAX_KEY =
-      "hdds.datanode.container.close.threads.max";
-  static final String PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY =
-      "hdds.datanode.periodic.disk.check.interval.minutes";
-  public static final String DISK_CHECK_FILE_SIZE_KEY =
-      "hdds.datanode.disk.check.io.file.size";
-  public static final String DISK_CHECK_IO_TEST_COUNT_KEY =
-      "hdds.datanode.disk.check.io.test.count";
-  public static final String DISK_CHECK_IO_FAILURES_TOLERATED_KEY =
-      "hdds.datanode.disk.check.io.failures.tolerated";
-  public static final String FAILED_DATA_VOLUMES_TOLERATED_KEY =
-      "hdds.datanode.failed.data.volumes.tolerated";
-  public static final String FAILED_METADATA_VOLUMES_TOLERATED_KEY =
-      "hdds.datanode.failed.metadata.volumes.tolerated";
-  public static final String FAILED_DB_VOLUMES_TOLERATED_KEY =
-      "hdds.datanode.failed.db.volumes.tolerated";
-  public static final String DISK_CHECK_MIN_GAP_KEY =
-      "hdds.datanode.disk.check.min.gap";
-  public static final String DISK_CHECK_TIMEOUT_KEY =
-      "hdds.datanode.disk.check.timeout";
+  private static final Logger LOG = LoggerFactory.getLogger(DatanodeConfiguration.class);
+
+  static final String CONTAINER_DELETE_THREADS_MAX_KEY = "hdds.datanode.container.delete.threads.max";
+  static final String CONTAINER_CLOSE_THREADS_MAX_KEY = "hdds.datanode.container.close.threads.max";
+  static final String PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY = "hdds.datanode.periodic.disk.check.interval.minutes";
+  public static final String DISK_CHECK_FILE_SIZE_KEY = "hdds.datanode.disk.check.io.file.size";
+  public static final String DISK_CHECK_IO_TEST_COUNT_KEY = "hdds.datanode.disk.check.io.test.count";
+  public static final String DISK_CHECK_IO_FAILURES_TOLERATED_KEY = "hdds.datanode.disk.check.io.failures.tolerated";
+  public static final String FAILED_DATA_VOLUMES_TOLERATED_KEY = "hdds.datanode.failed.data.volumes.tolerated";
+  public static final String FAILED_METADATA_VOLUMES_TOLERATED_KEY = "hdds.datanode.failed.metadata.volumes.tolerated";
+  public static final String FAILED_DB_VOLUMES_TOLERATED_KEY = "hdds.datanode.failed.db.volumes.tolerated";
+  public static final String DISK_CHECK_MIN_GAP_KEY = "hdds.datanode.disk.check.min.gap";
+  public static final String DISK_CHECK_TIMEOUT_KEY = "hdds.datanode.disk.check.timeout";
 
   // Minimum space should be left on volume.
   // Ex: If volume has 1000GB and minFreeSpace is configured as 10GB,
  // In this case when availableSpace is 10GB or below, volume is assumed as full
-  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE =
-      "hdds.datanode.volume.min.free.space";
-  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT =
-      "5GB";
+  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE = "hdds.datanode.volume.min.free.space";
+  public static final String HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT = "5GB";
   // Minimum percent of space should be left on volume.
   // Ex: If volume has 1000GB and minFreeSpacePercent is configured as 2%,
  // In this case when availableSpace is 20GB(2% of 1000) or below, volume is assumed as full
@@ -88,10 +74,8 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       "hdds.datanode.volume.min.free.space.percent";
   static final byte MIN_FREE_SPACE_UNSET = -1;
 
-  public static final String WAIT_ON_ALL_FOLLOWERS =
-      "hdds.datanode.wait.on.all.followers";
-  public static final String CONTAINER_SCHEMA_V3_ENABLED =
-      "hdds.datanode.container.schema.v3.enabled";
+  public static final String WAIT_ON_ALL_FOLLOWERS = "hdds.datanode.wait.on.all.followers";
+  public static final String CONTAINER_SCHEMA_V3_ENABLED = "hdds.datanode.container.schema.v3.enabled";
 
   static final boolean CHUNK_DATA_VALIDATION_CHECK_DEFAULT = false;
 
@@ -115,23 +99,24 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
   static final long ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT = 32 * 1024 * 1024;
   static final int ROCKSDB_LOG_MAX_FILE_NUM_DEFAULT = 64;
   // one hour
-  static final long ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT =
-      1L * 60 * 60 * 1000 * 1000;
+  static final long ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT = 1L * 60 * 60 * 1000 * 1000;
   static final int ROCKSDB_MAX_OPEN_FILES_DEFAULT = 1024;
-  public static final String ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_KEY =
-      "hdds.datanode.rocksdb.log.max-file-size";
-  public static final String ROCKSDB_LOG_MAX_FILE_NUM_KEY =
-      "hdds.datanode.rocksdb.log.max-file-num";
-  public static final String
-      ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_KEY =
+  public static final String ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_KEY = "hdds.datanode.rocksdb.log.max-file-size";
+  public static final String ROCKSDB_LOG_MAX_FILE_NUM_KEY = "hdds.datanode.rocksdb.log.max-file-num";
+  public static final String ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_KEY =
       "hdds.datanode.rocksdb.delete_obsolete_files_period";
-  public static final Boolean
-      OZONE_DATANODE_CHECK_EMPTY_CONTAINER_DIR_ON_DELETE_DEFAULT = false;
+  public static final Boolean OZONE_DATANODE_CHECK_EMPTY_CONTAINER_DIR_ON_DELETE_DEFAULT = false;
+
+  private static final long AUTO_COMPACTION_SMALL_SST_FILE_INTERVAL_MINUTES_DEFAULT = 120;
+  private static final int AUTO_COMPACTION_SMALL_SST_FILE_THREADS_DEFAULT = 1;
 
-  private static final long
-      AUTO_COMPACTION_SMALL_SST_FILE_INTERVAL_MINUTES_DEFAULT = 120;
-  private static final int
-      AUTO_COMPACTION_SMALL_SST_FILE_THREADS_DEFAULT = 1;
+  static final int CONTAINER_DELETE_THREADS_DEFAULT = 2;
+  static final int CONTAINER_CLOSE_THREADS_DEFAULT = 3;
+  static final int BLOCK_DELETE_THREADS_DEFAULT = 5;
+
+  public static final String BLOCK_DELETE_COMMAND_WORKER_INTERVAL =
+      "hdds.datanode.block.delete.command.worker.interval";
+  public static final Duration BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT = Duration.ofSeconds(2);
 
   /**
    * Number of threads per volume that Datanode will use for chunk read.
@@ -140,20 +125,10 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.INT,
       defaultValue = "10",
       tags = {DATANODE},
-      description = "Number of threads per volume that Datanode will use for " 
+
-          "reading replicated chunks."
+      description = "Number of threads per volume that Datanode will use for 
reading replicated chunks."
   )
   private int numReadThreadPerVolume = 10;
 
-  static final int CONTAINER_DELETE_THREADS_DEFAULT = 2;
-  static final int CONTAINER_CLOSE_THREADS_DEFAULT = 3;
-  static final int BLOCK_DELETE_THREADS_DEFAULT = 5;
-
-  public static final String BLOCK_DELETE_COMMAND_WORKER_INTERVAL =
-      "hdds.datanode.block.delete.command.worker.interval";
-  public static final Duration BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT =
-      Duration.ofSeconds(2);
-
   /**
    * The maximum number of threads used to delete containers on a datanode
    * simultaneously.
@@ -162,8 +137,7 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.INT,
       defaultValue = "2",
       tags = {DATANODE},
-      description = "The maximum number of threads used to delete containers " 
+
-          "on a datanode"
+      description = "The maximum number of threads used to delete containers 
on a datanode"
   )
   private int containerDeleteThreads = CONTAINER_DELETE_THREADS_DEFAULT;
 
@@ -175,8 +149,7 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.INT,
       defaultValue = "3",
       tags = {DATANODE},
-      description = "The maximum number of threads used to close containers " +
-          "on a datanode"
+      description = "The maximum number of threads used to close containers on 
a datanode"
   )
   private int containerCloseThreads = CONTAINER_CLOSE_THREADS_DEFAULT;
 
@@ -191,8 +164,7 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.INT,
       defaultValue = "5",
       tags = {DATANODE},
-      description = "The maximum number of threads used to handle delete " +
-          " blocks on a datanode"
+      description = "The maximum number of threads used to handle delete 
blocks on a datanode"
   )
   private int blockDeleteThreads = BLOCK_DELETE_THREADS_DEFAULT;
 
@@ -219,8 +191,7 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.TIME,
       defaultValue = "2s",
       tags = {DATANODE},
-      description = "The interval between DeleteCmdWorker execution of " +
-          "delete commands."
+      description = "The interval between DeleteCmdWorker execution of delete 
commands."
   )
   private Duration blockDeleteCommandWorkerInterval =
       BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT;
@@ -233,8 +204,7 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       type = ConfigType.INT,
       defaultValue = "5000",
       tags = {DATANODE},
-      description = "The default maximum number of commands in the queue " +
-          "and command type's sub-queue on a datanode"
+      description = "The default maximum number of commands in the queue and 
command type's sub-queue on a datanode"
   )
   private int cmdQueueLimit = 5000;
 
@@ -274,42 +244,19 @@ public class DatanodeConfiguration extends ReconfigurableConfig {
       defaultValue = "100ms",
       type = ConfigType.TIME,
       tags = { DATANODE, ConfigTag.DELETION},
-      description = "Timeout for the thread used to process the delete" +
-          " block command to wait for the container lock."
+      description = "Timeout for the thread used to process the delete block 
command to wait for the container lock."
   )
-  private long blockDeleteMaxLockWaitTimeoutMs =
-      Duration.ofMillis(100).toMillis();
-
-  public Duration getBlockDeletionInterval() {
-    return blockDeletionInterval;
-  }
-
-  public void setRecoveringContainerScrubInterval(Duration duration) {
-    recoveringContainerScrubInterval = duration;
-  }
-
-  public Duration getRecoveringContainerScrubInterval() {
-    return recoveringContainerScrubInterval;
-  }
-
-  public void setBlockDeletionInterval(Duration duration) {
-    blockDeletionInterval = duration;
-  }
+  private long blockDeleteMaxLockWaitTimeoutMs = Duration.ofMillis(100).toMillis();
 
   @Config(key = "block.deleting.limit.per.interval",
       defaultValue = "5000",
       reconfigurable = true,
       type = ConfigType.INT,
       tags = { ConfigTag.SCM, ConfigTag.DELETION },
-      description =
-          "Number of blocks to be deleted in an interval."
+      description = "Number of blocks to be deleted in an interval."
   )
   private int blockLimitPerInterval = 5000;
 
-  public int getBlockDeletionLimit() {
-    return blockLimitPerInterval;
-  }
-
   @Config(key = "block.deleting.max.lock.holding.time",
       defaultValue = "1s",
       type = ConfigType.TIME,
@@ -323,20 +270,7 @@ public int getBlockDeletionLimit() {
           + "which means the actual execution time may exceed this limit. "
           + "Unit could be defined with postfix (ns,ms,s,m,h,d). "
   )
-  private long blockDeletingMaxLockHoldingTime =
-      Duration.ofSeconds(1).toMillis();
-
-  public Duration getBlockDeletingMaxLockHoldingTime() {
-    return Duration.ofMillis(blockDeletingMaxLockHoldingTime);
-  }
-
-  public void setBlockDeletingMaxLockHoldingTime(Duration maxLockHoldingTime) {
-    blockDeletingMaxLockHoldingTime = maxLockHoldingTime.toMillis();
-  }
-
-  public void setBlockDeletionLimit(int limit) {
-    this.blockLimitPerInterval = limit;
-  }
+  private long blockDeletingMaxLockHoldingTime = Duration.ofSeconds(1).toMillis();
 
   @Config(key = "hdds.datanode.volume.min.free.space",
       defaultValue = "-1",
@@ -467,8 +401,7 @@ public void setBlockDeletionLimit(int limit) {
       defaultValue = "false",
       type = ConfigType.BOOLEAN,
       tags = { DATANODE },
-      description = "Enable safety checks such as checksum validation"
-          + " for Ratis calls."
+      description = "Enable safety checks such as checksum validation for 
Ratis calls."
   )
   private boolean isChunkDataValidationCheck =
       CHUNK_DATA_VALIDATION_CHECK_DEFAULT;
@@ -484,14 +417,6 @@ public void setBlockDeletionLimit(int limit) {
 
   private boolean waitOnAllFollowers = WAIT_ON_ALL_FOLLOWERS_DEFAULT;
 
-  public boolean waitOnAllFollowers() {
-    return waitOnAllFollowers;
-  }
-
-  public void setWaitOnAllFollowers(boolean val) {
-    this.waitOnAllFollowers = val;
-  }
-
   @Config(key = "container.schema.v3.enabled",
       defaultValue = "true",
       type = ConfigType.BOOLEAN,
@@ -505,8 +430,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "|",
       type = ConfigType.STRING,
       tags = { DATANODE },
-      description = "The default separator between Container ID and container" 
+
-           " meta key name."
+      description = "The default separator between Container ID and container 
meta key name."
   )
   private String containerSchemaV3KeySeparator = "|";
 
@@ -514,8 +438,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "INFO",
       type = ConfigType.STRING,
       tags = { DATANODE },
-      description =
-          "The user log level of RocksDB(DEBUG/INFO/WARN/ERROR/FATAL))"
+      description = "The user log level of 
RocksDB(DEBUG/INFO/WARN/ERROR/FATAL))"
   )
   private String rocksdbLogLevel = "INFO";
 
@@ -523,8 +446,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "32MB",
       type = ConfigType.SIZE,
       tags = { DATANODE },
-      description = "The max size of each user log file of RocksDB. " +
-          "O means no size limit."
+      description = "The max size of each user log file of RocksDB. O means no 
size limit."
   )
   private long rocksdbLogMaxFileSize = ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_DEFAULT;
 
@@ -543,8 +465,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "1h", timeUnit = MICROSECONDS,
       type = ConfigType.TIME,
       tags = { DATANODE },
-      description = "Periodicity when obsolete files get deleted. " +
-          "Default is 1h."
+      description = "Periodicity when obsolete files get deleted. Default is 
1h."
   )
   private long rocksdbDeleteObsoleteFilesPeriod =
       ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICRO_SECONDS_DEFAULT;
@@ -571,8 +492,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "1MB",
       type = ConfigType.SIZE,
       tags = { DATANODE },
-      description = "SST files smaller than this configuration will be " +
-          "auto compacted."
+      description = "SST files smaller than this configuration will be auto 
compacted."
   )
   private long autoCompactionSmallSstFileSize = 1024 * 1024;
 
@@ -580,8 +500,7 @@ public void setWaitOnAllFollowers(boolean val) {
       defaultValue = "512",
       type = ConfigType.INT,
       tags = { DATANODE },
-      description = "Auto compaction will happen if the number of small SST " +
-          " files exceeds this threshold."
+      description = "Auto compaction will happen if the number of small SST 
files exceeds this threshold."
   )
   private int autoCompactionSmallSstFileNum = 512;
 
@@ -611,8 +530,7 @@ public void setWaitOnAllFollowers(boolean val) {
       type = ConfigType.BOOLEAN,
       defaultValue = "false",
       tags = { DATANODE },
-      description = "Boolean Flag to decide whether to check container " +
-          "directory or not to determine container is empty"
+      description = "Boolean Flag to decide whether to check container 
directory or not to determine container is empty"
   )
   private boolean bCheckEmptyContainerDir =
       OZONE_DATANODE_CHECK_EMPTY_CONTAINER_DIR_ON_DELETE_DEFAULT;
@@ -627,10 +545,6 @@ public void setWaitOnAllFollowers(boolean val) {
   )
   private long deleteContainerTimeoutMs = Duration.ofSeconds(60).toMillis();
 
-  public long getDeleteContainerTimeoutMs() {
-    return deleteContainerTimeoutMs;
-  }
-
   @PostConstruct
   public void validate() {
     if (containerDeleteThreads < 1) {
@@ -777,7 +691,9 @@ public void validate() {
   private void validateMinFreeSpace() {
     if (minFreeSpaceRatio > 1) {
       LOG.warn("{} = {} is invalid, should be between 0 and 1",
-          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT, minFreeSpaceRatio);
+          HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
+          minFreeSpaceRatio);
+
       minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
     }
 
@@ -793,6 +709,7 @@ private void validateMinFreeSpace() {
           HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
           minFreeSpaceRatio,
           HDDS_DATANODE_VOLUME_MIN_FREE_SPACE);
+
       minFreeSpaceRatio = MIN_FREE_SPACE_UNSET;
     }
 
@@ -803,22 +720,66 @@ private void validateMinFreeSpace() {
     }
   }
 
-  public void setContainerDeleteThreads(int containerDeleteThreads) {
-    this.containerDeleteThreads = containerDeleteThreads;
+  public Duration getBlockDeletionInterval() {
+    return blockDeletionInterval;
+  }
+
+  public void setBlockDeletionInterval(Duration duration) {
+    blockDeletionInterval = duration;
+  }
+
+  public Duration getRecoveringContainerScrubInterval() {
+    return recoveringContainerScrubInterval;
+  }
+
+  public void setRecoveringContainerScrubInterval(Duration duration) {
+    recoveringContainerScrubInterval = duration;
+  }
+
+  public int getBlockDeletionLimit() {
+    return blockLimitPerInterval;
+  }
+
+  public void setBlockDeletionLimit(int limit) {
+    this.blockLimitPerInterval = limit;
+  }
+
+  public long getDeleteContainerTimeoutMs() {
+    return deleteContainerTimeoutMs;
+  }
+
+  public Duration getBlockDeletingMaxLockHoldingTime() {
+    return Duration.ofMillis(blockDeletingMaxLockHoldingTime);
+  }
+
+  public void setBlockDeletingMaxLockHoldingTime(Duration maxLockHoldingTime) {
+    blockDeletingMaxLockHoldingTime = maxLockHoldingTime.toMillis();
+  }
+
+  public boolean waitOnAllFollowers() {
+    return waitOnAllFollowers;
+  }
+
+  public void setWaitOnAllFollowers(boolean val) {
+    this.waitOnAllFollowers = val;
   }
 
   public int getContainerDeleteThreads() {
     return containerDeleteThreads;
   }
 
-  public void setContainerCloseThreads(int containerCloseThreads) {
-    this.containerCloseThreads = containerCloseThreads;
+  public void setContainerDeleteThreads(int containerDeleteThreads) {
+    this.containerDeleteThreads = containerDeleteThreads;
   }
 
   public int getContainerCloseThreads() {
     return containerCloseThreads;
   }
 
+  public void setContainerCloseThreads(int containerCloseThreads) {
+    this.containerCloseThreads = containerCloseThreads;
+  }
+
   public long getMinFreeSpace(long capacity) {
     return minFreeSpaceRatio >= 0
         ? ((long) (capacity * minFreeSpaceRatio))
@@ -837,8 +798,7 @@ public long getPeriodicDiskCheckIntervalMinutes() {
     return periodicDiskCheckIntervalMinutes;
   }
 
-  public void setPeriodicDiskCheckIntervalMinutes(
-      long periodicDiskCheckIntervalMinutes) {
+  public void setPeriodicDiskCheckIntervalMinutes(long periodicDiskCheckIntervalMinutes) {
     this.periodicDiskCheckIntervalMinutes = periodicDiskCheckIntervalMinutes;
   }
 
@@ -922,14 +882,14 @@ public int getBlockDeleteQueueLimit() {
     return blockDeleteQueueLimit;
   }
 
-  public long getBlockDeleteMaxLockWaitTimeoutMs() {
-    return blockDeleteMaxLockWaitTimeoutMs;
-  }
-
   public void setBlockDeleteQueueLimit(int queueLimit) {
     this.blockDeleteQueueLimit = queueLimit;
   }
 
+  public long getBlockDeleteMaxLockWaitTimeoutMs() {
+    return blockDeleteMaxLockWaitTimeoutMs;
+  }
+
   public Duration getBlockDeleteCommandWorkerInterval() {
     return blockDeleteCommandWorkerInterval;
   }
@@ -955,14 +915,14 @@ public void setChunkDataValidationCheck(boolean writeChunkValidationCheck) {
     isChunkDataValidationCheck = writeChunkValidationCheck;
   }
 
-  public void setNumReadThreadPerVolume(int threads) {
-    this.numReadThreadPerVolume = threads;
-  }
-
   public int getNumReadThreadPerVolume() {
     return numReadThreadPerVolume;
   }
 
+  public void setNumReadThreadPerVolume(int threads) {
+    this.numReadThreadPerVolume = threads;
+  }
+
   public boolean getContainerSchemaV3Enabled() {
     return this.containerSchemaV3Enabled;
   }
@@ -1047,8 +1007,7 @@ public long getAutoCompactionSmallSstFileIntervalMinutes() {
     return autoCompactionSmallSstFileIntervalMinutes;
   }
 
-  public void setAutoCompactionSmallSstFileIntervalMinutes(
-      long autoCompactionSmallSstFileIntervalMinutes) {
+  public void setAutoCompactionSmallSstFileIntervalMinutes(long autoCompactionSmallSstFileIntervalMinutes) {
     this.autoCompactionSmallSstFileIntervalMinutes =
         autoCompactionSmallSstFileIntervalMinutes;
   }
@@ -1057,8 +1016,7 @@ public int getAutoCompactionSmallSstFileThreads() {
     return autoCompactionSmallSstFileThreads;
   }
 
-  public void setAutoCompactionSmallSstFileThreads(
-      int autoCompactionSmallSstFileThreads) {
+  public void setAutoCompactionSmallSstFileThreads(int autoCompactionSmallSstFileThreads) {
     this.autoCompactionSmallSstFileThreads =
         autoCompactionSmallSstFileThreads;
   }
@@ -1067,5 +1025,4 @@ static long getDefaultFreeSpace() {
     final StorageSize measure = StorageSize.parse(HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT);
     return Math.round(measure.getUnit().toBytes(measure.getValue()));
   }
-
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 2a9fe61d17..e38b485641 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -138,8 +138,36 @@
 *    Then, WriteChunk commit and CreateContainer will be executed in the same order.
  */
 public class ContainerStateMachine extends BaseStateMachine {
-  static final Logger LOG =
-      LoggerFactory.getLogger(ContainerStateMachine.class);
+  static final Logger LOG = LoggerFactory.getLogger(ContainerStateMachine.class);
+
+  private final SimpleStateMachineStorage storage = new SimpleStateMachineStorage();
+  private final ContainerDispatcher dispatcher;
+  private final ContainerController containerController;
+  private final XceiverServerRatis ratisServer;
+  private final NavigableMap<Long, WriteFutures> writeChunkFutureMap;
+
+  private final long writeChunkWaitMaxNs;
+
+  // keeps track of the containers created per pipeline
+  private final Map<Long, Long> container2BCSIDMap;
+  private final TaskQueueMap containerTaskQueues = new TaskQueueMap();
+  private final ExecutorService executor;
+  private final List<ThreadPoolExecutor> chunkExecutors;
+  private final Map<Long, Long> applyTransactionCompletionMap;
+  private final Set<Long> unhealthyContainers;
+  private final Cache<Long, ByteString> stateMachineDataCache;
+  private final AtomicBoolean stateMachineHealthy;
+
+  private final Semaphore applyTransactionSemaphore;
+  private final boolean waitOnBothFollowers;
+  private final HddsDatanodeService datanodeService;
+  private static Semaphore semaphore = new Semaphore(1);
+  private final AtomicBoolean peersValidated;
+
+  /**
+   * CSM metrics.
+   */
+  private final CSMMetrics metrics;
 
   static class TaskQueueMap {
     private final Map<Long, TaskQueue> map = new HashMap<>();
@@ -217,35 +245,6 @@ long getStartTime() {
     }
   }
 
-  private final SimpleStateMachineStorage storage =
-      new SimpleStateMachineStorage();
-  private final ContainerDispatcher dispatcher;
-  private final ContainerController containerController;
-  private final XceiverServerRatis ratisServer;
-  private final NavigableMap<Long, WriteFutures> writeChunkFutureMap;
-  private final long writeChunkWaitMaxNs;
-
-  // keeps track of the containers created per pipeline
-  private final Map<Long, Long> container2BCSIDMap;
-  private final TaskQueueMap containerTaskQueues = new TaskQueueMap();
-  private final ExecutorService executor;
-  private final List<ThreadPoolExecutor> chunkExecutors;
-  private final Map<Long, Long> applyTransactionCompletionMap;
-  private final Set<Long> unhealthyContainers;
-  private final Cache<Long, ByteString> stateMachineDataCache;
-  private final AtomicBoolean stateMachineHealthy;
-
-  private final Semaphore applyTransactionSemaphore;
-  private final boolean waitOnBothFollowers;
-  private final HddsDatanodeService datanodeService;
-  private static Semaphore semaphore = new Semaphore(1);
-  private final AtomicBoolean peersValidated;
-
-  /**
-   * CSM metrics.
-   */
-  private final CSMMetrics metrics;
-
   @SuppressWarnings("parameternumber")
   public ContainerStateMachine(HddsDatanodeService hddsDatanodeService, RaftGroupId gid,
       ContainerDispatcher dispatcher,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
index b17f3ab116..f9ee0a4bd0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java
@@ -41,6 +41,21 @@ public final class DispatcherContext {
   private static final DispatcherContext HANDLE_PUT_SMALL_FILE
       = newBuilder(Op.HANDLE_PUT_SMALL_FILE).build();
 
+  private final Op op;
+  // whether the chunk data needs to be written or committed or both
+  private final WriteChunkStage stage;
+  // which term the request is being served in Ratis
+  private final long term;
+  // the log index in Ratis log to which the request belongs to
+  private final long logIndex;
+
+  private final Map<Long, Long> container2BCSIDMap;
+
+  private final boolean releaseSupported;
+  private volatile Runnable releaseMethod;
+
+  private final long startTime = Time.monotonicNowNanos();
+
   public static DispatcherContext getHandleReadChunk() {
     return HANDLE_READ_CHUNK;
   }
@@ -109,21 +124,6 @@ public static Op op(DispatcherContext context) {
     return context == null ? Op.NULL : context.getOp();
   }
 
-  private final Op op;
-  // whether the chunk data needs to be written or committed or both
-  private final WriteChunkStage stage;
-  // which term the request is being served in Ratis
-  private final long term;
-  // the log index in Ratis log to which the request belongs to
-  private final long logIndex;
-
-  private final Map<Long, Long> container2BCSIDMap;
-
-  private final boolean releaseSupported;
-  private volatile Runnable releaseMethod;
-
-  private final long startTime = Time.monotonicNowNanos();
-
   private DispatcherContext(Builder b) {
     this.op = Objects.requireNonNull(b.op, "op == null");
     this.term = b.term;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 0170a802c9..88cb0c78fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -123,37 +123,11 @@
  * Ozone containers.
  */
 public final class XceiverServerRatis implements XceiverServerSpi {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(XceiverServerRatis.class);
-
-  private static class ActivePipelineContext {
-    /** The current datanode is the current leader of the pipeline. */
-    private final boolean isPipelineLeader;
-    /** The heartbeat containing pipeline close action has been triggered. */
-    private final boolean isPendingClose;
-
-    ActivePipelineContext(boolean isPipelineLeader, boolean isPendingClose) {
-      this.isPipelineLeader = isPipelineLeader;
-      this.isPendingClose = isPendingClose;
-    }
-
-    public boolean isPipelineLeader() {
-      return isPipelineLeader;
-    }
-
-    public boolean isPendingClose() {
-      return isPendingClose;
-    }
-  }
+  private static final Logger LOG = LoggerFactory.getLogger(XceiverServerRatis.class);
 
   private static final AtomicLong CALL_ID_COUNTER = new AtomicLong();
   private static final List<Integer> DEFAULT_PRIORITY_LIST =
-      new ArrayList<>(
-          Collections.nCopies(HddsProtos.ReplicationFactor.THREE_VALUE, 0));
-
-  private static long nextCallId() {
-    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
-  }
+      new ArrayList<>(Collections.nCopies(HddsProtos.ReplicationFactor.THREE_VALUE, 0));
 
   private int serverPort;
   private int adminPort;
@@ -179,6 +153,30 @@ private static long nextCallId() {
   private final DatanodeRatisServerConfig ratisServerConfig;
   private final HddsDatanodeService datanodeService;
 
+  private static class ActivePipelineContext {
+    /** The current datanode is the current leader of the pipeline. */
+    private final boolean isPipelineLeader;
+    /** The heartbeat containing pipeline close action has been triggered. */
+    private final boolean isPendingClose;
+
+    ActivePipelineContext(boolean isPipelineLeader, boolean isPendingClose) {
+      this.isPipelineLeader = isPipelineLeader;
+      this.isPendingClose = isPendingClose;
+    }
+
+    public boolean isPipelineLeader() {
+      return isPipelineLeader;
+    }
+
+    public boolean isPendingClose() {
+      return isPendingClose;
+    }
+  }
+
+  private static long nextCallId() {
+    return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE;
+  }
+
   private XceiverServerRatis(HddsDatanodeService hddsDatanodeService, DatanodeDetails dd,
       ContainerDispatcher dispatcher, ContainerController containerController,
       StateContext context, ConfigurationSource conf, Parameters parameters)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
index 2ca34d83fa..113062e0b9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/DiskCheckUtil.java
@@ -42,12 +42,13 @@
  * where the disk is mounted.
  */
 public final class DiskCheckUtil {
-  private DiskCheckUtil() { }
-
   // For testing purposes, an alternate check implementation can be provided
   // to inject failures.
   private static DiskChecks impl = new DiskChecksImpl();
 
+  private DiskCheckUtil() {
+  }
+
   @VisibleForTesting
   public static void setTestImpl(DiskChecks diskChecks) {
     impl = diskChecks;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index fe0fa89692..2b28bc9c84 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -66,11 +66,9 @@
  * During DN startup, if the VERSION file exists, we verify that the
  * clusterID in the version file matches the clusterID from SCM.
  */
-public abstract class StorageVolume
-    implements Checkable<Boolean, VolumeCheckResult> {
+public abstract class StorageVolume implements Checkable<Boolean, VolumeCheckResult> {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StorageVolume.class);
+  private static final Logger LOG = LoggerFactory.getLogger(StorageVolume.class);
 
   // The name of the directory used for temporary files on the volume.
   public static final String TMP_DIR_NAME = "tmp";
@@ -78,36 +76,6 @@ public abstract class StorageVolume
   // health are written to. This will go inside the tmp directory.
   public static final String TMP_DISK_CHECK_DIR_NAME = "disk-check";
 
-  /**
-   * Type for StorageVolume.
-   */
-  public enum VolumeType {
-    DATA_VOLUME,
-    META_VOLUME,
-    DB_VOLUME,
-  }
-
-  /**
-   * VolumeState represents the different states a StorageVolume can be in.
-   * NORMAL          =&gt; Volume can be used for storage
-   * FAILED          =&gt; Volume has failed due and can no longer be used for
-   *                    storing containers.
-   * NON_EXISTENT    =&gt; Volume Root dir does not exist
-   * INCONSISTENT    =&gt; Volume Root dir is not empty but VERSION file is
-   *                    missing or Volume Root dir is not a directory
-   * NOT_FORMATTED   =&gt; Volume Root exists but not formatted(no VERSION file)
-   * NOT_INITIALIZED =&gt; VERSION file exists but has not been verified for
-   *                    correctness.
-   */
-  public enum VolumeState {
-    NORMAL,
-    FAILED,
-    NON_EXISTENT,
-    INCONSISTENT,
-    NOT_FORMATTED,
-    NOT_INITIALIZED
-  }
-
   private volatile VolumeState state;
 
   // VERSION file properties
@@ -144,6 +112,36 @@ public enum VolumeState {
   private Queue<Boolean> ioTestSlidingWindow;
   private int healthCheckFileSize;
 
+  /**
+   * Type for StorageVolume.
+   */
+  public enum VolumeType {
+    DATA_VOLUME,
+    META_VOLUME,
+    DB_VOLUME,
+  }
+
+  /**
+   * VolumeState represents the different states a StorageVolume can be in.
+   * NORMAL          =&gt; Volume can be used for storage
+   * FAILED          =&gt; Volume has failed due and can no longer be used for
+   *                    storing containers.
+   * NON_EXISTENT    =&gt; Volume Root dir does not exist
+   * INCONSISTENT    =&gt; Volume Root dir is not empty but VERSION file is
+   *                    missing or Volume Root dir is not a directory
+   * NOT_FORMATTED   =&gt; Volume Root exists but not formatted(no VERSION file)
+   * NOT_INITIALIZED =&gt; VERSION file exists but has not been verified for
+   *                    correctness.
+   */
+  public enum VolumeState {
+    NORMAL,
+    FAILED,
+    NON_EXISTENT,
+    INCONSISTENT,
+    NOT_FORMATTED,
+    NOT_INITIALIZED
+  }
+
   protected StorageVolume(Builder<?> b) throws IOException {
     storageType = b.storageType;
     volumeRoot = b.volumeRootStr;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index 4f0aeef266..13388e943c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -74,10 +74,12 @@
  * log4j.appender.inspectorAppender.layout=org.apache.log4j.PatternLayout
  */
 public class KeyValueContainerMetadataInspector implements ContainerInspector {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyValueContainerMetadataInspector.class);
-  public static final Logger REPORT_LOG = LoggerFactory.getLogger(
-      "ContainerMetadataInspectorReport");
+  private static final Logger LOG = LoggerFactory.getLogger(KeyValueContainerMetadataInspector.class);
+  public static final Logger REPORT_LOG = LoggerFactory.getLogger("ContainerMetadataInspectorReport");
+
+  public static final String SYSTEM_PROPERTY = "ozone.datanode.container.metadata.inspector";
+
+  private Mode mode;
 
   /**
    * The mode to run the inspector in.
@@ -99,11 +101,6 @@ public String toString() {
     }
   }
 
-  public static final String SYSTEM_PROPERTY = "ozone.datanode.container" +
-      ".metadata.inspector";
-
-  private Mode mode;
-
   public KeyValueContainerMetadataInspector(Mode mode) {
     this.mode = mode;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index ab87875dbd..4b05db71c1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -52,14 +52,12 @@
 
 public final class KeyValueContainerUtil {
 
+  private static final Logger LOG = LoggerFactory.getLogger(KeyValueContainerUtil.class);
+
   /* Never constructed. */
   private KeyValueContainerUtil() {
-
   }
 
-  private static final Logger LOG = LoggerFactory.getLogger(
-      KeyValueContainerUtil.class);
-
   /**
    * creates metadata path, chunks path and metadata DB for the specified
    * container.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
index 97236887a1..fe4bbd1478 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -37,15 +37,10 @@
  * This class is used to get the DataChannel for streaming.
  */
 public class KeyValueStreamDataChannel extends StreamDataChannelBase {
-  static final Logger LOG =
-      LoggerFactory.getLogger(KeyValueStreamDataChannel.class);
+  static final Logger LOG = LoggerFactory.getLogger(KeyValueStreamDataChannel.class);
 
-  interface WriteMethod {
-    int applyAsInt(ByteBuffer src) throws IOException;
-  }
+  private final Buffers buffers = new Buffers(BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX);
 
-  private final Buffers buffers = new Buffers(
-      BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX);
   private final AtomicBoolean closed = new AtomicBoolean();
 
   KeyValueStreamDataChannel(File file, ContainerData containerData,
@@ -158,4 +153,8 @@ static void setEndIndex(ByteBuf b) {
     // set index for reading data
     b.writerIndex(protoIndex);
   }
+
+  interface WriteMethod {
+    int applyAsInt(ByteBuffer src) throws IOException;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
index 8cc27d7b9b..df6a572ae4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
@@ -33,8 +33,7 @@
  * version 2, where the block data, metadata, and transactions which are to be
  * deleted are put in their own separate column families.
  */
-public class DatanodeSchemaTwoDBDefinition
-    extends AbstractDatanodeDBDefinition
+public class DatanodeSchemaTwoDBDefinition extends AbstractDatanodeDBDefinition
     implements DBDefinition.WithMapInterface {
 
   public static final DBColumnFamilyDefinition<String, BlockData>
@@ -72,11 +71,6 @@ public class DatanodeSchemaTwoDBDefinition
           FixedLengthStringCodec.get(),
           BlockData.getCodec());
 
-  public DatanodeSchemaTwoDBDefinition(String dbPath,
-      ConfigurationSource config) {
-    super(dbPath, config);
-  }
-
   private static final Map<String, DBColumnFamilyDefinition<?, ?>>
       COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap(
           BLOCK_DATA,
@@ -85,6 +79,10 @@ public DatanodeSchemaTwoDBDefinition(String dbPath,
           FINALIZE_BLOCKS,
           LAST_CHUNK_INFO);
 
+  public DatanodeSchemaTwoDBDefinition(String dbPath, ConfigurationSource config) {
+    super(dbPath, config);
+  }
+
   @Override
   public Map<String, DBColumnFamilyDefinition<?, ?>> getMap() {
     return COLUMN_FAMILIES;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
index 6207265e10..d080f92c99 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/AbstractReplicationTask.java
@@ -30,18 +30,6 @@
  */
 public abstract class AbstractReplicationTask {
 
-  /**
-   * ENUM representing the different status values a replication task can
-   * have.
-   */
-  public enum Status {
-    QUEUED,
-    IN_PROGRESS,
-    FAILED,
-    DONE,
-    SKIPPED
-  }
-
   private volatile Status status = Status.QUEUED;
 
   private final long containerId;
@@ -162,4 +150,16 @@ public String toString() {
     }
     return sb.toString();
   }
+
+  /**
+   * ENUM representing the different status values a replication task can
+   * have.
+   */
+  public enum Status {
+    QUEUED,
+    IN_PROGRESS,
+    FAILED,
+    DONE,
+    SKIPPED
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/CopyContainerCompression.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/CopyContainerCompression.java
index 586c10d57a..41c575bbd6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/CopyContainerCompression.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/CopyContainerCompression.java
@@ -51,18 +51,16 @@ public OutputStream wrap(OutputStream output) {
   SNAPPY(CompressorStreamFactory.SNAPPY_FRAMED),
   ZSTD(CompressorStreamFactory.ZSTANDARD);
 
+  private static final Logger LOG = LoggerFactory.getLogger(CopyContainerCompression.class);
+
+  private static final CopyContainerCompression DEFAULT_COMPRESSION = CopyContainerCompression.NO_COMPRESSION;
+
   private final String compressorFactoryName;
 
   CopyContainerCompression(String compressorFactoryName) {
     this.compressorFactoryName = compressorFactoryName;
   }
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(CopyContainerCompression.class);
-
-  private static final CopyContainerCompression DEFAULT_COMPRESSION =
-      CopyContainerCompression.NO_COMPRESSION;
-
   public static CopyContainerCompression getConf(ConfigurationSource conf) {
     try {
       return conf.getEnum(HDDS_CONTAINER_REPLICATION_COMPRESSION,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 33c188c6d7..9ef89afd3c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -84,9 +84,6 @@
  */
 public final class ContainerTestUtils {
 
-  private ContainerTestUtils() {
-  }
-
   public static final DispatcherContext WRITE_STAGE = DispatcherContext
       .newBuilder(DispatcherContext.Op.WRITE_STATE_MACHINE_DATA)
       .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
@@ -101,6 +98,14 @@ private ContainerTestUtils() {
   public static final DispatcherContext COMBINED_STAGE
       = DispatcherContext.getHandleWriteChunk();
 
+  private static final ContainerDispatcher NOOP_CONTAINER_DISPATCHER = new NoopContainerDispatcher();
+
+  private static final ContainerController EMPTY_CONTAINER_CONTROLLER
+      = new ContainerController(ContainerImplTestUtils.newContainerSet(), Collections.emptyMap());
+
+  private ContainerTestUtils() {
+  }
+
   /**
    * Creates an Endpoint class for testing purpose.
    *
@@ -331,16 +336,10 @@ public void setClusterId(String clusterId) {
     }
   }
 
-  private static final ContainerDispatcher NOOP_CONTAINER_DISPATCHER
-      = new NoopContainerDispatcher();
-
   public static ContainerDispatcher getNoopContainerDispatcher() {
     return NOOP_CONTAINER_DISPATCHER;
   }
 
-  private static final ContainerController EMPTY_CONTAINER_CONTROLLER
-      = new ContainerController(ContainerImplTestUtils.newContainerSet(), Collections.emptyMap());
-
   public static ContainerController getEmptyContainerController() {
     return EMPTY_CONTAINER_CONTROLLER;
   }
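
Both fixtures stay private and are reachable only through the two accessors
kept above, so tests share one no-op dispatcher and one empty controller
instead of re-creating them. A caller-side sketch (the surrounding test
method is assumed):

    ContainerDispatcher dispatcher = ContainerTestUtils.getNoopContainerDispatcher();
    ContainerController controller = ContainerTestUtils.getEmptyContainerController();
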
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 2218dbc5f4..708cd3240d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -57,6 +57,13 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   private String clusterId;
   private String scmId;
 
+  // Map of datanode to containers
+  private Map<DatanodeDetails, Map<String, ContainerReplicaProto>> nodeContainers = new HashMap<>();
+  private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
+  private AtomicInteger commandStatusReport = new AtomicInteger(0);
+  private List<CommandStatus> cmdStatusList = new ArrayList<>();
+  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
+
   public ScmTestMock() {
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
@@ -66,15 +73,6 @@ public ScmTestMock(String clusterId) {
     this.clusterId = clusterId;
     this.scmId = UUID.randomUUID().toString();
   }
-
-  // Map of datanode to containers
-  private Map<DatanodeDetails,
-      Map<String, ContainerReplicaProto>> nodeContainers =
-      new HashMap<>();
-  private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
-  private AtomicInteger commandStatusReport = new AtomicInteger(0);
-  private List<CommandStatus> cmdStatusList = new ArrayList<>();
-  private List<SCMCommandProto> scmCommandRequests = new ArrayList<>();
   /**
    * Returns the number of heartbeats made to this class.
    *
@@ -325,7 +323,6 @@ public void reset() {
     rpcCount.set(0);
     containerReportsCount.set(0);
     nodeContainers.clear();
-
   }
 
   public int getCommandStatusReportCount() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
index 3685fbea69..81c9e48c4b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -37,6 +37,8 @@
 public class TestBlockData {
   static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
 
+  private static int chunkCount = 0;
+
   static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
       long len) {
     return ContainerProtos.ChunkInfo.newBuilder()
@@ -64,7 +66,6 @@ public void testAddAndRemove() {
     }
   }
 
-  private static int chunkCount = 0;
   static ContainerProtos.ChunkInfo addChunk(
       List<ContainerProtos.ChunkInfo> expected, long offset) {
     final long length = ThreadLocalRandom.current().nextLong(1000);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java
index bb336482c3..3d9e5df1a8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerTestVersionInfo.java
@@ -39,16 +39,7 @@
  */
 public class ContainerTestVersionInfo {
 
-  /**
-   * Composite annotation for tests parameterized with {@link ContainerTestVersionInfo}.
-   */
-  @Target(ElementType.METHOD)
-  @Retention(RetentionPolicy.RUNTIME)
-  @ParameterizedTest
-  @MethodSource("org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo#getLayoutList")
-  public @interface ContainerTest {
-    // composite annotation
-  }
+  private static List<ContainerTestVersionInfo> layoutList = new ArrayList<>();
 
   private static final String[] SCHEMA_VERSIONS = new String[] {
       null,
@@ -57,24 +48,34 @@ public class ContainerTestVersionInfo {
       OzoneConsts.SCHEMA_V3,
   };
 
+  static {
+    for (ContainerLayoutVersion ch : ContainerLayoutVersion.getAllVersions()) {
+      for (String sch : SCHEMA_VERSIONS) {
+        layoutList.add(new ContainerTestVersionInfo(sch, ch));
+      }
+    }
+  }
+
   private final String schemaVersion;
   private final ContainerLayoutVersion layout;
 
+  /**
+   * Composite annotation for tests parameterized with {@link ContainerTestVersionInfo}.
+   */
+  @Target(ElementType.METHOD)
+  @Retention(RetentionPolicy.RUNTIME)
+  @ParameterizedTest
+  @MethodSource("org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo#getLayoutList")
+  public @interface ContainerTest {
+    // composite annotation
+  }
+
   public ContainerTestVersionInfo(String schemaVersion,
       ContainerLayoutVersion layout) {
     this.schemaVersion = schemaVersion;
     this.layout = layout;
   }
 
-  private static List<ContainerTestVersionInfo> layoutList = new ArrayList<>();
-  static {
-    for (ContainerLayoutVersion ch : ContainerLayoutVersion.getAllVersions()) {
-      for (String sch : SCHEMA_VERSIONS) {
-        layoutList.add(new ContainerTestVersionInfo(sch, ch));
-      }
-    }
-  }
-
   public String getSchemaVersion() {
     return this.schemaVersion;
   }
@@ -91,6 +92,7 @@ public String toString() {
   public static List<ContainerTestVersionInfo> getLayoutList() {
     return layoutList;
   }
+
   public static void setTestSchemaVersion(String schemaVersion,
       OzoneConfiguration conf) {
     if (isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3)) {
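
For context, a method annotated with @ContainerTestVersionInfo.ContainerTest
runs once per (schema version, layout version) pair produced by the static
block above, and setTestSchemaVersion applies the pair to the test
configuration. A usage sketch modeled on tests elsewhere in this patch (the
method name and the conf variable are assumptions):

    @ContainerTestVersionInfo.ContainerTest
    public void testPerVersion(ContainerTestVersionInfo versionInfo) {
      // Invoked once per entry of getLayoutList(); getSchemaVersion() may be
      // null for the pre-schema entry in SCHEMA_VERSIONS.
      ContainerTestVersionInfo.setTestSchemaVersion(
          versionInfo.getSchemaVersion(), conf);
    }
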
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index e98be4881a..d782db01e3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -49,10 +49,12 @@
 /**
  * Tests for {@link KeyValueContainerMetadataInspector}.
  */
-public class TestKeyValueContainerMetadataInspector
-    extends TestKeyValueContainerIntegrityChecks {
+public class TestKeyValueContainerMetadataInspector extends TestKeyValueContainerIntegrityChecks {
   private static final long CONTAINER_ID = 102;
 
+  static final DeletedBlocksTransactionGeneratorForTesting GENERATOR =
+      new DeletedBlocksTransactionGeneratorForTesting();
+
   @ContainerTestVersionInfo.ContainerTest
   public void testRunDisabled(ContainerTestVersionInfo versionInfo)
       throws Exception {
@@ -193,9 +195,6 @@ List<DeletedBlocksTransaction> generate(
     }
   }
 
-  static final DeletedBlocksTransactionGeneratorForTesting GENERATOR
-      = new DeletedBlocksTransactionGeneratorForTesting();
-
   @ContainerTestVersionInfo.ContainerTest
   public void testCorrectDeleteWithTransaction(
       ContainerTestVersionInfo versionInfo) throws Exception {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/UpgradeTestHelper.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/UpgradeTestHelper.java
index 04bfdf3076..e45a5243a2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/UpgradeTestHelper.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/UpgradeTestHelper.java
@@ -54,9 +54,10 @@
  * Helpers for upgrade tests.
  */
 public final class UpgradeTestHelper {
+  private static final Random RANDOM = new Random();
+
   private UpgradeTestHelper() {
   }
-  private static final Random RANDOM = new Random();
 
   /**
    * Starts the datanode with the fore layout version, and calls the version


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
