This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new bcef7aa18d HDDS-9708. Refactor unit tests to reuse DispatcherContext. (#5617)
bcef7aa18d is described below

commit bcef7aa18d723ebac8b6706f186217cbce2b494c
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Thu Nov 16 23:56:09 2023 -0800

    HDDS-9708. Refactor unit tests to reuse DispatcherContext. (#5617)
---
 .../BlockDeletingServiceTestImpl.java              | 13 +++++-----
 .../ozone/container/common/ContainerTestUtils.java | 16 ++++++++++++
 .../container/common/TestBlockDeletingService.java | 17 +++---------
 .../TestSchemaOneBackwardsCompatibility.java       |  1 -
 .../TestSchemaTwoBackwardsCompatibility.java       | 12 ++-------
 .../container/common/impl/TestHddsDispatcher.java  |  9 ++-----
 .../TestKeyValueContainerIntegrityChecks.java      | 14 +++-------
 .../container/keyvalue/TestKeyValueHandler.java    | 30 ++++++++++------------
 .../keyvalue/impl/AbstractTestChunkManager.java    |  5 ----
 .../keyvalue/impl/CommonChunkManagerTestCases.java | 26 +++++++++----------
 .../keyvalue/impl/TestChunkManagerDummyImpl.java   |  8 +++---
 .../keyvalue/impl/TestFilePerBlockStrategy.java    | 15 +++++------
 .../keyvalue/impl/TestFilePerChunkStrategy.java    |  9 +++----
 .../ozone/container/testutils/package-info.java    | 18 -------------
 hadoop-hdds/tools/pom.xml                          |  2 ++
 .../cli/container/upgrade/TestUpgradeManager.java  | 11 ++------
 16 files changed, 77 insertions(+), 129 deletions(-)
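
The net effect of the diff below: the DispatcherContext fixtures (WRITE_STAGE, COMMIT_STAGE, COMBINED_STAGE) now live once on ContainerTestUtils, and the per-class constants and getDispatcherContext() helpers are deleted. A minimal sketch of the shared two-stage write pattern after this change — the class and method names here are hypothetical, and the fixtures are assumed to come from the usual test scaffolding, not defined in this sketch:

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;

import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;

/** Sketch only (not part of this commit): the shared two-stage chunk write. */
final class TwoStageWriteSketch {
  private TwoStageWriteSketch() {
  }

  static void writeAndCommit(ChunkManager chunkManager, KeyValueContainer container,
      BlockID blockID, ChunkInfo info, byte[] chunkData) throws IOException {
    // Stage 1: WRITE_DATA writes the bytes (a temporary chunk file for the
    // file-per-chunk layout, per the TestFilePerChunkStrategy comments below).
    chunkManager.writeChunk(container, blockID, info,
        ByteBuffer.wrap(chunkData), WRITE_STAGE);
    // Stage 2: COMMIT_DATA finalizes the same chunk; no extra bytes are written
    // (the tests assert unchanged write IO stats after the commit stage).
    chunkManager.writeChunk(container, blockID, info,
        ByteBuffer.wrap(chunkData), COMMIT_STAGE);
  }
}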

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/BlockDeletingServiceTestImpl.java
similarity index 91%
rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/BlockDeletingServiceTestImpl.java
index fdf4dd0d57..fef68c9894 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/BlockDeletingServiceTestImpl.java
@@ -14,7 +14,7 @@
 * License for the specific language governing permissions and limitations under
  * the License.
  */
-package org.apache.hadoop.ozone.container.testutils;
+package org.apache.hadoop.ozone.container.common;
 
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
@@ -31,8 +31,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 /**
  * A test class implementation for {@link BlockDeletingService}.
  */
-public class BlockDeletingServiceTestImpl
-    extends BlockDeletingService {
+class BlockDeletingServiceTestImpl extends BlockDeletingService {
 
   // the service timeout
   private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
@@ -42,14 +41,14 @@ public class BlockDeletingServiceTestImpl
   private Thread testingThread;
   private AtomicInteger numOfProcessed = new AtomicInteger(0);
 
-  public BlockDeletingServiceTestImpl(OzoneContainer container,
+  BlockDeletingServiceTestImpl(OzoneContainer container,
       int serviceInterval, ConfigurationSource conf) {
     super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
         TimeUnit.MILLISECONDS, 10, conf);
   }
 
   @VisibleForTesting
-  public void runDeletingTasks() {
+  void runDeletingTasks() {
     if (latch.getCount() > 0) {
       this.latch.countDown();
     } else {
@@ -58,11 +57,11 @@ public class BlockDeletingServiceTestImpl
   }
 
   @VisibleForTesting
-  public boolean isStarted() {
+  boolean isStarted() {
     return latch != null && testingThread.isAlive();
   }
 
-  public int getTimesOfProcessed() {
+  int getTimesOfProcessed() {
     return numOfProcessed.get();
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 71773c41a2..fd592022f3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -85,6 +85,22 @@ public final class ContainerTestUtils {
   private ContainerTestUtils() {
   }
 
+  public static final DispatcherContext WRITE_STAGE
+      = new DispatcherContext.Builder()
+      .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
+      .build();
+
+  public static final DispatcherContext COMMIT_STAGE
+      = new DispatcherContext.Builder()
+      .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
+      .setContainer2BCSIDMap(Collections.emptyMap())
+      .build();
+
+  public static final DispatcherContext COMBINED_STAGE
+      = new DispatcherContext.Builder()
+      .setStage(DispatcherContext.WriteChunkStage.COMBINED)
+      .build();
+
   /**
    * Creates an Endpoint class for testing purpose.
    *
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 694e6637df..2f953e4ced 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
@@ -70,7 +69,6 @@ import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
@@ -109,6 +107,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVI
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
 import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
 import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG;
@@ -128,9 +128,8 @@ public class TestBlockDeletingService {
 
   private File testRoot;
   private String scmId;
-  private String clusterID;
   private String datanodeUuid;
-  private OzoneConfiguration conf;
+  private final OzoneConfiguration conf = new OzoneConfiguration();
 
   private final ContainerLayoutVersion layout;
   private final String schemaVersion;
@@ -140,7 +139,6 @@ public class TestBlockDeletingService {
   public TestBlockDeletingService(ContainerTestVersionInfo versionInfo) {
     this.layout = versionInfo.getLayout();
     this.schemaVersion = versionInfo.getSchemaVersion();
-    conf = new OzoneConfiguration();
     ContainerTestVersionInfo.setTestSchemaVersion(schemaVersion, conf);
   }
 
@@ -159,7 +157,6 @@ public class TestBlockDeletingService {
       FileUtils.cleanDirectory(testRoot);
     }
     scmId = UUID.randomUUID().toString();
-    clusterID = UUID.randomUUID().toString();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
     datanodeUuid = UUID.randomUUID().toString();
@@ -175,14 +172,6 @@ public class TestBlockDeletingService {
     CodecBuffer.assertNoLeaks();
   }
 
-  private static final DispatcherContext WRITE_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
-
-  private static final DispatcherContext COMMIT_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
-
   /**
    * A helper method to create some blocks and put them under deletion
    * state for testing. This method directly updates container.db and
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index da9dd88c41..b28c59d7e8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.SchemaOneDeletedBlocksTable;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index 60050ece90..a828c1e692 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
@@ -53,7 +52,6 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -74,6 +72,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTA
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COUNT;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_BYTES_USED;
 import static org.apache.hadoop.ozone.OzoneConsts.PENDING_DELETE_BLOCK_COUNT;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
@@ -116,14 +116,6 @@ public class TestSchemaTwoBackwardsCompatibility {
   private static final byte[] SAMPLE_DATA =
       randomAlphanumeric(1024).getBytes(UTF_8);
 
-  private static final DispatcherContext WRITE_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
-
-  private static final DispatcherContext COMMIT_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
-
   @BeforeEach
   public void setup() throws Exception {
     conf = new OzoneConfiguration();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index fde9d46917..796d6a04cf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
@@ -83,6 +82,7 @@ import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
 import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
@@ -315,16 +315,11 @@ public class TestHddsDispatcher {
           hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest), null);
       Assert.assertEquals(
           ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult());
-      DispatcherContext dispatcherContext =
-          new DispatcherContext.Builder()
-              .setContainer2BCSIDMap(Collections.emptyMap())
-              .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
-              .build();
 
       GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
           .captureLogs(HddsDispatcher.LOG);
       // send write chunk request without sending create container
-      response = hddsDispatcher.dispatch(writeChunkRequest, dispatcherContext);
+      response = hddsDispatcher.dispatch(writeChunkRequest, COMMIT_STAGE);
       // container should not be found
       Assert.assertEquals(
           ContainerProtos.Result.CONTAINER_NOT_FOUND, response.getResult());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 49dc2e9554..898ea74c27 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@@ -50,6 +49,8 @@ import java.util.UUID;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded;
 import static org.junit.Assert.assertNotNull;
 
@@ -66,7 +67,6 @@ public class TestKeyValueContainerIntegrityChecks {
   private OzoneConfiguration conf;
   private File testRoot;
   private ChunkManager chunkManager;
-  private String datanodeID = UUID.randomUUID().toString();
   private String clusterID = UUID.randomUUID().toString();
 
   protected static final int UNIT_LEN = 1024;
@@ -133,12 +133,6 @@ public class TestKeyValueContainerIntegrityChecks {
         bytesPerChecksum);
    byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
     ChecksumData checksumData = checksum.computeChecksum(chunkData);
-    DispatcherContext writeStage = new DispatcherContext.Builder()
-        .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
-        .build();
-    DispatcherContext commitStage = new DispatcherContext.Builder()
-        .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
-        .build();
 
    KeyValueContainerData containerData = new KeyValueContainerData(containerId,
         containerLayoutTestInfo.getLayout(),
@@ -166,9 +160,9 @@ public class TestKeyValueContainerIntegrityChecks {
           info.setChecksumData(checksumData);
           chunkList.add(info.getProtoBufMessage());
           chunkManager.writeChunk(container, blockID, info,
-              ByteBuffer.wrap(chunkData), writeStage);
+              ByteBuffer.wrap(chunkData), WRITE_STAGE);
           chunkManager.writeChunk(container, blockID, info,
-              ByteBuffer.wrap(chunkData), commitStage);
+              ByteBuffer.wrap(chunkData), COMMIT_STAGE);
         }
         blockData.setChunks(chunkList);
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index e484c3334e..b1cd976b40 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -147,9 +146,8 @@ public class TestKeyValueHandler {
 
     KeyValueContainer container = Mockito.mock(KeyValueContainer.class);
 
-    DispatcherContext context = new DispatcherContext.Builder().build();
     KeyValueHandler
-        .dispatchRequest(handler, createContainerRequest, container, context);
+        .dispatchRequest(handler, createContainerRequest, container, null);
     Mockito.verify(handler, times(0)).handleListBlock(
         any(ContainerCommandRequestProto.class), any());
 
@@ -157,7 +155,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto readContainerRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ReadContainer);
     KeyValueHandler
-        .dispatchRequest(handler, readContainerRequest, container, context);
+        .dispatchRequest(handler, readContainerRequest, container, null);
     Mockito.verify(handler, times(1)).handleReadContainer(
         any(ContainerCommandRequestProto.class), any());
 
@@ -165,7 +163,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto updateContainerRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.UpdateContainer);
     KeyValueHandler
-        .dispatchRequest(handler, updateContainerRequest, container, context);
+        .dispatchRequest(handler, updateContainerRequest, container, null);
     Mockito.verify(handler, times(1)).handleUpdateContainer(
         any(ContainerCommandRequestProto.class), any());
 
@@ -173,7 +171,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto deleteContainerRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.DeleteContainer);
     KeyValueHandler
-        .dispatchRequest(handler, deleteContainerRequest, container, context);
+        .dispatchRequest(handler, deleteContainerRequest, container, null);
     Mockito.verify(handler, times(1)).handleDeleteContainer(
         any(ContainerCommandRequestProto.class), any());
 
@@ -181,7 +179,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto listContainerRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ListContainer);
     KeyValueHandler
-        .dispatchRequest(handler, listContainerRequest, container, context);
+        .dispatchRequest(handler, listContainerRequest, container, null);
     Mockito.verify(handler, times(1)).handleUnsupportedOp(
         any(ContainerCommandRequestProto.class));
 
@@ -189,7 +187,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto closeContainerRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.CloseContainer);
     KeyValueHandler
-        .dispatchRequest(handler, closeContainerRequest, container, context);
+        .dispatchRequest(handler, closeContainerRequest, container, null);
     Mockito.verify(handler, times(1)).handleCloseContainer(
         any(ContainerCommandRequestProto.class), any());
 
@@ -197,7 +195,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto putBlockRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
     KeyValueHandler
-        .dispatchRequest(handler, putBlockRequest, container, context);
+        .dispatchRequest(handler, putBlockRequest, container, null);
     Mockito.verify(handler, times(1)).handlePutBlock(
         any(ContainerCommandRequestProto.class), any(), any());
 
@@ -205,7 +203,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto getBlockRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
     KeyValueHandler
-        .dispatchRequest(handler, getBlockRequest, container, context);
+        .dispatchRequest(handler, getBlockRequest, container, null);
     Mockito.verify(handler, times(1)).handleGetBlock(
         any(ContainerCommandRequestProto.class), any());
 
@@ -215,7 +213,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto listBlockRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
     KeyValueHandler
-        .dispatchRequest(handler, listBlockRequest, container, context);
+        .dispatchRequest(handler, listBlockRequest, container, null);
     Mockito.verify(handler, times(1)).handleUnsupportedOp(
         any(ContainerCommandRequestProto.class));
 
@@ -223,7 +221,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto readChunkRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ReadChunk);
     KeyValueHandler
-        .dispatchRequest(handler, readChunkRequest, container, context);
+        .dispatchRequest(handler, readChunkRequest, container, null);
     Mockito.verify(handler, times(1)).handleReadChunk(
         any(ContainerCommandRequestProto.class), any(), any());
 
@@ -234,7 +232,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto writeChunkRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.WriteChunk);
     KeyValueHandler
-        .dispatchRequest(handler, writeChunkRequest, container, context);
+        .dispatchRequest(handler, writeChunkRequest, container, null);
     Mockito.verify(handler, times(1)).handleWriteChunk(
         any(ContainerCommandRequestProto.class), any(), any());
 
@@ -242,7 +240,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto listChunkRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.ListChunk);
     KeyValueHandler
-        .dispatchRequest(handler, listChunkRequest, container, context);
+        .dispatchRequest(handler, listChunkRequest, container, null);
     Mockito.verify(handler, times(2)).handleUnsupportedOp(
         any(ContainerCommandRequestProto.class));
 
@@ -250,7 +248,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto putSmallFileRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.PutSmallFile);
     KeyValueHandler
-        .dispatchRequest(handler, putSmallFileRequest, container, context);
+        .dispatchRequest(handler, putSmallFileRequest, container, null);
     Mockito.verify(handler, times(1)).handlePutSmallFile(
         any(ContainerCommandRequestProto.class), any(), any());
 
@@ -258,7 +256,7 @@ public class TestKeyValueHandler {
     ContainerCommandRequestProto getSmallFileRequest =
         getDummyCommandRequestProto(ContainerProtos.Type.GetSmallFile);
     KeyValueHandler
-        .dispatchRequest(handler, getSmallFileRequest, container, context);
+        .dispatchRequest(handler, getSmallFileRequest, container, null);
     Mockito.verify(handler, times(1)).handleGetSmallFile(
         any(ContainerCommandRequestProto.class), any());
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
index d4e1963b83..28c68a7770 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
@@ -112,10 +111,6 @@ public abstract class AbstractTestChunkManager {
         .getLocalID(), 0), 0, bytes.length);
   }
 
-  protected DispatcherContext getDispatcherContext() {
-    return new DispatcherContext.Builder().build();
-  }
-
   protected Buffer rewindBufferToDataStart() {
     return data.position(header.length);
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index c67f989d35..ad85970494 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.ozone.test.GenericTestUtils;
@@ -36,6 +35,8 @@ import java.nio.ByteBuffer;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMBINED_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -60,7 +61,7 @@ public abstract class CommonChunkManagerTestCases
 
       chunkManager.writeChunk(getKeyValueContainer(), blockID, chunkInfo,
           getData(),
-          getDispatcherContext());
+          WRITE_STAGE);
 
       // THEN
       fail("testWriteChunkIncorrectLength failed");
@@ -76,7 +77,6 @@ public abstract class CommonChunkManagerTestCases
   public void testReadOversizeChunk() throws IOException {
     // GIVEN
     ChunkManager chunkManager = createTestSubject();
-    DispatcherContext dispatcherContext = getDispatcherContext();
     KeyValueContainer container = getKeyValueContainer();
     int tooLarge = OZONE_SCM_CHUNK_MAX_SIZE + 1;
     byte[] array = RandomStringUtils.randomAscii(tooLarge).getBytes(UTF_8);
@@ -94,7 +94,7 @@ public abstract class CommonChunkManagerTestCases
 
     // WHEN+THEN
     assertThrows(StorageContainerException.class, () ->
-        chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext)
+        chunkManager.readChunk(container, blockID, chunkInfo, null)
     );
   }
 
@@ -107,7 +107,7 @@ public abstract class CommonChunkManagerTestCases
 
     chunkManager.writeChunk(getKeyValueContainer(), getBlockID(),
         getChunkInfo(), getData(),
-        getDispatcherContext());
+        WRITE_STAGE);
 
     // THEN
     checkChunkFileCount(1);
@@ -119,14 +119,13 @@ public abstract class CommonChunkManagerTestCases
     // GIVEN
     ChunkManager chunkManager = createTestSubject();
     checkWriteIOStats(0, 0);
-    DispatcherContext dispatcherContext = getDispatcherContext();
     KeyValueContainer container = getKeyValueContainer();
     BlockID blockID = getBlockID();
     ChunkInfo chunkInfo = getChunkInfo();
 
     chunkManager.writeChunk(container, blockID,
         chunkInfo, getData(),
-        dispatcherContext);
+        COMBINED_STAGE);
 
     checkWriteIOStats(chunkInfo.getLen(), 1);
     checkReadIOStats(0, 0);
@@ -135,7 +134,7 @@ public abstract class CommonChunkManagerTestCases
     getBlockManager().putBlock(container, blockData);
 
     ByteBuffer expectedData = chunkManager
-        .readChunk(container, blockID, chunkInfo, dispatcherContext)
+        .readChunk(container, blockID, chunkInfo, null)
         .toByteString().asReadOnlyByteBuffer();
 
     // THEN
@@ -150,7 +149,7 @@ public abstract class CommonChunkManagerTestCases
     ChunkManager chunkManager = createTestSubject();
     chunkManager.writeChunk(getKeyValueContainer(), getBlockID(),
         getChunkInfo(), getData(),
-        getDispatcherContext());
+        COMBINED_STAGE);
     checkChunkFileCount(1);
 
     chunkManager.deleteChunk(getKeyValueContainer(), getBlockID(),
@@ -167,7 +166,7 @@ public abstract class CommonChunkManagerTestCases
     try {
       chunkManager.writeChunk(getKeyValueContainer(), getBlockID(),
           getChunkInfo(), getData(),
-          getDispatcherContext());
+          COMBINED_STAGE);
       long randomLength = 200L;
       ChunkInfo chunkInfo = new ChunkInfo(String.format("%d.data.%d",
           getBlockID().getLocalID(), 0), 0, randomLength);
@@ -191,7 +190,7 @@ public abstract class CommonChunkManagerTestCases
 
       // WHEN
       chunkManager.readChunk(getKeyValueContainer(),
-          getBlockID(), getChunkInfo(), getDispatcherContext());
+          getBlockID(), getChunkInfo(), null);
 
       // THEN
       fail("testReadChunkFileNotExists failed");
@@ -210,14 +209,13 @@ public abstract class CommonChunkManagerTestCases
     long len = getChunkInfo().getLen();
     int count = 100;
     ByteBuffer data = getData();
-    DispatcherContext context = getDispatcherContext();
 
     BlockData blockData = new BlockData(blockID);
     // WHEN
     for (int i = 0; i < count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
-      chunkManager.writeChunk(container, blockID, info, data, context);
+      chunkManager.writeChunk(container, blockID, info, data, COMBINED_STAGE);
       rewindBufferToDataStart();
       blockData.addChunk(info.getProtoBufMessage());
     }
@@ -230,7 +228,7 @@ public abstract class CommonChunkManagerTestCases
     for (int i = 0; i < count; i++) {
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i),
           i * len, len);
-      chunkManager.readChunk(container, blockID, info, context);
+      chunkManager.readChunk(container, blockID, info, null);
     }
 
     // THEN
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
index 5b87b8e820..714426108b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java
@@ -19,11 +19,11 @@
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 
 import org.apache.hadoop.ozone.common.ChunkBuffer;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.junit.jupiter.api.Test;
 
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
@@ -39,11 +39,9 @@ public class TestChunkManagerDummyImpl extends AbstractTestChunkManager {
   @Test
   public void dummyManagerDoesNotWriteToFile() throws Exception {
     ChunkManager subject = createTestSubject();
-    DispatcherContext ctx = new DispatcherContext.Builder()
-        .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
 
     subject.writeChunk(getKeyValueContainer(), getBlockID(), getChunkInfo(),
-        getData(), ctx);
+        getData(), WRITE_STAGE);
 
     checkChunkFileCount(0);
   }
@@ -53,7 +51,7 @@ public class TestChunkManagerDummyImpl extends AbstractTestChunkManager {
     ChunkManager dummy = createTestSubject();
 
     ChunkBuffer dataRead = dummy.readChunk(getKeyValueContainer(),
-        getBlockID(), getChunkInfo(), getDispatcherContext());
+        getBlockID(), getChunkInfo(), null);
 
     assertNotNull(dataRead);
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
index 97ee1d747a..304bfa7f20 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
@@ -36,6 +35,7 @@ import java.security.MessageDigest;
 
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.fail;
 
@@ -52,7 +52,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
       KeyValueContainer container = getKeyValueContainer();
       BlockID blockID = getBlockID();
       chunkManager.writeChunk(container, blockID,
-          getChunkInfo(), getData(), getDispatcherContext());
+          getChunkInfo(), getData(), WRITE_STAGE);
       ChunkInfo chunkInfo = new ChunkInfo(String.format("%d.data.%d",
           blockID.getLocalID(), 0), 123, getChunkInfo().getLen());
 
@@ -90,7 +90,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
       data.rewind();
       setDataChecksum(info, data);
       subject.writeChunk(container, blockID, info, data,
-          getDispatcherContext());
+          WRITE_STAGE);
     }
 
     // Request to read the whole data in a single go.
@@ -98,7 +98,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
         datalen * chunkCount);
     ChunkBuffer chunk =
         subject.readChunk(container, blockID, largeChunk,
-            getDispatcherContext());
+            null);
     ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer();
     MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
     newSha.update(newdata);
@@ -120,11 +120,10 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
     ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
     ChunkBuffer data = ContainerTestHelper.getData(datalen);
     setDataChecksum(info, data);
-    DispatcherContext ctx = getDispatcherContext();
     ChunkManager subject = createTestSubject();
-    subject.writeChunk(container, blockID, info, data, ctx);
+    subject.writeChunk(container, blockID, info, data, WRITE_STAGE);
 
-    ChunkBuffer readData = subject.readChunk(container, blockID, info, ctx);
+    ChunkBuffer readData = subject.readChunk(container, blockID, info, null);
     // data will be ChunkBufferImplWithByteBuffer and readData will return
     // ChunkBufferImplWithByteBufferList. Hence, convert both ByteStrings
     // before comparing.
@@ -132,7 +131,7 @@ public class TestFilePerBlockStrategy extends CommonChunkManagerTestCases {
         readData.rewind().toByteString());
 
     ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length);
-    ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, ctx);
+    ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, null);
     assertEquals(length, info2.getLen());
     assertEquals(data.rewind().toByteString().substring(start, start + length),
         readData2.rewind().toByteString());
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
index 6ed50d7a35..f83216b712 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
@@ -32,6 +31,8 @@ import org.junit.jupiter.api.Test;
 
 import java.io.File;
 
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -58,8 +59,7 @@ public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases {
     BlockID blockID = getBlockID();
     ChunkInfo chunkInfo = getChunkInfo();
     chunkManager.writeChunk(container, blockID, chunkInfo, getData(),
-        new DispatcherContext.Builder()
-            .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build());
+        WRITE_STAGE);
     // Now a chunk file is being written with Stage WRITE_DATA, so it should
     // create a temporary chunk file.
     checkChunkFileCount(1);
@@ -80,8 +80,7 @@ public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases {
     checkWriteIOStats(chunkInfo.getLen(), 1);
 
     chunkManager.writeChunk(container, blockID, chunkInfo, getData(),
-        new DispatcherContext.Builder()
-            .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build());
+        COMMIT_STAGE);
 
     checkWriteIOStats(chunkInfo.getLen(), 1);
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
deleted file mode 100644
index 0d4f6fc3cc..0000000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.testutils;
-// Helper classes for ozone and container tests.
diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml
index 16d086b176..bba23762b9 100644
--- a/hadoop-hdds/tools/pom.xml
+++ b/hadoop-hdds/tools/pom.xml
@@ -57,6 +57,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.ozone</groupId>
       <artifactId>hdds-container-service</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
     </dependency>
     <dependency>
       <artifactId>ratis-tools</artifactId>
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
index 7e60b7acbe..e5ce0a3fd8 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -63,6 +62,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE;
+import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE;
 import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -86,14 +87,6 @@ public class TestUpgradeManager {
   private FilePerBlockStrategy chunkManager;
   private ContainerSet containerSet;
 
-  private static final DispatcherContext WRITE_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
-
-  private static final DispatcherContext COMMIT_STAGE =
-      new DispatcherContext.Builder()
-          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
-
   @Before
   public void setup() throws Exception {
     DatanodeConfiguration dc = CONF.getObject(DatanodeConfiguration.class);
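
Aside, a sketch (not part of the commit) of the second pattern applied above: call sites that only read no longer build an empty DispatcherContext — they pass null, as in the readChunk and dispatchRequest hunks. The class and method names below are hypothetical, and the fixtures are assumed from the tests:

import java.io.IOException;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;

/** Sketch only: reads tolerate a null context after this refactoring. */
final class NullContextReadSketch {
  private NullContextReadSketch() {
  }

  static ChunkBuffer read(ChunkManager chunkManager, KeyValueContainer container,
      BlockID blockID, ChunkInfo chunkInfo) throws IOException {
    // The diff replaces new DispatcherContext.Builder().build() with null here.
    return chunkManager.readChunk(container, blockID, chunkInfo, null);
  }
}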

