This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 0dab553d70 HDDS-12541. Change ContainerID to value-based (#8044)
0dab553d70 is described below

commit 0dab553d702bf07f0f11e19caa0d2533e80400b4
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Tue Mar 11 09:50:43 2025 -0700

    HDDS-12541. Change ContainerID to value-based (#8044)
---
 .../hadoop/hdds/scm/container/ContainerID.java     |  40 +++---
 .../hdds/scm/container/TestContainerInfo.java      |  21 +++
 .../container/TestReplicationManagerReport.java    |  22 +--
 .../ECReconstructionCoordinator.java               |   2 +-
 .../hdds/scm/TestSCMCommonPlacementPolicy.java     |  44 +++---
 .../TestContainerReplicaPendingOps.java            | 152 ++++++++++-----------
 .../replication/TestReplicationManager.java        |   6 +-
 .../TestReplicationManagerScenarios.java           |   2 +-
 .../datanode/TestDecommissionStatusSubCommand.java |  12 +-
 .../container/TestScmApplyTransactionFailure.java  |   2 +-
 .../hdds/scm/storage/TestContainerCommandsEC.java  |   8 +-
 .../TestContainerScannerIntegrationAbstract.java   |   2 +-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  16 +--
 .../recon/api/TestNSSummaryEndpointWithFSO.java    |  12 +-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  12 +-
 .../api/TestNSSummaryEndpointWithOBSAndLegacy.java |  12 +-
 .../recon/tasks/TestContainerSizeCountTask.java    |  22 +--
 .../apache/hadoop/ozone/debug/ldb/DBScanner.java   |   2 +-
 .../freon/containergenerator/GeneratorScm.java     |   2 +-
 19 files changed, 204 insertions(+), 187 deletions(-)
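
Taken together, the change makes ContainerID value-based: the public constructor is gone, every call site moves from new ContainerID(n) to the existing ContainerID.valueOf(n) factory, and equals/hashCode/compareTo are computed directly from the wrapped long instead of going through the commons-lang3 builders. A minimal usage sketch, not part of the patch (the container number 42 is arbitrary):

    // Sketch only; 42 is an arbitrary container number.
    ContainerID id = ContainerID.valueOf(42L);          // replaces new ContainerID(42L)
    boolean same = id.equals(ContainerID.valueOf(42L)); // equality compares the underlying long
    int cmp = id.compareTo(ContainerID.valueOf(43L));   // ordering delegates to Long.compare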

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index c7985cf47d..23c8f3bb8a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -18,13 +18,15 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.base.Preconditions;
-import org.apache.commons.lang3.builder.CompareToBuilder;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
+import jakarta.annotation.Nonnull;
+import java.util.Objects;
+import java.util.function.Supplier;
+import net.jcip.annotations.Immutable;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.utils.db.Codec;
 import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
 import org.apache.hadoop.hdds.utils.db.LongCodec;
+import org.apache.ratis.util.MemoizedSupplier;
 
 /**
  * Container ID is an integer that is a value between 1..MAX_CONTAINER ID.
@@ -34,6 +36,7 @@
  * <p>
  * This class is immutable.
  */
+@Immutable
 public final class ContainerID implements Comparable<ContainerID> {
   private static final Codec<ContainerID> CODEC = new DelegatedCodec<>(
       LongCodec.get(), ContainerID::valueOf, c -> c.id,
@@ -46,16 +49,20 @@ public static Codec<ContainerID> getCodec() {
   }
 
   private final long id;
+  private final Supplier<HddsProtos.ContainerID> proto;
+  private final Supplier<Integer> hash;
 
   /**
    * Constructs ContainerID.
    *
    * @param id int
    */
-  public ContainerID(long id) {
+  private ContainerID(long id) {
     Preconditions.checkState(id >= 0,
         "Container ID should be positive. %s.", id);
     this.id = id;
+    this.proto = MemoizedSupplier.valueOf(() -> HddsProtos.ContainerID.newBuilder().setId(id).build());
+    this.hash = MemoizedSupplier.valueOf(() -> 61 * 71 + Long.hashCode(id));
   }
 
   /**
@@ -80,16 +87,12 @@ public long getId() {
     return id;
   }
 
-  /**
-   * Use proto message.
-   */
-  @Deprecated
-  public byte[] getBytes() {
+  public static byte[] getBytes(long id) {
     return LongCodec.get().toPersistedFormat(id);
   }
 
   public HddsProtos.ContainerID getProtobuf() {
-    return HddsProtos.ContainerID.newBuilder().setId(id).build();
+    return proto.get();
   }
 
   public static ContainerID getFromProtobuf(HddsProtos.ContainerID proto) {
@@ -107,25 +110,18 @@ public boolean equals(final Object o) {
     }
 
     final ContainerID that = (ContainerID) o;
-
-    return new EqualsBuilder()
-        .append(id, that.id)
-        .isEquals();
+    return this.id == that.id;
   }
 
   @Override
   public int hashCode() {
-    return new HashCodeBuilder(61, 71)
-        .append(id)
-        .toHashCode();
+    return hash.get();
   }
 
   @Override
-  public int compareTo(final ContainerID that) {
-    Preconditions.checkNotNull(that);
-    return new CompareToBuilder()
-        .append(this.id, that.id)
-        .build();
+  public int compareTo(@Nonnull final ContainerID that) {
+    Objects.requireNonNull(that, "that == null");
+    return Long.compare(this.id, that.id);
   }
 
   @Override
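
The protobuf message and the hash code are now memoized per instance through Ratis' MemoizedSupplier, so getProtobuf() builds the message at most once and repeated hashCode() calls return a cached value; together with the private constructor and the @Immutable annotation, the class stays a plain immutable wrapper around the long id. A standalone sketch of the memoization idea, assuming nothing beyond java.util.function.Supplier (the Lazy class and its field names are illustrative, not the Ratis implementation):

    import java.util.function.Supplier;

    /** Illustrative lazy cache; the real code uses org.apache.ratis.util.MemoizedSupplier. */
    final class Lazy<T> implements Supplier<T> {
      private final Supplier<T> initializer;
      private volatile T value;   // cached result; safe here because the initializer is a pure function of id

      Lazy(Supplier<T> initializer) {
        this.initializer = initializer;
      }

      @Override
      public T get() {
        T v = value;
        if (v == null) {          // may race, but every thread computes the same value
          v = initializer.get();
          value = v;
        }
        return v;
      }
    }

With this shape, a field such as proto = new Lazy<>(() -> HddsProtos.ContainerID.newBuilder().setId(id).build()) constructs the message only on first use.
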
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java
index 3f2f7f2c09..f38eceb52a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerInfo.java
@@ -28,6 +28,8 @@
 
 import java.time.Duration;
 import java.time.Instant;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -40,6 +42,25 @@
  */
 
 public class TestContainerInfo {
+  static int oldHash(long id) {
+    return new HashCodeBuilder(61, 71)
+        .append(id)
+        .toHashCode();
+  }
+
+  static void assertHash(long value) {
+    final ContainerID id = ContainerID.valueOf(value);
+    assertEquals(oldHash(value), id.hashCode(), id::toString);
+  }
+
+  @Test
+  void testContainIdHash() {
+    for (int i = 0; i < 100; i++) {
+      assertHash(i);
+      final long id = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);
+      assertHash(id);
+    }
+  }
 
   @Test
   void getProtobufRatis() {
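
The new testContainIdHash test pins the simplified hash to the old commons-lang3 result. That holds because HashCodeBuilder(61, 71) starts the running total at 61 and appending a long multiplies by 71 and adds the int-folded value, giving 61 * 71 + (int) (id ^ (id >>> 32)), which is exactly the 61 * 71 + Long.hashCode(id) now cached in ContainerID. A standalone arithmetic check of that equivalence, with arbitrary sample values:

    // Sketch only: verifies the old and new hash formulas agree for a few arbitrary values.
    for (long v : new long[] {0L, 1L, 42L, Long.MAX_VALUE}) {
      int oldStyle = 61 * 71 + (int) (v ^ (v >>> 32)); // what HashCodeBuilder(61, 71).append(v) yields
      int newStyle = 61 * 71 + Long.hashCode(v);       // the formula ContainerID now memoizes
      if (oldStyle != newStyle) {
        throw new AssertionError("hash mismatch for " + v);
      }
    }
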
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
index a4ce5b2494..ee0f920d8a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
@@ -80,13 +80,13 @@ void testJsonOutput() throws IOException {
 
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(1));
+        ContainerID.valueOf(1));
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(2));
+        ContainerID.valueOf(2));
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.OVER_REPLICATED,
-        new ContainerID(3));
+        ContainerID.valueOf(3));
     report.setComplete();
 
     String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(report);
@@ -124,13 +124,13 @@ void testJsonOutput() throws IOException {
   void testContainerIDsCanBeSampled() {
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(1));
+        ContainerID.valueOf(1));
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-        new ContainerID(2));
+        ContainerID.valueOf(2));
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.OVER_REPLICATED,
-        new ContainerID(3));
+        ContainerID.valueOf(3));
 
     assertEquals(2,
         report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED));
@@ -141,13 +141,13 @@ void testContainerIDsCanBeSampled() {
 
     List<ContainerID> sample =
         
report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
-    assertEquals(new ContainerID(1), sample.get(0));
-    assertEquals(new ContainerID(2), sample.get(1));
+    assertEquals(ContainerID.valueOf(1), sample.get(0));
+    assertEquals(ContainerID.valueOf(2), sample.get(1));
     assertEquals(2, sample.size());
 
     sample =
         report.getSample(ReplicationManagerReport.HealthState.OVER_REPLICATED);
-    assertEquals(new ContainerID(3), sample.get(0));
+    assertEquals(ContainerID.valueOf(3), sample.get(0));
     assertEquals(1, sample.size());
 
     sample =
@@ -160,13 +160,13 @@ void testSamplesAreLimited() {
     for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT * 2; i++) {
       report.incrementAndSample(
           ReplicationManagerReport.HealthState.UNDER_REPLICATED,
-          new ContainerID(i));
+          ContainerID.valueOf(i));
     }
     List<ContainerID> sample =
         
report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
     assertEquals(ReplicationManagerReport.SAMPLE_LIMIT, sample.size());
     for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT; i++) {
-      assertEquals(new ContainerID(i), sample.get(i));
+      assertEquals(ContainerID.valueOf(i), sample.get(i));
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
index 057d96204a..4694850b93 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java
@@ -493,7 +493,7 @@ private SortedMap<Long, BlockData[]> getBlockDataMap(long containerID,
 
     SortedMap<Long, BlockData[]> resultMap = new TreeMap<>();
     Token<ContainerTokenIdentifier> containerToken =
-        tokenHelper.getContainerToken(new ContainerID(containerID));
+        tokenHelper.getContainerToken(ContainerID.valueOf(containerID));
 
     Iterator<Map.Entry<Integer, DatanodeDetails>> iterator =
         sourceNodeMap.entrySet().iterator();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index dd297cb35e..8b9dfe873e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -132,7 +132,7 @@ public void testReplicasToFixMisreplicationWithOneMisreplication() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 1, 2, 3, 5)
                     .map(list::get).collect(Collectors.toList());
     List<ContainerReplica> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 1,
             ImmutableMap.of(racks.get(0), 1));
@@ -153,7 +153,7 @@ public void testReplicasToFixMisreplicationWithTwoMisreplication() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 1, 2, 3, 5)
                     .map(list::get).collect(Collectors.toList());
     List<ContainerReplica> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 2,
             ImmutableMap.of(racks.get(0), 2));
@@ -174,7 +174,7 @@ public void testReplicasToFixMisreplicationWithThreeMisreplication() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 1, 2, 3, 5)
                     .map(list::get).collect(Collectors.toList());
     List<ContainerReplica> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 3,
             ImmutableMap.of(racks.get(0), 3));
@@ -197,7 +197,7 @@ public void testReplicasToFixMisreplicationWithThreeMisreplication() {
                     .map(list::get).collect(Collectors.toList());
     //Creating Replicas without replica Index
     List<ContainerReplica> replicas = HddsTestUtils
-            .getReplicas(new ContainerID(1), CLOSED, 0, replicaDns);
+            .getReplicas(ContainerID.valueOf(1), CLOSED, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 3,
             ImmutableMap.of(racks.get(0), 2, racks.get(3), 1));
   }
@@ -220,7 +220,7 @@ public void testReplicasToFixMisreplicationWithThreeMisreplication() {
                     .map(list::get).collect(Collectors.toList());
     //Creating Replicas without replica Index for replicas < number of racks
     List<ContainerReplica> replicas = HddsTestUtils
-            .getReplicas(new ContainerID(1), CLOSED, 0, replicaDns);
+            .getReplicas(ContainerID.valueOf(1), CLOSED, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 2,
             ImmutableMap.of(racks.get(0), 1, racks.get(3), 1));
   }
@@ -243,7 +243,7 @@ public void testReplicasToFixMisreplicationWithThreeMisreplication() {
                     .map(list::get).collect(Collectors.toList());
     //Creating Replicas without replica Index for replicas >number of racks
     List<ContainerReplica> replicas = HddsTestUtils
-            .getReplicas(new ContainerID(1), CLOSED, 0, replicaDns);
+            .getReplicas(ContainerID.valueOf(1), CLOSED, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 2,
             ImmutableMap.of(racks.get(0), 1, racks.get(3), 1));
   }
@@ -257,7 +257,7 @@ public void testReplicasToFixMisreplicationMaxReplicaPerRack() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 2, 4, 6, 8)
                     .map(list::get).collect(Collectors.toList());
     List<ContainerReplica> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns);
     testReplicasToFixMisreplication(replicas, dummyPlacementPolicy, 2,
             ImmutableMap.of(racks.get(0), 2));
@@ -273,7 +273,7 @@ public void testReplicasToFixMisreplicationMaxReplicaPerRack() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 2, 4, 6, 8)
             .map(list::get).collect(Collectors.toList());
     List<ContainerReplica> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns);
     Map<ContainerReplica, Boolean> replicaMap = replicas.stream().distinct()
             .collect(Collectors.toMap(Function.identity(), r -> false));
@@ -292,7 +292,7 @@ public void testReplicasWithoutMisreplication() {
     List<DatanodeDetails> replicaDns = Stream.of(0, 1, 2, 3, 4)
                     .map(list::get).collect(Collectors.toList());
     Map<ContainerReplica, Boolean> replicas =
-            HddsTestUtils.getReplicasWithReplicaIndex(new ContainerID(1),
+            HddsTestUtils.getReplicasWithReplicaIndex(ContainerID.valueOf(1),
                     CLOSED, 0, 0, 0, replicaDns)
                     .stream()
                     .collect(Collectors.toMap(Function.identity(), r -> true));
@@ -308,9 +308,9 @@ public void testReplicasToRemoveWithOneOverreplication() {
     List<DatanodeDetails> list = nodeManager.getAllNodes();
     Set<ContainerReplica> replicas = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(1, 6)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(1, 
6)));
     ContainerReplica replica = ContainerReplica.newBuilder()
-            .setContainerID(new ContainerID(1))
+            .setContainerID(ContainerID.valueOf(1))
             .setContainerState(CLOSED)
             .setReplicaIndex(1)
             .setDatanodeDetails(list.get(7)).build();
@@ -330,11 +330,11 @@ public void testReplicasToRemoveWithTwoOverreplication() {
 
     Set<ContainerReplica> replicas = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(1, 6)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(1, 
6)));
 
     Set<ContainerReplica> replicasToBeRemoved = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(7, 9)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(7, 
9)));
     replicas.addAll(replicasToBeRemoved);
 
     Set<ContainerReplica> replicasToRemove = dummyPlacementPolicy
@@ -351,14 +351,14 @@ public void testReplicasToRemoveWith2CountPerUniqueReplica() {
 
     Set<ContainerReplica> replicas = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(0, 3)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(0, 
3)));
     replicas.addAll(HddsTestUtils.getReplicasWithReplicaIndex(
-            new ContainerID(1), CLOSED, 0, 0, 0, list.subList(3, 6)));
+            ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(3, 6)));
     Set<ContainerReplica> replicasToBeRemoved = Sets.newHashSet(
-            HddsTestUtils.getReplicaBuilder(new ContainerID(1), CLOSED, 0, 0, 
0,
+            HddsTestUtils.getReplicaBuilder(ContainerID.valueOf(1), CLOSED, 0, 
0, 0,
                     list.get(7).getUuid(), list.get(7))
                     .setReplicaIndex(1).build(),
-            HddsTestUtils.getReplicaBuilder(new ContainerID(1), CLOSED, 0, 0, 
0,
+            HddsTestUtils.getReplicaBuilder(ContainerID.valueOf(1), CLOSED, 0, 
0, 0,
                     list.get(8).getUuid(), list.get(8)).setReplicaIndex(1)
                     .build());
     replicas.addAll(replicasToBeRemoved);
@@ -376,7 +376,7 @@ public void testReplicasToRemoveWithoutReplicaIndex() {
     List<DatanodeDetails> list = nodeManager.getAllNodes();
 
     Set<ContainerReplica> replicas = Sets.newHashSet(HddsTestUtils.getReplicas(
-                    new ContainerID(1), CLOSED, 0, list.subList(0, 5)));
+                    ContainerID.valueOf(1), CLOSED, 0, list.subList(0, 5)));
 
     Set<ContainerReplica> replicasToRemove = dummyPlacementPolicy
             .replicasToRemoveToFixOverreplication(replicas, 3);
@@ -397,16 +397,16 @@ public void testReplicasToRemoveWithOverreplicationWithinSameRack() {
 
     Set<ContainerReplica> replicas = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(1, 6)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(1, 
6)));
 
     ContainerReplica replica1 = ContainerReplica.newBuilder()
-            .setContainerID(new ContainerID(1))
+            .setContainerID(ContainerID.valueOf(1))
             .setContainerState(CLOSED)
             .setReplicaIndex(1)
             .setDatanodeDetails(list.get(6)).build();
     replicas.add(replica1);
     ContainerReplica replica2 = ContainerReplica.newBuilder()
-            .setContainerID(new ContainerID(1))
+            .setContainerID(ContainerID.valueOf(1))
             .setContainerState(CLOSED)
             .setReplicaIndex(1)
             .setDatanodeDetails(list.get(0)).build();
@@ -435,7 +435,7 @@ public void testReplicasToRemoveWithNoOverreplication() {
     List<DatanodeDetails> list = nodeManager.getAllNodes();
     Set<ContainerReplica> replicas = Sets.newHashSet(
             HddsTestUtils.getReplicasWithReplicaIndex(
-                    new ContainerID(1), CLOSED, 0, 0, 0, list.subList(1, 6)));
+                    ContainerID.valueOf(1), CLOSED, 0, 0, 0, list.subList(1, 
6)));
 
     Set<ContainerReplica> replicasToRemove = dummyPlacementPolicy
             .replicasToRemoveToFixOverreplication(replicas, 1);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
index 95209b68d8..0f546a4e3e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java
@@ -93,14 +93,14 @@ void cleanup() {
   @Test
   public void testGetPendingOpsReturnsEmptyList() {
     List<ContainerReplicaOp> ops =
-        pendingOps.getPendingOps(new ContainerID(1));
+        pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(0, ops.size());
   }
 
   @Test
   public void testClear() {
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 0, addCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn1, 0, deleteCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 0, addCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn1, 0, 
deleteCmd, deadline);
 
     assertEquals(1, 
pendingOps.getPendingOpCount(ContainerReplicaOp.PendingOpType.ADD));
     assertEquals(1, 
pendingOps.getPendingOpCount(ContainerReplicaOp.PendingOpType.DELETE));
@@ -109,26 +109,26 @@ public void testClear() {
 
     assertEquals(0, 
pendingOps.getPendingOpCount(ContainerReplicaOp.PendingOpType.ADD));
     assertEquals(0, 
pendingOps.getPendingOpCount(ContainerReplicaOp.PendingOpType.DELETE));
-    assertEquals(0, pendingOps.getPendingOps(new ContainerID(1)).size());
-    assertEquals(0, pendingOps.getPendingOps(new ContainerID(2)).size());
+    assertEquals(0, pendingOps.getPendingOps(ContainerID.valueOf(1)).size());
+    assertEquals(0, pendingOps.getPendingOps(ContainerID.valueOf(2)).size());
 
   }
 
   @Test
   public void testCanAddReplicasForAdd() {
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 0, addCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn2, 0, addCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn3, 0, addCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 0, addCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn2, 0, addCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn3, 0, addCmd, 
deadline);
     // Duplicate for DN2
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn2, 0, addCmd, deadline 
+ 1);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn2, 0, addCmd, 
deadline + 1);
     // Not a duplicate for DN2 as different index. Should not happen in 
practice as it is not valid to have 2 indexes
     // on the same node.
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn2, 1, addCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(2), dn1, 1, addCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(2), dn1, 1, addCmd, deadline 
+ 1);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn2, 1, addCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(2), dn1, 1, addCmd, 
deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(2), dn1, 1, addCmd, 
deadline + 1);
 
     List<ContainerReplicaOp> ops =
-        pendingOps.getPendingOps(new ContainerID(1));
+        pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(4, ops.size());
     for (ContainerReplicaOp op : ops) {
       if (!op.getTarget().equals(dn2)) {
@@ -147,7 +147,7 @@ public void testCanAddReplicasForAdd() {
     assertThat(allDns).contains(dn2);
     assertThat(allDns).contains(dn3);
 
-    ops = pendingOps.getPendingOps(new ContainerID(2));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(2));
     assertEquals(1, ops.size());
     assertEquals(1, ops.get(0).getReplicaIndex());
     assertEquals(ADD, ops.get(0).getOpType());
@@ -157,13 +157,13 @@ public void testCanAddReplicasForAdd() {
 
   @Test
   public void testCanAddReplicasForDelete() {
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn1, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn2, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn3, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn1, 1, deleteCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn1, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn2, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn3, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn1, 1, 
deleteCmd, deadline);
 
     List<ContainerReplicaOp> ops =
-        pendingOps.getPendingOps(new ContainerID(1));
+        pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(3, ops.size());
     for (ContainerReplicaOp op : ops) {
       assertEquals(0, op.getReplicaIndex());
@@ -175,7 +175,7 @@ public void testCanAddReplicasForDelete() {
     assertThat(allDns).contains(dn2);
     assertThat(allDns).contains(dn3);
 
-    ops = pendingOps.getPendingOps(new ContainerID(2));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(2));
     assertEquals(1, ops.size());
     assertEquals(1, ops.get(0).getReplicaIndex());
     assertEquals(DELETE, ops.get(0).getOpType());
@@ -184,46 +184,46 @@ public void testCanAddReplicasForDelete() {
 
   @Test
   public void testCompletingOps() {
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn1, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 0, addCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn2, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn3, 0, addCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn1, 1, deleteCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn1, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 0, addCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn2, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn3, 0, addCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn1, 1, 
deleteCmd, deadline);
 
     List<ContainerReplicaOp> ops =
-        pendingOps.getPendingOps(new ContainerID(1));
+        pendingOps.getPendingOps(ContainerID.valueOf(1));
 
     // We expect 4 entries - 2 add and 2 delete.
     assertEquals(4, ops.size());
 
     assertTrue(pendingOps
-        .completeAddReplica(new ContainerID(1), dn1, 0));
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+        .completeAddReplica(ContainerID.valueOf(1), dn1, 0));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(3, ops.size());
 
     // Complete one that does not exist:
     assertFalse(pendingOps
-        .completeAddReplica(new ContainerID(1), dn1, 0));
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+        .completeAddReplica(ContainerID.valueOf(1), dn1, 0));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(3, ops.size());
 
     // Complete the remaining ones
-    pendingOps.completeDeleteReplica(new ContainerID(1), dn1, 0);
-    pendingOps.completeDeleteReplica(new ContainerID(1), dn2, 0);
-    pendingOps.completeAddReplica(new ContainerID(1), dn3, 0);
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(1), dn1, 0);
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(1), dn2, 0);
+    pendingOps.completeAddReplica(ContainerID.valueOf(1), dn3, 0);
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(0, ops.size());
   }
 
   @Test
   public void testRemoveSpecificOp() {
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn1, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 0, addCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn2, 0, deleteCmd, 
deadline);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn3, 0, addCmd, 
deadline);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn1, 1, deleteCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn1, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 0, addCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn2, 0, 
deleteCmd, deadline);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn3, 0, addCmd, 
deadline);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn1, 1, 
deleteCmd, deadline);
 
-    ContainerID cid = new ContainerID(1);
+    ContainerID cid = ContainerID.valueOf(1);
     List<ContainerReplicaOp> ops = pendingOps.getPendingOps(cid);
     assertEquals(4, ops.size());
     for (ContainerReplicaOp op : ops) {
@@ -240,17 +240,17 @@ public void testRemoveExpiredEntries() {
     long expiry = clock.millis() + 1000;
     long laterExpiry =  clock.millis() + 2000;
     long latestExpiry = clock.millis() + 3000;
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn1, 0, deleteCmd, 
expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 0, addCmd, expiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn2, 0, deleteCmd, 
laterExpiry);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn3, 0, addCmd, 
laterExpiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn1, 1, deleteCmd, 
latestExpiry);
-    pendingOps.scheduleAddReplica(new ContainerID(2), dn1, 1, addCmd, 
latestExpiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn1, 0, 
deleteCmd, expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 0, addCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn2, 0, 
deleteCmd, laterExpiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn3, 0, addCmd, 
laterExpiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn1, 1, 
deleteCmd, latestExpiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(2), dn1, 1, addCmd, 
latestExpiry);
 
     List<ContainerReplicaOp> ops =
-        pendingOps.getPendingOps(new ContainerID(1));
+        pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(4, ops.size());
-    ops = pendingOps.getPendingOps(new ContainerID(2));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(2));
     assertEquals(2, ops.size());
 
     // Some entries expire at "start + 1000" some at start + 2000 and
@@ -258,13 +258,13 @@ public void testRemoveExpiredEntries() {
     clock.fastForward(1000);
     pendingOps.removeExpiredEntries();
     // Nothing is remove as no deadline is older than the current clock time.
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(4, ops.size());
 
     clock.fastForward(1000);
     pendingOps.removeExpiredEntries();
     // Those ADD with deadline + 1000 should be removed, but deletes are 
retained
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(3, ops.size());
     // We should lose the entries for DN1
     assertFalse(isOpPresent(ops, dn1, 0, ADD));
@@ -275,19 +275,19 @@ public void testRemoveExpiredEntries() {
     pendingOps.removeExpiredEntries();
 
     // Now should only have entries for container 2 and the deletes for 
container 1
-    ops = pendingOps.getPendingOps(new ContainerID(1));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(1));
     assertEquals(2, ops.size());
 
     assertTrue(isOpPresent(ops, dn1, 0, DELETE));
     assertTrue(isOpPresent(ops, dn2, 0, DELETE));
 
-    ops = pendingOps.getPendingOps(new ContainerID(2));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(2));
     assertEquals(2, ops.size());
 
     // Advance the clock again and all should be removed except deletes
     clock.fastForward(1000);
     pendingOps.removeExpiredEntries();
-    ops = pendingOps.getPendingOps(new ContainerID(2));
+    ops = pendingOps.getPendingOps(ContainerID.valueOf(2));
     assertTrue(isOpPresent(ops, dn1, 1, DELETE));
     assertEquals(1, ops.size());
   }
@@ -301,12 +301,12 @@ private boolean isOpPresent(List<ContainerReplicaOp> ops, DatanodeDetails dn,
   @Test
   public void testReplicationMetrics() {
     long expiry = clock.millis() + 1000;
-    pendingOps.scheduleDeleteReplica(new ContainerID(1), dn1, 1, deleteCmd, 
expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(1), dn1, 2, addCmd, expiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(2), dn2, 1, deleteCmd, 
expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(2), dn3, 1, addCmd, expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(3), dn3, 0, addCmd, expiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(4), dn3, 0, deleteCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(1), dn1, 1, 
deleteCmd, expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(1), dn1, 2, addCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(2), dn2, 1, 
deleteCmd, expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(2), dn3, 1, addCmd, 
expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(3), dn3, 0, addCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(4), dn3, 0, 
deleteCmd, expiry);
 
     // InFlight Replication and Deletion
     assertEquals(3, pendingOps.getPendingOpCount(ADD));
@@ -327,32 +327,32 @@ public void testReplicationMetrics() {
     assertEquals(metrics.getReplicaDeleteTimeoutTotal(), 1);
 
     expiry = clock.millis() + 1000;
-    pendingOps.scheduleDeleteReplica(new ContainerID(3), dn1, 2, deleteCmd, 
expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(3), dn1, 3, addCmd, expiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(4), dn2, 2, deleteCmd, 
expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(4), dn3, 4, addCmd, expiry);
-    pendingOps.scheduleAddReplica(new ContainerID(5), dn3, 0, addCmd, expiry);
-    pendingOps.scheduleDeleteReplica(new ContainerID(6), dn3, 0, deleteCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(3), dn1, 2, 
deleteCmd, expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(3), dn1, 3, addCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(4), dn2, 2, 
deleteCmd, expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(4), dn3, 4, addCmd, 
expiry);
+    pendingOps.scheduleAddReplica(ContainerID.valueOf(5), dn3, 0, addCmd, 
expiry);
+    pendingOps.scheduleDeleteReplica(ContainerID.valueOf(6), dn3, 0, 
deleteCmd, expiry);
 
     // InFlight Replication and Deletion. Previous Inflight should be
     // removed as they were timed out, but deletes are retained
     assertEquals(3, pendingOps.getPendingOpCount(ADD));
     assertEquals(6, pendingOps.getPendingOpCount(DELETE));
 
-    pendingOps.completeDeleteReplica(new ContainerID(3), dn1, 2);
-    pendingOps.completeAddReplica(new ContainerID(3), dn1, 3);
-    pendingOps.completeDeleteReplica(new ContainerID(4), dn2, 2);
-    pendingOps.completeAddReplica(new ContainerID(4), dn3, 4);
-    pendingOps.completeDeleteReplica(new ContainerID(6), dn3, 0);
-    pendingOps.completeAddReplica(new ContainerID(5), dn3, 0);
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(3), dn1, 2);
+    pendingOps.completeAddReplica(ContainerID.valueOf(3), dn1, 3);
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(4), dn2, 2);
+    pendingOps.completeAddReplica(ContainerID.valueOf(4), dn3, 4);
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(6), dn3, 0);
+    pendingOps.completeAddReplica(ContainerID.valueOf(5), dn3, 0);
 
     assertEquals(metrics.getEcReplicasCreatedTotal(), 2);
     assertEquals(metrics.getEcReplicasDeletedTotal(), 2);
     assertEquals(metrics.getReplicasCreatedTotal(), 1);
     assertEquals(metrics.getReplicasDeletedTotal(), 1);
 
-    pendingOps.completeDeleteReplica(new ContainerID(3), dn1, 2);
-    pendingOps.completeAddReplica(new ContainerID(2), dn1, 3);
+    pendingOps.completeDeleteReplica(ContainerID.valueOf(3), dn1, 2);
+    pendingOps.completeAddReplica(ContainerID.valueOf(2), dn1, 3);
 
     // Checking pendingOpCount doesn't go below zero
     assertEquals(0, pendingOps.getPendingOpCount(ADD));
@@ -374,7 +374,7 @@ public void testNotifySubscribers() {
     pendingOps.registerSubscriber(subscriber2);
 
     // schedule an ADD and a DELETE
-    ContainerID containerID = new ContainerID(1);
+    ContainerID containerID = ContainerID.valueOf(1);
     pendingOps.scheduleAddReplica(containerID, dn1, 0, addCmd, deadline);
     ContainerReplicaOp addOp = pendingOps.getPendingOps(containerID).get(0);
     pendingOps.scheduleDeleteReplica(containerID, dn1, 0, deleteCmd, deadline);
@@ -411,7 +411,7 @@ public void testNotifySubscribers() {
 
   @Test
   public void subscribersShouldNotBeNotifiedWhenOpsHaveNotExpired() {
-    ContainerID containerID = new ContainerID(1);
+    ContainerID containerID = ContainerID.valueOf(1);
 
     // schedule ops
     pendingOps.scheduleDeleteReplica(containerID, dn1, 0, deleteCmd, deadline);
@@ -431,7 +431,7 @@ public void subscribersShouldNotBeNotifiedWhenOpsHaveNotExpired() {
 
   @Test
   public void subscribersShouldNotBeNotifiedWhenReplacingAnOpWithDuplicate() {
-    ContainerID containerID = new ContainerID(1);
+    ContainerID containerID = ContainerID.valueOf(1);
 
     // schedule ops
     pendingOps.scheduleAddReplica(containerID, dn2, 0, addCmd, deadline);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index e0a4130021..6e37d3de26 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -1755,12 +1755,12 @@ public void testPendingOpExpiry() throws ContainerNotFoundException {
     ContainerReplicaOp delOp = new ContainerReplicaOp(
         ContainerReplicaOp.PendingOpType.DELETE, dn2, 1, command, 
commandDeadline);
 
-    replicationManager.opCompleted(addOp, new ContainerID(1L), false);
-    replicationManager.opCompleted(delOp, new ContainerID(1L), false);
+    replicationManager.opCompleted(addOp, ContainerID.valueOf(1L), false);
+    replicationManager.opCompleted(delOp, ContainerID.valueOf(1L), false);
     // No commands should be sent for either of the above ops.
     assertEquals(0, commandsSent.size());
 
-    replicationManager.opCompleted(delOp, new ContainerID(1L), true);
+    replicationManager.opCompleted(delOp, ContainerID.valueOf(1L), true);
     assertEquals(1, commandsSent.size());
     Pair<UUID, SCMCommand<?>> sentCommand = commandsSent.iterator().next();
     // The target should be DN2 and the deadline should have been updated from 
the value set in commandDeadline above
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
index ac529b08f7..6484fc3b97 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
@@ -452,7 +452,7 @@ public ContainerReplica buildContainerReplica() {
 
       ContainerReplica.ContainerReplicaBuilder builder = new 
ContainerReplica.ContainerReplicaBuilder();
       return builder.setReplicaIndex(index)
-          .setContainerID(new ContainerID(containerId))
+          .setContainerID(ContainerID.valueOf(containerId))
           .setContainerState(state)
           .setSequenceId(sequenceId)
           .setDatanodeDetails(datanodeDetails)
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
index d810df6722..66ced0132a 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java
@@ -264,14 +264,14 @@ private List<HddsProtos.Node> getNodeDetails(int n) {
   private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
     Map<String, List<ContainerID>> containerMap = new HashMap<>();
     List<ContainerID> underReplicated = new ArrayList<>();
-    underReplicated.add(new ContainerID(1L));
-    underReplicated.add(new ContainerID(2L));
-    underReplicated.add(new ContainerID(3L));
+    underReplicated.add(ContainerID.valueOf(1L));
+    underReplicated.add(ContainerID.valueOf(2L));
+    underReplicated.add(ContainerID.valueOf(3L));
     containerMap.put("UnderReplicated", underReplicated);
     List<ContainerID> unclosed = new ArrayList<>();
-    unclosed.add(new ContainerID(10L));
-    unclosed.add(new ContainerID(11L));
-    unclosed.add(new ContainerID(12L));
+    unclosed.add(ContainerID.valueOf(10L));
+    unclosed.add(ContainerID.valueOf(11L));
+    unclosed.add(ContainerID.valueOf(12L));
     containerMap.put("UnClosed", unclosed);
     return containerMap;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
index 84c31f088d..2bb67e9a88 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestScmApplyTransactionFailure.java
@@ -80,7 +80,7 @@ public void testAddContainerToClosedPipeline() throws Exception {
         InvalidPipelineStateException.class);
     assertThrows(ContainerNotFoundException.class,
         () -> containerManager.getContainer(
-            new ContainerID(containerInfo.getContainerID())));
+            ContainerID.valueOf(containerInfo.getContainerID())));
 
     // verify that SCMStateMachine is still functioning after the rejected
     // transaction.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
index e553d32eed..d79c312662 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java
@@ -268,7 +268,7 @@ public void testOrphanBlock() throws Exception {
 
     Token<ContainerTokenIdentifier> orphanContainerToken =
         containerTokenGenerator.generateToken(
-            ANY_USER, new ContainerID(orphanContainerID));
+            ANY_USER, ContainerID.valueOf(orphanContainerID));
 
     // Close the container by closing the pipeline
     scm.getPipelineManager().closePipeline(orphanPipeline, false);
@@ -689,7 +689,7 @@ private void testECReconstructionCoordinator(List<Integer> missingIndexes,
       OzoneKeyDetails key = bucket.getKey(keyString);
       long conID = key.getOzoneKeyLocations().get(0).getContainerID();
       Token<ContainerTokenIdentifier> cToken = containerTokenGenerator
-          .generateToken(ANY_USER, new ContainerID(conID));
+          .generateToken(ANY_USER, ContainerID.valueOf(conID));
 
       //Close the container first.
       closeContainer(conID);
@@ -876,7 +876,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure()
     OzoneKeyDetails key = bucket.getKey(keyString);
     long conID = key.getOzoneKeyLocations().get(0).getContainerID();
     Token<ContainerTokenIdentifier> cToken =
-        containerTokenGenerator.generateToken(ANY_USER, new 
ContainerID(conID));
+        containerTokenGenerator.generateToken(ANY_USER, 
ContainerID.valueOf(conID));
     closeContainer(conID);
 
     Pipeline containerPipeline = scm.getPipelineManager().getPipeline(
@@ -1050,7 +1050,7 @@ public static void prepareData(int[][] ranges) throws Exception {
     blockTokenGenerator = new OzoneBlockTokenSecretManager(
         tokenLifetime, secretKeyClient);
     containerToken = containerTokenGenerator
-        .generateToken(ANY_USER, new ContainerID(containerID));
+        .generateToken(ANY_USER, ContainerID.valueOf(containerID));
   }
 
   public static void stopCluster() throws IOException {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
index e4d49af9e6..b9710b4545 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestContainerScannerIntegrationAbstract.java
@@ -146,7 +146,7 @@ protected void waitForScmToCloseContainer(long containerID) throws Exception {
     ContainerManager cm = cluster.getStorageContainerManager()
         .getContainerManager();
     LambdaTestUtils.await(5000, 500,
-        () -> cm.getContainer(new ContainerID(containerID)).getState()
+        () -> cm.getContainer(ContainerID.valueOf(containerID)).getState()
             != HddsProtos.LifeCycleState.OPEN);
   }
 
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index d7873b25a4..eb09cf3ca7 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -954,12 +954,12 @@ public void testGetFileCounts() throws Exception {
   public void testGetContainerCounts() throws Exception {
     // Mock container info objects with different sizes
     ContainerInfo omContainerInfo1 = mock(ContainerInfo.class);
-    given(omContainerInfo1.containerID()).willReturn(new ContainerID(1));
+    given(omContainerInfo1.containerID()).willReturn(ContainerID.valueOf(1));
     given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB
     given(omContainerInfo1.getState()).willReturn(LifeCycleState.OPEN);
 
     ContainerInfo omContainerInfo2 = mock(ContainerInfo.class);
-    given(omContainerInfo2.containerID()).willReturn(new ContainerID(2));
+    given(omContainerInfo2.containerID()).willReturn(ContainerID.valueOf(2));
     given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB
     given(omContainerInfo2.getState()).willReturn(LifeCycleState.OPEN);
 
@@ -1437,14 +1437,14 @@ private List<HddsProtos.Node> getNodeDetails(int n) {
   private Map<String, List<ContainerID>> getContainersOnDecomNodes() {
     Map<String, List<ContainerID>> containerMap = new HashMap<>();
     List<ContainerID> underReplicated = new ArrayList<>();
-    underReplicated.add(new ContainerID(1L));
-    underReplicated.add(new ContainerID(2L));
-    underReplicated.add(new ContainerID(3L));
+    underReplicated.add(ContainerID.valueOf(1L));
+    underReplicated.add(ContainerID.valueOf(2L));
+    underReplicated.add(ContainerID.valueOf(3L));
     containerMap.put("UnderReplicated", underReplicated);
     List<ContainerID> unclosed = new ArrayList<>();
-    unclosed.add(new ContainerID(10L));
-    unclosed.add(new ContainerID(11L));
-    unclosed.add(new ContainerID(12L));
+    unclosed.add(ContainerID.valueOf(10L));
+    unclosed.add(ContainerID.valueOf(11L));
+    unclosed.add(ContainerID.valueOf(12L));
     containerMap.put("UnClosed", unclosed);
     return containerMap;
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index d5ebf03a8e..30d786918b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -1352,42 +1352,42 @@ private static ReconStorageContainerManagerFacade getMockReconSCM()
     ContainerManager containerManager = mock(ContainerManager.class);
 
     // Container 1 is 3-way replicated
-    ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID);
+    ContainerID containerID1 = ContainerID.valueOf(CONTAINER_ONE_ID);
     Set<ContainerReplica> containerReplicas1 = generateMockContainerReplicas(
         CONTAINER_ONE_REPLICA_COUNT, containerID1);
     when(containerManager.getContainerReplicas(containerID1))
             .thenReturn(containerReplicas1);
 
     // Container 2 is under replicated with 2 replica
-    ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID);
+    ContainerID containerID2 = ContainerID.valueOf(CONTAINER_TWO_ID);
     Set<ContainerReplica> containerReplicas2 = generateMockContainerReplicas(
         CONTAINER_TWO_REPLICA_COUNT, containerID2);
     when(containerManager.getContainerReplicas(containerID2))
             .thenReturn(containerReplicas2);
 
     // Container 3 is over replicated with 4 replica
-    ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID);
+    ContainerID containerID3 = ContainerID.valueOf(CONTAINER_THREE_ID);
     Set<ContainerReplica> containerReplicas3 = generateMockContainerReplicas(
         CONTAINER_THREE_REPLICA_COUNT, containerID3);
     when(containerManager.getContainerReplicas(containerID3))
         .thenReturn(containerReplicas3);
 
     // Container 4 is replicated with 5 replica
-    ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID);
+    ContainerID containerID4 = ContainerID.valueOf(CONTAINER_FOUR_ID);
     Set<ContainerReplica> containerReplicas4 = generateMockContainerReplicas(
         CONTAINER_FOUR_REPLICA_COUNT, containerID4);
     when(containerManager.getContainerReplicas(containerID4))
         .thenReturn(containerReplicas4);
 
     // Container 5 is replicated with 2 replica
-    ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID);
+    ContainerID containerID5 = ContainerID.valueOf(CONTAINER_FIVE_ID);
     Set<ContainerReplica> containerReplicas5 = generateMockContainerReplicas(
         CONTAINER_FIVE_REPLICA_COUNT, containerID5);
     when(containerManager.getContainerReplicas(containerID5))
         .thenReturn(containerReplicas5);
 
     // Container 6 is replicated with 3 replica
-    ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID);
+    ContainerID containerID6 = ContainerID.valueOf(CONTAINER_SIX_ID);
     Set<ContainerReplica> containerReplicas6 = generateMockContainerReplicas(
         CONTAINER_SIX_REPLICA_COUNT, containerID6);
     when(containerManager.getContainerReplicas(containerID6))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index 1a01e12543..3a37c45b5f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -1277,42 +1277,42 @@ private static ReconStorageContainerManagerFacade getMockReconSCM()
     ContainerManager containerManager = mock(ContainerManager.class);
 
     // Container 1 is 3-way replicated
-    ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID);
+    ContainerID containerID1 = ContainerID.valueOf(CONTAINER_ONE_ID);
     Set<ContainerReplica> containerReplicas1 = generateMockContainerReplicas(
         CONTAINER_ONE_REPLICA_COUNT, containerID1);
     when(containerManager.getContainerReplicas(containerID1))
         .thenReturn(containerReplicas1);
 
     // Container 2 is under replicated with 2 replica
-    ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID);
+    ContainerID containerID2 = ContainerID.valueOf(CONTAINER_TWO_ID);
     Set<ContainerReplica> containerReplicas2 = generateMockContainerReplicas(
         CONTAINER_TWO_REPLICA_COUNT, containerID2);
     when(containerManager.getContainerReplicas(containerID2))
         .thenReturn(containerReplicas2);
 
     // Container 3 is over replicated with 4 replica
-    ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID);
+    ContainerID containerID3 = ContainerID.valueOf(CONTAINER_THREE_ID);
     Set<ContainerReplica> containerReplicas3 = generateMockContainerReplicas(
         CONTAINER_THREE_REPLICA_COUNT, containerID3);
     when(containerManager.getContainerReplicas(containerID3))
         .thenReturn(containerReplicas3);
 
     // Container 4 is replicated with 5 replica
-    ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID);
+    ContainerID containerID4 = ContainerID.valueOf(CONTAINER_FOUR_ID);
     Set<ContainerReplica> containerReplicas4 = generateMockContainerReplicas(
         CONTAINER_FOUR_REPLICA_COUNT, containerID4);
     when(containerManager.getContainerReplicas(containerID4))
         .thenReturn(containerReplicas4);
 
     // Container 5 is replicated with 2 replica
-    ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID);
+    ContainerID containerID5 = ContainerID.valueOf(CONTAINER_FIVE_ID);
     Set<ContainerReplica> containerReplicas5 = generateMockContainerReplicas(
         CONTAINER_FIVE_REPLICA_COUNT, containerID5);
     when(containerManager.getContainerReplicas(containerID5))
         .thenReturn(containerReplicas5);
 
     // Container 6 is replicated with 3 replica
-    ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID);
+    ContainerID containerID6 = ContainerID.valueOf(CONTAINER_SIX_ID);
     Set<ContainerReplica> containerReplicas6 = generateMockContainerReplicas(
         CONTAINER_SIX_REPLICA_COUNT, containerID6);
     when(containerManager.getContainerReplicas(containerID6))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index 96fb41272f..a4fa0e220d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -1396,42 +1396,42 @@ private static ReconStorageContainerManagerFacade 
getMockReconSCM()
     ContainerManager containerManager = mock(ContainerManager.class);
 
     // Container 1 is 3-way replicated
-    ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID);
+    ContainerID containerID1 = ContainerID.valueOf(CONTAINER_ONE_ID);
     Set<ContainerReplica> containerReplicas1 = generateMockContainerReplicas(
         CONTAINER_ONE_REPLICA_COUNT, containerID1);
     when(containerManager.getContainerReplicas(containerID1))
         .thenReturn(containerReplicas1);
 
     // Container 2 is under replicated with 2 replica
-    ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID);
+    ContainerID containerID2 = ContainerID.valueOf(CONTAINER_TWO_ID);
     Set<ContainerReplica> containerReplicas2 = generateMockContainerReplicas(
         CONTAINER_TWO_REPLICA_COUNT, containerID2);
     when(containerManager.getContainerReplicas(containerID2))
         .thenReturn(containerReplicas2);
 
     // Container 3 is over replicated with 4 replica
-    ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID);
+    ContainerID containerID3 = ContainerID.valueOf(CONTAINER_THREE_ID);
     Set<ContainerReplica> containerReplicas3 = generateMockContainerReplicas(
         CONTAINER_THREE_REPLICA_COUNT, containerID3);
     when(containerManager.getContainerReplicas(containerID3))
         .thenReturn(containerReplicas3);
 
     // Container 4 is replicated with 5 replica
-    ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID);
+    ContainerID containerID4 = ContainerID.valueOf(CONTAINER_FOUR_ID);
     Set<ContainerReplica> containerReplicas4 = generateMockContainerReplicas(
         CONTAINER_FOUR_REPLICA_COUNT, containerID4);
     when(containerManager.getContainerReplicas(containerID4))
         .thenReturn(containerReplicas4);
 
     // Container 5 is replicated with 2 replica
-    ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID);
+    ContainerID containerID5 = ContainerID.valueOf(CONTAINER_FIVE_ID);
     Set<ContainerReplica> containerReplicas5 = generateMockContainerReplicas(
         CONTAINER_FIVE_REPLICA_COUNT, containerID5);
     when(containerManager.getContainerReplicas(containerID5))
         .thenReturn(containerReplicas5);
 
     // Container 6 is replicated with 3 replica
-    ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID);
+    ContainerID containerID6 = ContainerID.valueOf(CONTAINER_SIX_ID);
     Set<ContainerReplica> containerReplicas6 = generateMockContainerReplicas(
         CONTAINER_SIX_REPLICA_COUNT, containerID6);
     when(containerManager.getContainerReplicas(containerID6))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
index 032b948233..6e444717b2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java
@@ -90,18 +90,18 @@ public void setUp() {
   public void testProcess() {
     // mock a container with invalid used bytes
     ContainerInfo omContainerInfo0 = mock(ContainerInfo.class);
-    given(omContainerInfo0.containerID()).willReturn(new ContainerID(0));
+    given(omContainerInfo0.containerID()).willReturn(ContainerID.valueOf(0));
     given(omContainerInfo0.getUsedBytes()).willReturn(-1L);
     given(omContainerInfo0.getState()).willReturn(OPEN);
 
     // Write 2 keys
     ContainerInfo omContainerInfo1 = mock(ContainerInfo.class);
-    given(omContainerInfo1.containerID()).willReturn(new ContainerID(1));
+    given(omContainerInfo1.containerID()).willReturn(ContainerID.valueOf(1));
     given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB
     given(omContainerInfo1.getState()).willReturn(CLOSED);
 
     ContainerInfo omContainerInfo2 = mock(ContainerInfo.class);
-    given(omContainerInfo2.containerID()).willReturn(new ContainerID(2));
+    given(omContainerInfo2.containerID()).willReturn(ContainerID.valueOf(2));
     given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB
     given(omContainerInfo2.getState()).willReturn(CLOSING);
 
@@ -134,13 +134,13 @@ public void testProcess() {
 
     // Add a new container
     ContainerInfo omContainerInfo3 = mock(ContainerInfo.class);
-    given(omContainerInfo3.containerID()).willReturn(new ContainerID(3));
+    given(omContainerInfo3.containerID()).willReturn(ContainerID.valueOf(3));
     given(omContainerInfo3.getUsedBytes()).willReturn(1000000000L); // 1GB
     given(omContainerInfo3.getState()).willReturn(QUASI_CLOSED);
     containers.add(omContainerInfo3);
 
     // Update existing key.
-    given(omContainerInfo2.containerID()).willReturn(new ContainerID(2));
+    given(omContainerInfo2.containerID()).willReturn(ContainerID.valueOf(2));
     given(omContainerInfo2.getUsedBytes()).willReturn(50000L); // 50KB
 
     task.processContainers(containers);
@@ -178,23 +178,23 @@ public void testProcess() {
   public void testProcessDeletedAndNegativeSizedContainers() {
     // Create a list of containers, including one that is deleted
     ContainerInfo omContainerInfo1 = mock(ContainerInfo.class);
-    given(omContainerInfo1.containerID()).willReturn(new ContainerID(1));
+    given(omContainerInfo1.containerID()).willReturn(ContainerID.valueOf(1));
     given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB
     given(omContainerInfo1.getState()).willReturn(OPEN);
 
     ContainerInfo omContainerInfo2 = mock(ContainerInfo.class);
-    given(omContainerInfo2.containerID()).willReturn(new ContainerID(2));
+    given(omContainerInfo2.containerID()).willReturn(ContainerID.valueOf(2));
     given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB
     given(omContainerInfo2.getState()).willReturn(CLOSED);
 
     ContainerInfo omContainerInfoDeleted = mock(ContainerInfo.class);
-    given(omContainerInfoDeleted.containerID()).willReturn(new ContainerID(3));
+    given(omContainerInfoDeleted.containerID()).willReturn(ContainerID.valueOf(3));
     given(omContainerInfoDeleted.getUsedBytes()).willReturn(1000000000L);
     given(omContainerInfoDeleted.getState()).willReturn(DELETED); // 1GB
 
     // Create a mock container with negative size
     final ContainerInfo negativeSizeContainer = mock(ContainerInfo.class);
-    given(negativeSizeContainer.containerID()).willReturn(new ContainerID(0));
+    given(negativeSizeContainer.containerID()).willReturn(ContainerID.valueOf(0));
     given(negativeSizeContainer.getUsedBytes()).willReturn(-1L);
     given(negativeSizeContainer.getState()).willReturn(OPEN);
 
@@ -202,13 +202,13 @@ public void testProcessDeletedAndNegativeSizedContainers() {
     final ContainerInfo negativeSizeDeletedContainer =
         mock(ContainerInfo.class);
     given(negativeSizeDeletedContainer.containerID()).willReturn(
-        new ContainerID(0));
+        ContainerID.valueOf(0));
     given(negativeSizeDeletedContainer.getUsedBytes()).willReturn(-1L);
     given(negativeSizeDeletedContainer.getState()).willReturn(DELETED);
 
     // Create a mock container with id 1 and updated size of 1GB from 1.5GB
     final ContainerInfo validSizeContainer = mock(ContainerInfo.class);
-    given(validSizeContainer.containerID()).willReturn(new ContainerID(1));
+    given(validSizeContainer.containerID()).willReturn(ContainerID.valueOf(1));
     given(validSizeContainer.getUsedBytes()).willReturn(1000000000L); // 1GB
     given(validSizeContainer.getState()).willReturn(CLOSED);
 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java
index bd74aca5bb..acd3d978b7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java
@@ -215,7 +215,7 @@ public byte[] getValueObject(DBColumnFamilyDefinition dbColumnFamilyDefinition,
     if (keyType.equals(String.class)) {
       return key.getBytes(UTF_8);
     } else if (keyType.equals(ContainerID.class)) {
-      return new ContainerID(Long.parseLong(key)).getBytes();
+      return ContainerID.getBytes(Long.parseLong(key));
     } else if (keyType.equals(Long.class)) {
       return LongCodec.get().toPersistedFormat(Long.parseLong(key));
     } else if (keyType.equals(PipelineID.class)) {
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
index f2766438f4..efce02d54f 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java
@@ -84,7 +84,7 @@ private void writeScmData(long index) throws Exception {
               .setOwner(getUserId())
               .build();
 
-      containerStore.put(new ContainerID(containerId), containerInfo);
+      containerStore.put(ContainerID.valueOf(containerId), containerInfo);
       return null;
     });
 

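For reference, the change applied across the files above is a mechanical substitution
of the removed public ContainerID constructor with the valueOf factory. A minimal
calling-pattern sketch, assuming only the ContainerID API visible in this diff
(illustration only, not part of the patch):

    import org.apache.hadoop.hdds.scm.container.ContainerID;

    public class ContainerIdUsageSketch {
      public static void main(String[] args) {
        // Before this commit, callers constructed IDs directly:
        //   ContainerID id = new ContainerID(42L);
        // After this commit, the value-based factory is used instead:
        ContainerID id = ContainerID.valueOf(42L);
        System.out.println(id);
      }
    }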

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
