This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new fc6776db33 HDDS-12739. Replace the getNodeByUuid methods in NodeManager. (#8202)
fc6776db33 is described below

commit fc6776db3326d2f2fed641459d8ca15ba826356e
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Mon Mar 31 04:00:05 2025 -0700

    HDDS-12739. Replace the getNodeByUuid methods in NodeManager. (#8202)
---
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java  |  5 ++--
 .../hdds/scm/container/ContainerReportHandler.java |  3 +--
 .../IncrementalContainerReportHandler.java         |  6 ++---
 .../hadoop/hdds/scm/node/DeadNodeHandler.java      |  2 +-
 .../hdds/scm/node/HealthyReadOnlyNodeHandler.java  |  2 +-
 .../apache/hadoop/hdds/scm/node/NodeManager.java   | 14 +++--------
 .../hadoop/hdds/scm/node/SCMNodeManager.java       | 21 ++++------------
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |  2 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java    |  3 ++-
 .../hdds/scm/server/SCMClientProtocolServer.java   |  5 ++--
 .../hdds/scm/TestSCMCommonPlacementPolicy.java     |  7 +++---
 .../hadoop/hdds/scm/container/MockNodeManager.java |  7 +++---
 .../hdds/scm/container/SimpleMockNodeManager.java  |  2 +-
 .../algorithms/TestContainerPlacementFactory.java  |  2 +-
 .../TestSCMContainerPlacementCapacity.java         |  6 ++---
 .../TestSCMContainerPlacementRackAware.java        |  4 ++--
 .../TestSCMContainerPlacementRackScatter.java      |  4 ++--
 .../TestSCMContainerPlacementRandom.java           |  6 ++---
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  |  2 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |  3 +--
 .../scm/pipeline/TestPipelinePlacementFactory.java |  2 +-
 .../hdds/scm/TestStorageContainerManager.java      |  2 +-
 .../scm/node/TestDecommissionAndMaintenance.java   | 26 ++++++++++----------
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |  3 ++-
 .../ReconIncrementalContainerReportHandler.java    |  3 +--
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  2 +-
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  4 ++--
 ...TestReconIncrementalContainerReportHandler.java |  4 ++--
 .../ozone/recon/scm/TestReconNodeManager.java      | 28 ++++++++++------------
 29 files changed, 78 insertions(+), 102 deletions(-)
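
For context, here is a minimal sketch (not part of the patch) of how a call site changes with this commit. It assumes only what the diff below shows: NodeManager.getNode(DatanodeID) replaces the old getNodeByUuid overloads, and DatanodeDetails.getID() supplies the DatanodeID. The class and method names in the sketch are hypothetical.

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.DatanodeID;
    import org.apache.hadoop.hdds.scm.node.NodeManager;

    /** Hypothetical helper illustrating the caller-side change; not part of this commit. */
    final class GetNodeExample {
      private GetNodeExample() {
      }

      static DatanodeDetails lookup(NodeManager nodeManager, DatanodeDetails reported) {
        // Before this commit: nodeManager.getNodeByUuid(reported.getUuid())
        // After this commit: look up by the strongly typed DatanodeID instead.
        final DatanodeID id = reported.getID();
        // getNode returns null when the datanode is not known to the NodeManager.
        return nodeManager.getNode(id);
      }
    }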

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 6dcfcaa9f4..0a0f6d93c2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -156,8 +156,7 @@ private List<DatanodeDetails> validateDatanodes(List<DatanodeDetails> dns) {
     }
     for (int i = 0; i < dns.size(); i++) {
       DatanodeDetails node = dns.get(i);
-      DatanodeDetails datanodeDetails =
-          nodeManager.getNodeByUuid(node.getUuid());
+      final DatanodeDetails datanodeDetails = nodeManager.getNode(node.getID());
       if (datanodeDetails != null) {
         dns.set(i, datanodeDetails);
       }
@@ -496,7 +495,7 @@ public void removePeers(DatanodeDetails dn,
   public boolean isValidNode(DatanodeDetails datanodeDetails,
       long metadataSizeRequired, long dataSizeRequired) {
     DatanodeInfo datanodeInfo = (DatanodeInfo)getNodeManager()
-        .getNodeByUuid(datanodeDetails.getUuid());
+        .getNode(datanodeDetails.getID());
     if (datanodeInfo == null) {
       LOG.error("Failed to find the DatanodeInfo for datanode {}",
           datanodeDetails);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 130a11b3c6..4e41f962e2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -142,8 +142,7 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode,
 
     final DatanodeDetails dnFromReport =
         reportFromDatanode.getDatanodeDetails();
-    DatanodeDetails datanodeDetails =
-        nodeManager.getNodeByUuid(dnFromReport.getUuid());
+    final DatanodeDetails datanodeDetails = nodeManager.getNode(dnFromReport.getID());
     if (datanodeDetails == null) {
       LOG.warn("Received container report from unknown datanode {}",
           dnFromReport);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index 7f8c8162d1..a1bd9010f8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -57,11 +57,9 @@ public void onMessage(final IncrementalContainerReportFromDatanode report,
                         final EventPublisher publisher) {
     final DatanodeDetails dnFromReport = report.getDatanodeDetails();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Processing incremental container report from data node {}",
-          dnFromReport.getUuid());
+      LOG.debug("Processing incremental container report from data node {}", 
dnFromReport);
     }
-    DatanodeDetails dd =
-        nodeManager.getNodeByUuid(dnFromReport.getUuid());
+    final DatanodeDetails dd = nodeManager.getNode(dnFromReport.getID());
     if (dd == null) {
       LOG.warn("Received container report from unknown datanode {}",
           dnFromReport);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
index f582623b8c..69de282e81 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java
@@ -117,7 +117,7 @@ public void onMessage(final DatanodeDetails datanodeDetails,
         //make sure after DN is removed from topology,
        //DatanodeDetails instance returned from nodeStateManager has no parent.
         Preconditions.checkState(
-            nodeManager.getNodeByUuid(datanodeDetails.getUuid())
+            nodeManager.getNode(datanodeDetails.getID())
                 .getParent() == null);
       }
     } catch (NodeNotFoundException ex) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
index 286b8ec5c4..9bbfb519b6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HealthyReadOnlyNodeHandler.java
@@ -103,7 +103,7 @@ public void onMessage(DatanodeDetails datanodeDetails,
       // make sure after DN is added back into topology, DatanodeDetails
       // instance returned from nodeStateManager has parent correctly set.
       Preconditions.checkNotNull(
-          nodeManager.getNodeByUuid(datanodeDetails.getUuid())
+          nodeManager.getNode(datanodeDetails.getID())
               .getParent());
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index d3bcf7d750..c2eba8df0a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -29,6 +29,7 @@
 import java.util.UUID;
 import java.util.function.BiConsumer;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
@@ -360,17 +361,8 @@ Map<SCMCommandProto.Type, Integer> getTotalDatanodeCommandCounts(
   // TODO: We can give better name to this method!
   List<SCMCommand<?>> getCommandQueue(UUID dnID);
 
-  /**
-   * Given datanode uuid, returns the DatanodeDetails for the node.
-   *
-   * @param uuid datanode uuid
-   * @return the given datanode, or null if not found
-   */
-  @Nullable DatanodeDetails getNodeByUuid(@Nullable String uuid);
-
-  default @Nullable DatanodeDetails getNodeByUuid(@Nullable UUID uuid) {
-    return uuid != null ? getNodeByUuid(uuid.toString()) : null;
-  };
+  /** @return the datanode of the given id if it exists; otherwise, return null. */
+  @Nullable DatanodeDetails getNode(@Nullable DatanodeID id);
 
   /**
    * Given datanode address(Ipaddress or hostname), returns a list of
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 3f09069083..2da48d175b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -1676,29 +1676,16 @@ public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
     }
   }
 
-  /**
-   * Given datanode uuid, returns the DatanodeDetails for the node.
-   *
-   * @param uuid node host address
-   * @return the given datanode, or null if not found
-   */
-  @Override
-  public DatanodeDetails getNodeByUuid(String uuid) {
-    return uuid != null && !uuid.isEmpty()
-        ? getNodeByUuid(UUID.fromString(uuid))
-        : null;
-  }
-
   @Override
-  public DatanodeDetails getNodeByUuid(UUID uuid) {
-    if (uuid == null) {
+  public DatanodeInfo getNode(DatanodeID id) {
+    if (id == null) {
       return null;
     }
 
     try {
-      return nodeStateManager.getNode(DatanodeID.of(uuid));
+      return nodeStateManager.getNode(id);
     } catch (NodeNotFoundException e) {
-      LOG.warn("Cannot find node for uuid {}", uuid);
+      LOG.warn("Cannot find node for uuid {}", id);
       return null;
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
index 2dcf2d56f2..b5681aca96 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java
@@ -637,7 +637,7 @@ private boolean isOpenWithUnregisteredNodes(Pipeline pipeline) {
       return false;
     }
     for (DatanodeDetails dn : pipeline.getNodes()) {
-      if (nodeManager.getNodeByUuid(dn.getUuid()) == null) {
+      if (nodeManager.getNode(dn.getID()) == null) {
         return true;
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index e81569dd2a..2b6eeb4626 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.AddSCMRequest;
 import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -378,7 +379,7 @@ public List<DatanodeDetails> sortDatanodes(List<String> nodes,
       final Node client = getClientNode(clientMachine);
       List<DatanodeDetails> nodeList = new ArrayList<>();
       nodes.forEach(uuid -> {
-        DatanodeDetails node = nodeManager.getNodeByUuid(uuid);
+        DatanodeDetails node = nodeManager.getNode(DatanodeID.fromUuidString(uuid));
         if (node != null) {
           nodeList.add(node);
         }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 9524094631..3b614691bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DeletedBlocksTransactionInfo;
 import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService;
@@ -616,7 +617,7 @@ public HddsProtos.Node queryNode(UUID uuid)
       throws IOException {
     HddsProtos.Node result = null;
     try {
-      DatanodeDetails node = scm.getScmNodeManager().getNodeByUuid(uuid);
+      DatanodeDetails node = scm.getScmNodeManager().getNode(DatanodeID.of(uuid));
       if (node != null) {
         NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node);
         result = HddsProtos.Node.newBuilder()
@@ -1222,7 +1223,7 @@ public List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(
     // get datanodes by ip or uuid
     List<DatanodeDetails> nodes = new ArrayList<>();
     if (!Strings.isNullOrEmpty(uuid)) {
-      nodes.add(scm.getScmNodeManager().getNodeByUuid(uuid));
+      nodes.add(scm.getScmNodeManager().getNode(DatanodeID.fromUuidString(uuid)));
     } else if (!Strings.isNullOrEmpty(address)) {
       nodes = scm.getScmNodeManager().getNodesByAddress(address);
     } else {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index 8b9dfe873e..920fd46d8c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -467,17 +468,17 @@ protected List<DatanodeDetails> chooseDatanodesInternal(
   @Test
   public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() {
     NodeManager nodeMngr = mock(NodeManager.class);
-    UUID datanodeUuid = UUID.randomUUID();
+    final DatanodeID datanodeID = DatanodeID.of(UUID.randomUUID());
     DummyPlacementPolicy placementPolicy =
         new DummyPlacementPolicy(nodeMngr, conf, 1);
     DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
-    when(datanodeDetails.getUuid()).thenReturn(datanodeUuid);
+    when(datanodeDetails.getID()).thenReturn(datanodeID);
 
     DatanodeInfo datanodeInfo = mock(DatanodeInfo.class);
     NodeStatus nodeStatus = mock(NodeStatus.class);
     when(nodeStatus.isNodeWritable()).thenReturn(true);
     when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus);
-    when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo);
+    when(nodeMngr.getNode(eq(datanodeID))).thenReturn(datanodeInfo);
 
     // capacity = 200000, used = 90000, remaining = 101000, committed = 500
     StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index ffb215edcc..c60a0c505e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -36,6 +36,7 @@
 import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
@@ -831,8 +832,8 @@ public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
   }
 
   @Override
-  public DatanodeDetails getNodeByUuid(String uuid) {
-    Node node = clusterMap.getNode(NetConstants.DEFAULT_RACK + "/" + uuid);
+  public DatanodeDetails getNode(DatanodeID id) {
+    Node node = clusterMap.getNode(NetConstants.DEFAULT_RACK + "/" + id);
     return node == null ? null : (DatanodeDetails)node;
   }
 
@@ -844,7 +845,7 @@ public List<DatanodeDetails> getNodesByAddress(String address) {
       return results;
     }
     for (String uuid : uuids) {
-      DatanodeDetails dn = getNodeByUuid(uuid);
+      DatanodeDetails dn = getNode(DatanodeID.fromUuidString(uuid));
       if (dn != null) {
         results.add(dn);
       }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
index c64d11a622..9300d92885 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java
@@ -344,7 +344,7 @@ public List<SCMCommand<?>> getCommandQueue(UUID dnID) {
   }
 
   @Override
-  public DatanodeDetails getNodeByUuid(String uuid) {
+  public DatanodeDetails getNode(DatanodeID id) {
     return null;
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
index a02cc7f6ae..7b4901bf9e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
@@ -145,7 +145,7 @@ public void testRackAwarePolicy() throws IOException {
     when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
         .thenReturn(new ArrayList<>(datanodes));
     for (DatanodeInfo dn: dnInfos) {
-      when(nodeManager.getNodeByUuid(dn.getUuid()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 8d860f2477..fd02abfb02 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -30,10 +30,10 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
@@ -112,9 +112,9 @@ public void chooseDatanodes() throws SCMException {
         .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19));
     when(mockNodeManager.getNodeStat(datanodes.get(4)))
         .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20));
-    when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer(
+    when(mockNodeManager.getNode(any(DatanodeID.class))).thenAnswer(
             invocation -> datanodes.stream()
-                .filter(dn -> dn.getUuid().equals(invocation.getArgument(0)))
+                .filter(dn -> dn.getID().equals(invocation.getArgument(0)))
                 .findFirst()
                 .orElse(null));
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index e5c396fefa..d0d4c9ff7c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -176,7 +176,7 @@ private void setup(int datanodeCount) {
     when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
         .thenReturn(new ArrayList<>(datanodes));
     for (DatanodeInfo dn: dnInfos) {
-      when(nodeManager.getNodeByUuid(dn.getUuid()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
     when(nodeManager.getClusterNetworkTopologyMap())
@@ -478,7 +478,7 @@ public void testDatanodeWithDefaultNetworkLocation(int datanodeCount)
     assertEquals(dataList.size(), StringUtils.countMatches(
         clusterMap.toString(), NetConstants.DEFAULT_RACK));
     for (DatanodeInfo dn: dnInfoList) {
-      when(nodeManager.getNodeByUuid(dn.getUuid()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
index 07e015c660..b210776864 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackScatter.java
@@ -243,7 +243,7 @@ private void createMocksAndUpdateStorageReports(int datanodeCount) {
     when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
         .thenReturn(new ArrayList<>(datanodes));
     for (DatanodeInfo dn: dnInfos) {
-      when(nodeManager.getNodeByUuid(dn.getUuid()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
     when(nodeManager.getClusterNetworkTopologyMap())
@@ -511,7 +511,7 @@ public void testDatanodeWithDefaultNetworkLocation(int datanodeCount)
     assertEquals(dataList.size(), StringUtils.countMatches(
         clusterMap.toString(), NetConstants.DEFAULT_RACK));
     for (DatanodeInfo dn: dnInfoList) {
-      when(nodeManager.getNodeByUuid(dn.getUuid()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index b8c03824e9..a7659fc3ab 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -201,11 +201,11 @@ public void testIsValidNode() throws SCMException {
     NodeManager mockNodeManager = mock(NodeManager.class);
     when(mockNodeManager.getNodes(NodeStatus.inServiceHealthy()))
         .thenReturn(new ArrayList<>(datanodes));
-    when(mockNodeManager.getNodeByUuid(datanodes.get(0).getUuid()))
+    when(mockNodeManager.getNode(datanodes.get(0).getID()))
         .thenReturn(datanodes.get(0));
-    when(mockNodeManager.getNodeByUuid(datanodes.get(1).getUuid()))
+    when(mockNodeManager.getNode(datanodes.get(1).getID()))
         .thenReturn(datanodes.get(1));
-    when(mockNodeManager.getNodeByUuid(datanodes.get(2).getUuid()))
+    when(mockNodeManager.getNode(datanodes.get(2).getID()))
         .thenReturn(datanodes.get(2));
 
     SCMContainerPlacementRandom scmContainerPlacementRandom =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 7221fac8b6..aedf64f926 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -219,7 +219,7 @@ public void testOnMessage(@TempDir File tempDir) throws Exception {
 
     // First set the node to IN_MAINTENANCE and ensure the container replicas
     // are not removed on the dead event
-    datanode1 = nodeManager.getNodeByUuid(datanode1.getUuidString());
+    datanode1 = nodeManager.getNode(datanode1.getID());
     assertTrue(
         nodeManager.getClusterNetworkTopologyMap().contains(datanode1));
     nodeManager.setNodeOperationalState(datanode1,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 3e25706962..80cf157d68 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -582,8 +582,7 @@ public void testScrubOpenWithUnregisteredNodes() throws Exception {
         pipeline.getPipelineState());
 
     // Now, "unregister" one of the nodes in the pipeline
-    DatanodeDetails firstDN = nodeManager.getNodeByUuid(
-        pipeline.getNodes().get(0).getUuidString());
+    DatanodeDetails firstDN = nodeManager.getNode(pipeline.getNodes().get(0).getID());
     nodeManager.getClusterNetworkTopologyMap().remove(firstDN);
 
     pipelineManager.scrubPipelines();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java
index 495128e859..80de91fe31 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementFactory.java
@@ -128,7 +128,7 @@ private void setupRacks(int datanodeCount, int nodesPerRack,
         false, 10);
     nodeManager = spy(nodeManagerBase);
     for (DatanodeInfo dn: dnInfos) {
-      when(nodeManager.getNodeByUuid(dn.getUuidString()))
+      when(nodeManager.getNode(dn.getID()))
           .thenReturn(dn);
     }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 23c7bf3930..6f41bb630e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -639,7 +639,7 @@ private void testScmProcessDatanodeHeartbeat(MiniOzoneCluster cluster) {
     assertEquals(cluster.getHddsDatanodes().size(), allNodes.size());
 
     for (DatanodeDetails node : allNodes) {
-      DatanodeInfo datanodeInfo = assertInstanceOf(DatanodeInfo.class, nodeManager.getNodeByUuid(node.getUuid()));
+      DatanodeInfo datanodeInfo = assertInstanceOf(DatanodeInfo.class, nodeManager.getNode(node.getID()));
       assertNotNull(datanodeInfo);
       assertThat(datanodeInfo.getLastHeartbeatTime()).isPositive();
       assertEquals(datanodeInfo.getUuidString(), datanodeInfo.getNetworkName());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
index 0b3e57d3d9..edcbf75f82 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
@@ -51,7 +51,6 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
@@ -61,6 +60,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
@@ -203,10 +203,10 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned()
     // Once we have a DN id, look it up in the NM, as the datanodeDetails
     // instance in the pipeline may not be the same as the one stored in the
     // NM.
-    final UUID dnID = pipeline.getNodes().stream()
+    final DatanodeID dnID = pipeline.getNodes().stream()
         .filter(node -> ecPipeline.getNodes().contains(node))
-        .findFirst().get().getUuid();
-    final DatanodeDetails toDecommission = nm.getNodeByUuid(dnID.toString());
+        .findFirst().get().getID();
+    final DatanodeDetails toDecommission = nm.getNode(dnID);
 
     scmClient.decommissionNodes(Arrays.asList(
         getDNHostAndPort(toDecommission)), false);
@@ -273,14 +273,14 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
 
     // After the SCM restart, the DN should report as DECOMMISSIONING, then
     // it should re-enter the decommission workflow and move to DECOMMISSIONED
-    DatanodeDetails newDn = nm.getNodeByUuid(dn.getUuid().toString());
+    DatanodeDetails newDn = nm.getNode(dn.getID());
     waitForDnToReachOpState(nm, newDn, DECOMMISSIONED);
     waitForDnToReachPersistedOpState(newDn, DECOMMISSIONED);
 
     // Now the node is decommissioned, so restart SCM again
     cluster.restartStorageContainerManager(true);
     setManagers();
-    newDn = nm.getNodeByUuid(dn.getUuid().toString());
+    newDn = nm.getNode(dn.getID());
 
     // On initial registration, the DN should report its operational state
     // and if it is decommissioned, that should be updated in the NodeStatus
@@ -298,7 +298,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart()
     scmClient.recommissionNodes(Arrays.asList(getDNHostAndPort(dn)));
     // Now restart it and ensure it remains IN_SERVICE
     cluster.restartHddsDatanode(dnIndex, true);
-    newDn = nm.getNodeByUuid(dn.getUuid().toString());
+    newDn = nm.getNode(dn.getID());
 
     // As this is not an initial registration since SCM was started, the DN
     // should report its operational state and if it differs from what SCM
@@ -427,10 +427,10 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
     // Once we have a DN id, look it up in the NM, as the datanodeDetails
     // instance in the pipeline may not be the same as the one stored in the
     // NM.
-    final UUID dnID = pipeline.getNodes().stream()
+    final DatanodeID dnID = pipeline.getNodes().stream()
         .filter(node -> ecPipeline.getNodes().contains(node))
-        .findFirst().get().getUuid();
-    final DatanodeDetails dn = nm.getNodeByUuid(dnID.toString());
+        .findFirst().get().getID();
+    final DatanodeDetails dn = nm.getNode(dnID);
 
     scmClient.startMaintenanceNodes(Arrays.asList(
         getDNHostAndPort(dn)), 0, true);
@@ -460,7 +460,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
 
     // Restart the DN and it should keep the IN_MAINTENANCE state
     cluster.restartHddsDatanode(dn, true);
-    DatanodeDetails newDN = nm.getNodeByUuid(dn.getUuid().toString());
+    DatanodeDetails newDN = nm.getNode(dn.getID());
     waitForDnToReachHealthState(nm, newDN, HEALTHY);
     waitForDnToReachPersistedOpState(newDN, IN_MAINTENANCE);
 
@@ -474,7 +474,7 @@ public void testSingleNodeWithOpenPipelineCanGotoMaintenance()
 
     // Now restart it and ensure it remains IN_SERVICE
     cluster.restartHddsDatanode(dnIndex, true);
-    DatanodeDetails newDn = nm.getNodeByUuid(dn.getUuid().toString());
+    DatanodeDetails newDn = nm.getNode(dn.getID());
 
     // As this is not an initial registration since SCM was started, the DN
     // should report its operational state and if it differs from what SCM
@@ -583,7 +583,7 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart()
 
     List<DatanodeDetails> newDns = new ArrayList<>();
     for (DatanodeDetails dn : forMaintenance) {
-      newDns.add(nm.getNodeByUuid(dn.getUuid().toString()));
+      newDns.add(nm.getNode(dn.getID()));
     }
 
     // Ensure all 3 DNs go to maintenance
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 44c9d87695..82acb47565 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -46,6 +46,7 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.DecommissionUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -213,7 +214,7 @@ public Response removeDatanodes(List<String> uuids) {
     Preconditions.checkArgument(!uuids.isEmpty(), "Datanode list argument should not be empty");
     try {
       for (String uuid : uuids) {
-        DatanodeDetails nodeByUuid = nodeManager.getNodeByUuid(uuid);
+        DatanodeDetails nodeByUuid = nodeManager.getNode(DatanodeID.fromUuidString(uuid));
         try {
           if (preChecksSuccess(nodeByUuid, failedNodeErrorResponseMap)) {
             removedDatanodes.add(DatanodeMetadata.newBuilder()
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 97636e92fc..3d3160c80a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -56,8 +56,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report,
           dnFromReport);
     }
 
-    DatanodeDetails dd =
-        getNodeManager().getNodeByUuid(dnFromReport.getUuid());
+    final DatanodeDetails dd = getNodeManager().getNode(dnFromReport.getID());
     if (dd == null) {
       LOG.warn("Received container report from unknown datanode {}",
           dnFromReport);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index fbbb58a124..edcff24bc1 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -307,7 +307,7 @@ public void updateNodeOperationalStateFromScm(HddsProtos.Node scmNode,
           nodeStatus.getOperationalState());
 
       setNodeOperationalState(dnDetails, nodeOperationalStateFromScm);
-      DatanodeDetails scmDnd = getNodeByUuid(dnDetails.getUuid());
+      DatanodeDetails scmDnd = getNode(dnDetails.getID());
       scmDnd.setPersistedOpState(nodeOperationalStateFromScm);
     }
   }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index bae61bc54e..9bdd5d82b2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -670,7 +670,7 @@ public void testGetDatanodes() throws Exception {
     // Change Node OperationalState with NodeManager
     final NodeManager nodeManager = reconScm.getScmNodeManager();
     final DatanodeDetails dnDetailsInternal =
-        nodeManager.getNodeByUuid(datanodeDetails.getUuidString());
+        nodeManager.getNode(datanodeDetails.getID());
     // Backup existing state and sanity check
     final NodeStatus nStatus = nodeManager.getNodeStatus(dnDetailsInternal);
     final NodeOperationalState backupOpState =
@@ -1280,7 +1280,7 @@ public void testExplicitRemovalOfDecommissionedNode() throws Exception {
     // Change Node3 OperationalState with NodeManager to NodeOperationalState.DECOMMISSIONED
     final NodeManager nodeManager = reconScm.getScmNodeManager();
     final DatanodeDetails dnDetailsInternal =
-        nodeManager.getNodeByUuid(datanodeDetails3.getUuidString());
+        nodeManager.getNode(datanodeDetails3.getID());
     // Backup existing state and sanity check
     final NodeStatus nStatus = nodeManager.getNodeStatus(dnDetailsInternal);
     final NodeOperationalState backupOpState =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 75db659467..af35a1521f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -31,11 +31,11 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
@@ -137,7 +137,7 @@ public void testProcessICRStateMismatch()
       DatanodeDetails datanodeDetails =
           containerWithPipeline.getPipeline().getFirstNode();
       NodeManager nodeManagerMock = mock(NodeManager.class);
-      when(nodeManagerMock.getNodeByUuid(any(UUID.class)))
+      when(nodeManagerMock.getNode(any(DatanodeID.class)))
           .thenReturn(datanodeDetails);
       IncrementalContainerReportFromDatanode reportMock =
           mock(IncrementalContainerReportFromDatanode.class);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index 9fa7434076..748a778fb3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -39,6 +39,7 @@
 import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -109,7 +110,6 @@ public void testReconNodeManagerInitWithInvalidNetworkTopology() throws IOExcept
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
     // Updating the node's topology depth to make it invalid.
     datanodeDetails.setNetworkLocation("/default-rack/xyz/");
-    String uuidString = datanodeDetails.getUuidString();
 
     // Register a random datanode.
+    RegisteredCommand register = reconNodeManager.register(datanodeDetails, null, null);
@@ -121,7 +121,7 @@ public void testReconNodeManagerInitWithInvalidNetworkTopology() throws IOExcept
     assertTrue(reconContext.getErrors().get(0).equals(ReconContext.ErrorCode.INVALID_NETWORK_TOPOLOGY));
 
     assertEquals(0, reconNodeManager.getAllNodes().size());
-    assertNull(reconNodeManager.getNodeByUuid(uuidString));
+    assertNull(reconNodeManager.getNode(datanodeDetails.getID()));
   }
 
   @Test
@@ -139,15 +139,15 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException {
     assertThat(reconNodeManager.getAllNodes()).isEmpty();
 
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
-    String uuidString = datanodeDetails.getUuidString();
+    final DatanodeID datanodeID = datanodeDetails.getID();
 
     // Register a random datanode.
     reconNodeManager.register(datanodeDetails, null, null);
-    reconNewNodeHandler.onMessage(reconNodeManager.getNodeByUuid(uuidString),
+    reconNewNodeHandler.onMessage(reconNodeManager.getNode(datanodeID),
         null);
 
     assertEquals(1, reconNodeManager.getAllNodes().size());
-    assertNotNull(reconNodeManager.getNodeByUuid(uuidString));
+    assertNotNull(reconNodeManager.getNode(datanodeID));
 
     // If any commands are added to the eventQueue without using the onMessage
     // interface, then they should be filtered out and not returned to the DN
@@ -162,8 +162,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException {
         new ReregisterCommand());
 
     // OperationalState sanity check
-    final DatanodeDetails dnDetails =
-        reconNodeManager.getNodeByUuid(datanodeDetails.getUuidString());
+    final DatanodeDetails dnDetails = reconNodeManager.getNode(datanodeID);
     assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
         dnDetails.getPersistedOpState());
     assertEquals(dnDetails.getPersistedOpState(),
@@ -204,8 +203,7 @@ public void testReconNodeDB() throws IOException, NodeNotFoundException {
 
     // Verify that the node information was persisted and loaded back.
     assertEquals(1, reconNodeManager.getAllNodes().size());
-    assertNotNull(
-        reconNodeManager.getNodeByUuid(datanodeDetails.getUuidString()));
+    assertNotNull(reconNodeManager.getNode(datanodeDetails.getID()));
   }
 
   @Test
@@ -229,13 +227,13 @@ public void testUpdateNodeOperationalStateFromScm() throws Exception {
 
     reconNodeManager.register(datanodeDetails, null, null);
     assertEquals(IN_SERVICE, reconNodeManager
-        .getNodeByUuid(datanodeDetails.getUuidString()).getPersistedOpState());
+        .getNode(datanodeDetails.getID()).getPersistedOpState());
 
     when(node.getNodeOperationalStates(eq(0)))
         .thenReturn(DECOMMISSIONING);
     reconNodeManager.updateNodeOperationalStateFromScm(node, datanodeDetails);
     assertEquals(DECOMMISSIONING, reconNodeManager
-        .getNodeByUuid(datanodeDetails.getUuidString()).getPersistedOpState());
+        .getNode(datanodeDetails.getID()).getPersistedOpState());
     List<DatanodeDetails> nodes =
         reconNodeManager.getNodes(DECOMMISSIONING, null);
     assertEquals(1, nodes.size());
@@ -258,17 +256,17 @@ public void testDatanodeUpdate() throws IOException {
 
     DatanodeDetails datanodeDetails = randomDatanodeDetails();
     datanodeDetails.setHostName("hostname1");
-    String uuidString = datanodeDetails.getUuidString();
+    final DatanodeID datanodeID = datanodeDetails.getID();
 
     // Register "hostname1" datanode.
     reconNodeManager.register(datanodeDetails, null, null);
-    reconNewNodeHandler.onMessage(reconNodeManager.getNodeByUuid(uuidString),
+    reconNewNodeHandler.onMessage(reconNodeManager.getNode(datanodeID),
         null);
 
     assertEquals(1, reconNodeManager.getAllNodes().size());
-    assertNotNull(reconNodeManager.getNodeByUuid(uuidString));
+    assertNotNull(reconNodeManager.getNode(datanodeID));
     assertEquals("hostname1",
-        reconNodeManager.getNodeByUuid(uuidString).getHostName());
+        reconNodeManager.getNode(datanodeID).getHostName());
 
     datanodeDetails.setHostName("hostname2");
     // Upon processing the heartbeat, the illegal command should be filtered out
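
A further sketch, also not part of the patch, of the conversion pattern this commit uses where callers still receive a UUID or a UUID string from clients (for example SCMClientProtocolServer and Recon's NodeEndpoint above). The DatanodeID.of and DatanodeID.fromUuidString factory methods are the ones the diff itself calls; the wrapper class and method names here are hypothetical.

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.DatanodeID;
    import org.apache.hadoop.hdds.scm.node.NodeManager;

    /** Hypothetical sketch of the UUID-to-DatanodeID conversions used by the patch. */
    final class DatanodeIdLookups {
      private DatanodeIdLookups() {
      }

      static DatanodeDetails byUuid(NodeManager nodeManager, UUID uuid) {
        // Used where callers already hold a java.util.UUID (e.g. queryNode).
        return nodeManager.getNode(DatanodeID.of(uuid));
      }

      static DatanodeDetails byUuidString(NodeManager nodeManager, String uuid) {
        // Used where callers hold the UUID as a string (e.g. sortDatanodes, removeDatanodes).
        return nodeManager.getNode(DatanodeID.fromUuidString(uuid));
      }
    }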


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

