This is an automated email from the ASF dual-hosted git repository.
szetszwo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new d51ccf72d89 HDDS-13016. Add a getAllNodeCount() method to NodeManager.
(#8445)
d51ccf72d89 is described below
commit d51ccf72d89171695dfa29ea420279fecebb888d
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Wed May 14 01:33:38 2025 -0700
HDDS-13016. Add a getAllNodeCount() method to NodeManager. (#8445)
---
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 4 +-
.../hdds/scm/node/NodeDecommissionManager.java | 4 +-
.../apache/hadoop/hdds/scm/node/NodeManager.java | 11 ++++--
.../hadoop/hdds/scm/node/NodeStateManager.java | 4 ++
.../hadoop/hdds/scm/node/SCMNodeManager.java | 43 +++++++---------------
.../hdds/scm/TestSCMCommonPlacementPolicy.java | 6 +--
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 8 ++--
.../hadoop/hdds/scm/TestContainerOperations.java | 2 +-
.../hdds/scm/TestStorageContainerManager.java | 2 +-
.../scm/node/TestDecommissionAndMaintenance.java | 4 +-
.../hdds/scm/pipeline/TestMultiRaftSetup.java | 2 +-
.../apache/hadoop/ozone/TestOMSortDatanodes.java | 8 ++--
.../hadoop/ozone/recon/TestReconScmSnapshot.java | 4 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 40 ++++++++++----------
.../ozone/recon/api/ClusterStateEndpoint.java | 5 +--
.../hadoop/ozone/recon/api/NodeEndpoint.java | 9 +----
.../ozone/recon/api/types/DatanodesResponse.java | 8 +++-
.../ozone/recon/fsck/ReconSafeModeMgrTask.java | 9 ++---
18 files changed, 79 insertions(+), 94 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 991d89e222f..f2da85349d4 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -421,7 +421,7 @@ private static Pipeline
getFromProtobufSetCreationTimestamp(HddsProtos.Pipeline
.build();
}
- public Pipeline copyWithNodesInOrder(List<DatanodeDetails> nodes) {
+ public Pipeline copyWithNodesInOrder(List<? extends DatanodeDetails> nodes) {
return toBuilder().setNodesInOrder(nodes).build();
}
@@ -611,7 +611,7 @@ public Builder setNodeOrder(List<Integer> orders) {
return this;
}
- public Builder setNodesInOrder(List<DatanodeDetails> nodes) {
+ public Builder setNodesInOrder(List<? extends DatanodeDetails> nodes) {
this.nodesInOrder = new LinkedList<>(nodes);
return this;
}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index bb04e776043..e279a63a4ae 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -423,7 +423,7 @@ private synchronized boolean
checkIfDecommissionPossible(List<DatanodeDetails> d
}
int reqNodes = cif.getReplicationConfig().getRequiredNodes();
if ((inServiceTotal - numDecom) < reqNodes) {
-      int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal;
+      final int unHealthyTotal = nodeManager.getAllNodeCount() - inServiceTotal;
String errorMsg = "Insufficient nodes. Tried to decommission " +
dns.size() +
" nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and "
+ unHealthyTotal +
" not IN-SERVICE or not HEALTHY nodes. Cannot decommission as
a minimum of " + reqNodes +
@@ -591,7 +591,7 @@ private synchronized boolean
checkIfMaintenancePossible(List<DatanodeDetails> dn
minInService = maintenanceReplicaMinimum;
}
if ((inServiceTotal - numMaintenance) < minInService) {
-      int unHealthyTotal = nodeManager.getAllNodes().size() - inServiceTotal;
+      final int unHealthyTotal = nodeManager.getAllNodeCount() - inServiceTotal;
String errorMsg = "Insufficient nodes. Tried to start maintenance
for " + dns.size() +
" nodes out of " + inServiceTotal + " IN-SERVICE HEALTHY and "
+ unHealthyTotal +
" not IN-SERVICE or not HEALTHY nodes. Cannot enter
maintenance mode as a minimum of " + minInService +
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index cd64d669aab..6be01d6db56 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -136,11 +136,14 @@ int getNodeCount(
NodeOperationalState opState, NodeState health);
/**
- * Get all datanodes known to SCM.
- *
- * @return List of DatanodeDetails known to SCM.
+ * @return all datanodes known to SCM.
*/
- List<DatanodeDetails> getAllNodes();
+ List<? extends DatanodeDetails> getAllNodes();
+
+ /** @return the number of datanodes. */
+ default int getAllNodeCount() {
+ return getAllNodes().size();
+ }
/**
* Returns the aggregated node stats.
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 79a239a5297..5d98c31af0c 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -544,6 +544,10 @@ public List<DatanodeInfo> getAllNodes() {
return nodeStateMap.getAllDatanodeInfos();
}
+ int getAllNodeCount() {
+ return nodeStateMap.getNodeCount();
+ }
+
/**
* Sets the operational state of the given node. Intended to be called when
* a node is being decommissioned etc.
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index edf934084d7..6ca2f6493da 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -36,7 +36,6 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -252,15 +251,14 @@ public List<DatanodeDetails> getNodes(
.map(node -> (DatanodeDetails)node).collect(Collectors.toList());
}
- /**
- * Returns all datanodes that are known to SCM.
- *
- * @return List of DatanodeDetails
- */
@Override
- public List<DatanodeDetails> getAllNodes() {
- return nodeStateManager.getAllNodes().stream()
- .map(node -> (DatanodeDetails) node).collect(Collectors.toList());
+ public List<DatanodeInfo> getAllNodes() {
+ return nodeStateManager.getAllNodes();
+ }
+
+ @Override
+ public int getAllNodeCount() {
+ return nodeStateManager.getAllNodeCount();
}
/**
@@ -449,9 +447,9 @@ public RegisteredCommand register(
LOG.info("Updated datanode to: {}", dn);
scmNodeEventPublisher.fireEvent(SCMEvents.NODE_ADDRESS_UPDATE, dn);
} else if (isVersionChange(oldNode.getVersion(),
datanodeDetails.getVersion())) {
- LOG.info("Update the version for registered datanode = {}, " +
+ LOG.info("Update the version for registered datanode {}, " +
"oldVersion = {}, newVersion = {}.",
- datanodeDetails.getUuid(), oldNode.getVersion(),
datanodeDetails.getVersion());
+ datanodeDetails, oldNode.getVersion(),
datanodeDetails.getVersion());
nodeStateManager.updateNode(datanodeDetails, layoutInfo);
}
} catch (NodeNotFoundException e) {
@@ -1725,29 +1723,16 @@ public DatanodeInfo getNode(DatanodeID id) {
*/
@Override
public List<DatanodeDetails> getNodesByAddress(String address) {
- List<DatanodeDetails> allNodes = getAllNodes();
- List<DatanodeDetails> results = new LinkedList<>();
if (Strings.isNullOrEmpty(address)) {
- LOG.warn("address is null");
- return results;
+ return Collections.emptyList();
}
Set<DatanodeID> datanodeIDS = dnsToDnIdMap.get(address);
if (datanodeIDS == null) {
- LOG.debug("Cannot find node for address {}", address);
- return results;
+ return Collections.emptyList();
}
-
- datanodeIDS.forEach(datanodeID -> {
- try {
- List<DatanodeDetails> datanodeDetails = allNodes.stream().
- filter(node -> node.getID().equals(datanodeID)).
- collect(Collectors.toList());
- results.addAll(datanodeDetails);
- } catch (Exception e) {
- LOG.warn("Error find node for DataNode ID {}", datanodeID);
- }
- });
- return results;
+ return datanodeIDS.stream()
+ .map(this::getNode)
+ .collect(Collectors.toList());
}
/**
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index 8646fd9ec2b..b1f9a6f0f1c 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -68,7 +68,7 @@
*/
public class TestSCMCommonPlacementPolicy {
- private NodeManager nodeManager;
+ private MockNodeManager nodeManager;
private OzoneConfiguration conf;
@BeforeEach
@@ -535,7 +535,7 @@ private static class DummyPlacementPolicy extends
SCMCommonPlacementPolicy {
DummyPlacementPolicy(NodeManager nodeManager, ConfigurationSource conf,
int rackCnt) {
this(nodeManager, conf,
- IntStream.range(0, nodeManager.getAllNodes().size()).boxed()
+ IntStream.range(0, nodeManager.getAllNodeCount()).boxed()
.collect(Collectors.toMap(Function.identity(),
idx -> idx % rackCnt)), rackCnt);
}
@@ -552,7 +552,7 @@ private static class DummyPlacementPolicy extends
SCMCommonPlacementPolicy {
this.rackCnt = rackCnt;
this.racks = IntStream.range(0, rackCnt)
.mapToObj(i -> mock(Node.class)).collect(Collectors.toList());
- List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
+      final List<? extends DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
rackMap = datanodeRackMap.entrySet().stream()
.collect(Collectors.toMap(
entry -> datanodeDetails.get(entry.getKey()),
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 9e9336767d8..52f1feddd16 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -1828,7 +1828,7 @@ public void testScmRegisterNodeWith4LayerNetworkTopology()
assertEquals(nodeCount,
nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
assertEquals(4, clusterMap.getMaxLevel());
- List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
+ final List<DatanodeInfo> nodeList = nodeManager.getAllNodes();
nodeList.forEach(node -> assertTrue(
node.getNetworkLocation().startsWith("/rack1/ng")));
nodeList.forEach(node -> assertNotNull(node.getParent()));
@@ -1872,7 +1872,7 @@ void testScmRegisterNodeWithNetworkTopology(boolean
useHostname)
nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
assertEquals(nodeCount, clusterMap.getNumOfLeafNode(""));
assertEquals(3, clusterMap.getMaxLevel());
- List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
+ final List<DatanodeInfo> nodeList = nodeManager.getAllNodes();
nodeList.forEach(node ->
assertEquals("/rack1", node.getNetworkLocation()));
@@ -2019,7 +2019,7 @@ public void testScmRegisterNodeWithUpdatedIpAndHostname()
nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
assertEquals(1, clusterMap.getNumOfLeafNode(""));
assertEquals(4, clusterMap.getMaxLevel());
- List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
+ final List<DatanodeInfo> nodeList = nodeManager.getAllNodes();
assertEquals(1, nodeList.size());
DatanodeDetails returnedNode = nodeList.get(0);
@@ -2039,7 +2039,7 @@ public void testScmRegisterNodeWithUpdatedIpAndHostname()
assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
assertEquals(1, clusterMap.getNumOfLeafNode(""));
assertEquals(4, clusterMap.getMaxLevel());
- List<DatanodeDetails> updatedNodeList = nodeManager.getAllNodes();
+ final List<DatanodeInfo> updatedNodeList = nodeManager.getAllNodes();
assertEquals(1, updatedNodeList.size());
DatanodeDetails returnedUpdatedNode = updatedNodeList.get(0);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
index cc0d515f198..492cf1e5638 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
@@ -188,7 +188,7 @@ public void testDatanodeUsageInfoCompatibility() throws
IOException {
@Test
public void testDatanodeUsageInfoContainerCount() throws Exception {
- List<DatanodeDetails> dnList = cluster().getStorageContainerManager()
+    List<? extends DatanodeDetails> dnList = cluster().getStorageContainerManager()
.getScmNodeManager()
.getAllNodes();
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index cd93f8905ca..6168829f70a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -630,7 +630,7 @@ public void testScmInfo(@TempDir Path tempDir) throws
Exception {
*/
private void testScmProcessDatanodeHeartbeat(MiniOzoneCluster cluster) {
NodeManager nodeManager =
cluster.getStorageContainerManager().getScmNodeManager();
- List<DatanodeDetails> allNodes = nodeManager.getAllNodes();
+ List<? extends DatanodeDetails> allNodes = nodeManager.getAllNodes();
assertEquals(cluster.getHddsDatanodes().size(), allNodes.size());
for (DatanodeDetails node : allNodes) {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
index edcbf75f82a..341bbedf42d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java
@@ -316,7 +316,7 @@ public void testInsufficientNodesCannotBeDecommissioned()
// Generate some data on the empty cluster to create some containers
generateData(20, "key", ratisRepConfig);
- final List<DatanodeDetails> toDecommission = nm.getAllNodes();
+ final List<? extends DatanodeDetails> toDecommission = nm.getAllNodes();
// trying to decommission 5 nodes should leave the cluster with 2 nodes,
// which is not sufficient for RATIS.THREE replication. It should not be
allowed.
@@ -706,7 +706,7 @@ public void testInsufficientNodesCannotBePutInMaintenance()
throws Exception {
// Generate some data on the empty cluster to create some containers
generateData(20, "key", ratisRepConfig);
- final List<DatanodeDetails> toMaintenance = nm.getAllNodes();
+ final List<? extends DatanodeDetails> toMaintenance = nm.getAllNodes();
// trying to move 6 nodes to maintenance should leave the cluster with 1
node,
// which is not sufficient for RATIS.THREE replication (3 -
maintenanceReplicaMinimum = 2).
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
index 2b9a6f9ed5b..a7bee7f3d8b 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java
@@ -149,7 +149,7 @@ private void assertNotSamePeers() {
nodeManager.getAllNodes().forEach((dn) -> {
Collection<DatanodeDetails> peers = nodeManager.getPeerList(dn);
assertThat(peers).doesNotContain(dn);
- List<DatanodeDetails> trimList = nodeManager.getAllNodes();
+ List<? extends DatanodeDetails> trimList = nodeManager.getAllNodes();
trimList.remove(dn);
assertThat(peers).containsAll(trimList);
});
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
index c3ecf664942..1a5fe8bbf8d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java
@@ -128,7 +128,7 @@ public static void cleanup() throws Exception {
public void sortDatanodesRelativeToDatanode() {
for (DatanodeDetails dn : nodeManager.getAllNodes()) {
assertEquals(ROOT_LEVEL + 2, dn.getLevel());
- List<DatanodeDetails> sorted =
+ List<? extends DatanodeDetails> sorted =
keyManager.sortDatanodes(nodeManager.getAllNodes(), nodeAddress(dn));
assertEquals(dn, sorted.get(0),
"Source node should be sorted very first");
@@ -146,12 +146,12 @@ public void sortDatanodesRelativeToNonDatanode() {
@Test
public void testSortDatanodes() {
- List<DatanodeDetails> nodes = nodeManager.getAllNodes();
+ List<? extends DatanodeDetails> nodes = nodeManager.getAllNodes();
// sort normal datanodes
String client;
client = nodeManager.getAllNodes().get(0).getIpAddress();
- List<DatanodeDetails> datanodeDetails =
+ List<? extends DatanodeDetails> datanodeDetails =
keyManager.sortDatanodes(nodes, client);
assertEquals(NODE_COUNT, datanodeDetails.size());
@@ -166,7 +166,7 @@ public void testSortDatanodes() {
assertEquals(NODE_COUNT, datanodeDetails.size());
}
- private static void assertRackOrder(String rack, List<DatanodeDetails> list)
{
+ private static void assertRackOrder(String rack, List<? extends
DatanodeDetails> list) {
int size = list.size();
for (int i = 0; i < size / 2; i++) {
assertEquals(rack, list.get(i).getNetworkLocation(),
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
index 658cbee5fcb..9f75a3f08a4 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java
@@ -127,7 +127,7 @@ public void testExplicitRemovalOfNode() throws Exception {
ReconNodeManager nodeManager = (ReconNodeManager)
ozoneCluster.getReconServer()
.getReconStorageContainerManager().getScmNodeManager();
long nodeDBCountBefore = nodeManager.getNodeDBKeyCount();
- List<DatanodeDetails> allNodes = nodeManager.getAllNodes();
+ List<? extends DatanodeDetails> allNodes = nodeManager.getAllNodes();
assertEquals(nodeDBCountBefore, allNodes.size());
DatanodeDetails datanodeDetails = allNodes.get(3);
@@ -137,7 +137,7 @@ public void testExplicitRemovalOfNode() throws Exception {
try {
return nodeManager.getNodeStatus(datanodeDetails).isDead();
} catch (NodeNotFoundException e) {
- fail("getNodeStatus() Failed for " + datanodeDetails.getUuid(), e);
+ fail("getNodeStatus() Failed for " + datanodeDetails, e);
throw new RuntimeException(e);
}
}, 2000, 10000);
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 96673113771..909ee5a277d 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1833,7 +1833,7 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args,
boolean recursive,
refreshPipelineFromCache(keyInfoList);
if (args.getSortDatanodes()) {
- sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+ sortDatanodes(clientAddress, keyInfoList);
}
return fileStatusList;
}
@@ -1962,7 +1962,7 @@ private List<OzoneFileStatus> sortPipelineInfo(
refreshPipelineFromCache(keyInfoList);
if (omKeyArgs.getSortDatanodes()) {
- sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
+ sortDatanodes(clientAddress, keyInfoList);
}
return fileStatusFinalList;
@@ -2001,9 +2001,13 @@ private FileEncryptionInfo
getFileEncryptionInfo(OmBucketInfo bucketInfo)
return encInfo;
}
- private void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) {
+ private void sortDatanodes(String clientMachine, OmKeyInfo keyInfo) {
+ sortDatanodes(clientMachine, Collections.singletonList(keyInfo));
+ }
+
+ private void sortDatanodes(String clientMachine, List<OmKeyInfo> keyInfos) {
if (keyInfos != null && clientMachine != null) {
-      Map<Set<String>, List<DatanodeDetails>> sortedPipelines = new HashMap<>();
+      final Map<Set<String>, List<? extends DatanodeDetails>> sortedPipelines = new HashMap<>();
for (OmKeyInfo keyInfo : keyInfos) {
OmKeyLocationInfoGroup key = keyInfo.getLatestVersionLocations();
if (key == null) {
@@ -2013,14 +2017,16 @@ private void sortDatanodes(String clientMachine,
OmKeyInfo... keyInfos) {
for (OmKeyLocationInfo k : key.getLocationList()) {
Pipeline pipeline = k.getPipeline();
List<DatanodeDetails> nodes = pipeline.getNodes();
- List<String> uuidList = toNodeUuid(nodes);
- Set<String> uuidSet = new HashSet<>(uuidList);
- List<DatanodeDetails> sortedNodes = sortedPipelines.get(uuidSet);
+ if (nodes.isEmpty()) {
+ LOG.warn("No datanodes in pipeline {}", pipeline.getId());
+ continue;
+ }
+
+          final Set<String> uuidSet = nodes.stream().map(DatanodeDetails::getUuidString)
+              .collect(Collectors.toSet());
+
+          List<? extends DatanodeDetails> sortedNodes = sortedPipelines.get(uuidSet);
if (sortedNodes == null) {
- if (nodes.isEmpty()) {
- LOG.warn("No datanodes in pipeline {}", pipeline.getId());
- continue;
- }
sortedNodes = sortDatanodes(nodes, clientMachine);
if (sortedNodes != null) {
sortedPipelines.put(uuidSet, sortedNodes);
@@ -2038,7 +2044,7 @@ private void sortDatanodes(String clientMachine,
OmKeyInfo... keyInfos) {
}
@VisibleForTesting
- public List<DatanodeDetails> sortDatanodes(List<DatanodeDetails> nodes,
+  public List<? extends DatanodeDetails> sortDatanodes(List<? extends DatanodeDetails> nodes,
String clientMachine) {
final Node client = getClientNode(clientMachine, nodes);
return ozoneManager.getClusterMap()
@@ -2046,7 +2052,7 @@ public List<DatanodeDetails>
sortDatanodes(List<DatanodeDetails> nodes,
}
private Node getClientNode(String clientMachine,
- List<DatanodeDetails> nodes) {
+ List<? extends DatanodeDetails> nodes) {
List<DatanodeDetails> matchingNodes = new ArrayList<>();
boolean useHostname = ozoneManager.getConfiguration().getBoolean(
HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
@@ -2092,14 +2098,6 @@ private String resolveNodeLocation(String hostname) {
}
}
- private static List<String> toNodeUuid(Collection<DatanodeDetails> nodes) {
- List<String> nodeSet = new ArrayList<>(nodes.size());
- for (DatanodeDetails node : nodes) {
- nodeSet.add(node.getUuidString());
- }
- return nodeSet;
- }
-
private void slimLocationVersion(OmKeyInfo... keyInfos) {
if (keyInfos != null) {
for (OmKeyInfo keyInfo : keyInfos) {
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
index 9d72dcb6b0b..e3bbf10fe1a 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
@@ -34,7 +34,6 @@
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
@@ -95,8 +94,6 @@ public class ClusterStateEndpoint {
@GET
public Response getClusterState() {
ContainerStateCounts containerStateCounts = new ContainerStateCounts();
- List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
-
int pipelines = this.pipelineManager.getPipelines().size();
List<UnhealthyContainers> missingContainers = containerHealthSchemaManager
@@ -181,7 +178,7 @@ public Response getClusterState() {
.setPipelines(pipelines)
.setContainers(containerStateCounts.getTotalContainerCount())
.setMissingContainers(containerStateCounts.getMissingContainerCount())
- .setTotalDatanodes(datanodeDetails.size())
+ .setTotalDatanodes(nodeManager.getAllNodeCount())
.setHealthyDatanodes(healthyDatanodes)
.setOpenContainers(containerStateCounts.getOpenContainersCount())
.setDeletedContainers(containerStateCounts.getDeletedContainersCount())
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index c4df5a2b529..a0bad57d2a5 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -54,7 +54,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -108,9 +107,7 @@ public class NodeEndpoint {
@GET
public Response getDatanodes() {
List<DatanodeMetadata> datanodes = new ArrayList<>();
- List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
-
- datanodeDetails.forEach(datanode -> {
+ nodeManager.getAllNodes().forEach(datanode -> {
DatanodeStorageReport storageReport = getStorageReport(datanode);
NodeState nodeState = null;
try {
@@ -157,7 +154,6 @@ public Response getDatanodes() {
datanode.getUuid(), ex);
}
- DatanodeInfo dnInfo = (DatanodeInfo) datanode;
datanodes.add(builder.setHostname(nodeManager.getHostName(datanode))
.setDatanodeStorageReport(storageReport)
.setLastHeartbeat(nodeManager.getLastHeartbeat(datanode))
@@ -169,8 +165,7 @@ public Response getDatanodes() {
.setVersion(nodeManager.getVersion(datanode))
.setSetupTime(nodeManager.getSetupTime(datanode))
.setRevision(nodeManager.getRevision(datanode))
-          .setLayoutVersion(
-              dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion())
+          .setLayoutVersion(datanode.getLastKnownLayoutVersion().getMetadataLayoutVersion())
.setNetworkLocation(datanode.getNetworkLocation())
.build());
});
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java
index ac0c0585e46..c0ad252b8e7 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodesResponse.java
@@ -19,8 +19,8 @@
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.Map;
/**
@@ -48,7 +48,11 @@ public class DatanodesResponse {
private Map<String, String> failedNodeErrorResponseMap;
public DatanodesResponse() {
- this(0, new ArrayList<>());
+ this(Collections.emptyList());
+ }
+
+ public DatanodesResponse(Collection<DatanodeMetadata> datanodes) {
+ this(datanodes.size(), datanodes);
}
public DatanodesResponse(long totalCount,
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
index 21ecf56e016..5cffb5a84cf 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ReconSafeModeMgrTask.java
@@ -25,10 +25,10 @@
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
import org.apache.hadoop.ozone.recon.scm.ReconSafeModeManager;
@@ -48,7 +48,7 @@ public class ReconSafeModeMgrTask {
private ContainerManager containerManager;
private ReconNodeManager nodeManager;
private ReconSafeModeManager safeModeManager;
- private List<DatanodeDetails> allNodes;
+ private List<DatanodeInfo> allNodes;
private List<ContainerInfo> containers;
private final long interval;
private final long dnHBInterval;
@@ -92,8 +92,7 @@ public synchronized void start() {
}
}
- private void tryReconExitSafeMode()
- throws InterruptedException {
+ private void tryReconExitSafeMode() {
// Recon starting first time
if (null == allNodes || allNodes.isEmpty()) {
return;
@@ -108,7 +107,7 @@ private void tryReconExitSafeMode()
currentContainersInAllDatanodes.addAll(
nodeManager.getContainers(node));
} catch (NodeNotFoundException e) {
- LOG.error("{} node not found.", node.getUuid());
+ LOG.error("Node not found: {}", node);
}
});
if (containers.size() == currentContainersInAllDatanodes.size()) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]