This is an automated email from the ASF dual-hosted git repository.
sshenoy pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 423a7f568cd HDDS-13096. Counter for total volumes and healthy volumes per node in datanode list command. (#9023)
423a7f568cd is described below
commit 423a7f568cdf649c3e95e503ada68670439cc570
Author: sreejasahithi <[email protected]>
AuthorDate: Wed Oct 22 13:20:02 2025 +0530
HDDS-13096. Counter for total volumes and healthy volumes per node in datanode list command. (#9023)
---
.../interface-client/src/main/proto/hdds.proto | 2 +
.../hdds/scm/server/SCMClientProtocolServer.java | 25 ++++++--
.../hdds/scm/cli/datanode/BasicDatanodeInfo.java | 72 ++++++++++++++++++----
.../hdds/scm/cli/datanode/ListInfoSubcommand.java | 32 ++++++----
.../scm/cli/datanode/TestListInfoSubcommand.java | 45 ++++++++++++++
5 files changed, 147 insertions(+), 29 deletions(-)
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index ef76205d91f..504f1a7ebdf 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -200,6 +200,8 @@ message Node {
required DatanodeDetailsProto nodeID = 1;
repeated NodeState nodeStates = 2;
repeated NodeOperationalState nodeOperationalStates = 3;
+ optional int32 totalVolumeCount = 4;
+ optional int32 healthyVolumeCount = 5;
}
message NodePool {
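
Since both new fields are proto2 optional, the generated Java classes expose has*() presence checks alongside the getters, which is what lets the CLI below tell "counts not reported" (for example, an older SCM) apart from a genuine zero. A minimal sketch of a consumer, assuming the generated classes live at their usual org.apache.hadoop.hdds.protocol.proto path; the formatter class itself is illustrative, not part of this commit:

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    public final class VolumeCountFormatter {
      private VolumeCountFormatter() { }

      /** Formats "healthy/total", or "n/a" when the SCM reported no counts. */
      public static String format(HddsProtos.Node node) {
        // proto2 optional scalars generate has*() methods; an absent field
        // must not be read as zero.
        if (node.hasTotalVolumeCount() && node.hasHealthyVolumeCount()) {
          return node.getHealthyVolumeCount() + "/" + node.getTotalVolumeCount();
        }
        return "n/a";
      }
    }
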
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index c9d4f0b0792..c4d333632ef 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -89,6 +89,7 @@
import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServer;
import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -643,11 +644,17 @@ public List<HddsProtos.Node> queryNode(
List<HddsProtos.Node> result = new ArrayList<>();
for (DatanodeDetails node : queryNode(opState, state)) {
NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node);
- result.add(HddsProtos.Node.newBuilder()
+ DatanodeInfo datanodeInfo = scm.getScmNodeManager().getDatanodeInfo(node);
+ HddsProtos.Node.Builder nodeBuilder = HddsProtos.Node.newBuilder()
.setNodeID(node.toProto(clientVersion))
.addNodeStates(ns.getHealth())
- .addNodeOperationalStates(ns.getOperationalState())
- .build());
+ .addNodeOperationalStates(ns.getOperationalState());
+
+ if (datanodeInfo != null) {
+ nodeBuilder.setTotalVolumeCount(datanodeInfo.getStorageReports().size());
+ nodeBuilder.setHealthyVolumeCount(datanodeInfo.getHealthyVolumeCount());
+ }
+ result.add(nodeBuilder.build());
}
AUDIT.logReadSuccess(buildAuditMessageForSuccess(
SCMAction.QUERY_NODE, auditMap));
@@ -669,11 +676,17 @@ public HddsProtos.Node queryNode(UUID uuid)
DatanodeDetails node = scm.getScmNodeManager().getNode(DatanodeID.of(uuid));
if (node != null) {
NodeStatus ns = scm.getScmNodeManager().getNodeStatus(node);
- result = HddsProtos.Node.newBuilder()
+ DatanodeInfo datanodeInfo = scm.getScmNodeManager().getDatanodeInfo(node);
+ HddsProtos.Node.Builder nodeBuilder = HddsProtos.Node.newBuilder()
.setNodeID(node.getProtoBufMessage())
.addNodeStates(ns.getHealth())
- .addNodeOperationalStates(ns.getOperationalState())
- .build();
+ .addNodeOperationalStates(ns.getOperationalState());
+
+ if (datanodeInfo != null) {
+ nodeBuilder.setTotalVolumeCount(datanodeInfo.getStorageReports().size());
+ nodeBuilder.setHealthyVolumeCount(datanodeInfo.getHealthyVolumeCount());
+ }
+ result = nodeBuilder.build();
}
} catch (NodeNotFoundException e) {
IOException ex = new IOException(
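
Both queryNode overloads now follow the same pattern: look up the node's DatanodeInfo, and only when it is non-null set totalVolumeCount from the size of its storage-report list and healthyVolumeCount from getHealthyVolumeCount(); otherwise both optional fields stay unset. A hedged sketch of that shared step as a standalone helper (the helper is illustrative, not part of this commit):

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.node.DatanodeInfo;

    final class NodeVolumeCounts {
      private NodeVolumeCounts() { }

      /** Copies volume counters onto the builder; leaves both fields unset for unknown nodes. */
      static void apply(HddsProtos.Node.Builder nodeBuilder, DatanodeInfo datanodeInfo) {
        if (datanodeInfo == null) {
          return; // node not tracked by the node manager: report no counts
        }
        nodeBuilder.setTotalVolumeCount(datanodeInfo.getStorageReports().size());
        nodeBuilder.setHealthyVolumeCount(datanodeInfo.getHealthyVolumeCount());
      }
    }
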
diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/BasicDatanodeInfo.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/BasicDatanodeInfo.java
index 4a925ffad5f..ae0be5faab5 100644
--- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/BasicDatanodeInfo.java
+++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/BasicDatanodeInfo.java
@@ -27,7 +27,7 @@
/**
* Represents filtered Datanode information for json use.
*/
-public class BasicDatanodeInfo {
+public final class BasicDatanodeInfo {
@JsonInclude(JsonInclude.Include.NON_NULL)
private Long used = null;
@JsonInclude(JsonInclude.Include.NON_NULL)
@@ -37,20 +37,58 @@ public class BasicDatanodeInfo {
private final DatanodeDetails dn;
private final HddsProtos.NodeOperationalState opState;
private final HddsProtos.NodeState healthState;
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ private Integer totalVolumeCount = null;
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ private Integer healthyVolumeCount = null;
- public BasicDatanodeInfo(DatanodeDetails dnDetails, HddsProtos.NodeOperationalState opState,
- HddsProtos.NodeState healthState) {
- this.dn = dnDetails;
- this.opState = opState;
- this.healthState = healthState;
+ private BasicDatanodeInfo(Builder builder) {
+ this.dn = builder.dn;
+ this.opState = builder.opState;
+ this.healthState = builder.healthState;
+ this.used = builder.used;
+ this.capacity = builder.capacity;
+ this.percentUsed = builder.percentUsed;
+ this.totalVolumeCount = builder.totalVolumeCount;
+ this.healthyVolumeCount = builder.healthyVolumeCount;
}
- public BasicDatanodeInfo(DatanodeDetails dnDetails, HddsProtos.NodeOperationalState opState,
- HddsProtos.NodeState healthState, long used, long capacity, double percentUsed) {
- this(dnDetails, opState, healthState);
- this.used = used;
- this.capacity = capacity;
- this.percentUsed = percentUsed;
+ /**
+ * Builder class for creating instances of BasicDatanodeInfo.
+ */
+ public static class Builder {
+ private DatanodeDetails dn;
+ private HddsProtos.NodeOperationalState opState;
+ private HddsProtos.NodeState healthState;
+ private Long used;
+ private Long capacity;
+ private Double percentUsed;
+ private Integer totalVolumeCount;
+ private Integer healthyVolumeCount;
+
+ public Builder(DatanodeDetails dn, HddsProtos.NodeOperationalState opState,
+ HddsProtos.NodeState healthState) {
+ this.dn = dn;
+ this.opState = opState;
+ this.healthState = healthState;
+ }
+
+ public Builder withUsageInfo(long usedBytes, long capacityBytes, double percentUsedBytes) {
+ this.used = usedBytes;
+ this.capacity = capacityBytes;
+ this.percentUsed = percentUsedBytes;
+ return this;
+ }
+
+ public Builder withVolumeCounts(Integer total, Integer healthy) {
+ this.totalVolumeCount = total;
+ this.healthyVolumeCount = healthy;
+ return this;
+ }
+
+ public BasicDatanodeInfo build() {
+ return new BasicDatanodeInfo(this);
+ }
}
@JsonProperty(index = 5)
@@ -158,6 +196,16 @@ public Double getPercentUsed() {
return percentUsed;
}
+ @JsonProperty(index = 110)
+ public Integer getTotalVolumeCount() {
+ return totalVolumeCount;
+ }
+
+ @JsonProperty(index = 111)
+ public Integer getHealthyVolumeCount() {
+ return healthyVolumeCount;
+ }
+
@JsonIgnore
public DatanodeDetails getDatanodeDetails() {
return dn;
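
The two telescoping constructors are replaced by a builder, so callers opt into usage data and volume counts independently, and the JsonInclude.Include.NON_NULL annotations keep unset counters out of the JSON output entirely. A sketch of the new call sites under the builder API (the wrapper class and made-up counts are illustrative):

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class BasicDatanodeInfoUsage {
      private BasicDatanodeInfoUsage() { }

      /** Full record: usage info plus volume counters. */
      static BasicDatanodeInfo full(DatanodeDetails dn, HddsProtos.NodeOperationalState opState,
          HddsProtos.NodeState healthState, long used, long capacity, double percentUsed) {
        return new BasicDatanodeInfo.Builder(dn, opState, healthState)
            .withUsageInfo(used, capacity, percentUsed)
            .withVolumeCounts(10, 8) // made-up counts for illustration
            .build();
      }

      /** Counts may legitimately be null (e.g. an SCM without this change);
       *  the NON_NULL annotations then drop both fields from the JSON. */
      static BasicDatanodeInfo withoutCounts(DatanodeDetails dn,
          HddsProtos.NodeOperationalState opState, HddsProtos.NodeState healthState) {
        return new BasicDatanodeInfo.Builder(dn, opState, healthState)
            .withVolumeCounts(null, null)
            .build();
      }
    }
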
diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index 49fb032d6ce..0ed71c99e70 100644
--- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -88,8 +88,11 @@ public void execute(ScmClient scmClient) throws IOException {
pipelines = scmClient.listPipelines();
if (exclusiveNodeOptions != null && !Strings.isNullOrEmpty(exclusiveNodeOptions.getNodeId())) {
HddsProtos.Node node = scmClient.queryNode(UUID.fromString(exclusiveNodeOptions.getNodeId()));
- BasicDatanodeInfo singleNodeInfo = new BasicDatanodeInfo(DatanodeDetails.getFromProtoBuf(node.getNodeID()),
- node.getNodeOperationalStates(0), node.getNodeStates(0));
+ Integer totalVolumeCount = node.hasTotalVolumeCount() ? node.getTotalVolumeCount() : null;
+ Integer healthyVolumeCount = node.hasHealthyVolumeCount() ? node.getHealthyVolumeCount() : null;
+ BasicDatanodeInfo singleNodeInfo = new BasicDatanodeInfo.Builder(
+ DatanodeDetails.getFromProtoBuf(node.getNodeID()), node.getNodeOperationalStates(0),
+ node.getNodeStates(0)).withVolumeCounts(totalVolumeCount, healthyVolumeCount).build();
if (json) {
List<BasicDatanodeInfo> dtoList = Collections.singletonList(singleNodeInfo);
System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(dtoList));
@@ -151,13 +154,13 @@ private List<BasicDatanodeInfo> getAllNodes(ScmClient scmClient)
long capacity = p.getCapacity();
long used = capacity - p.getRemaining();
double percentUsed = (capacity > 0) ? (used * 100.0) / capacity : 0.0;
- return new BasicDatanodeInfo(
+ Integer totalVolumeCount = node.hasTotalVolumeCount() ? node.getTotalVolumeCount() : null;
+ Integer healthyVolumeCount = node.hasHealthyVolumeCount() ? node.getHealthyVolumeCount() : null;
+ return new BasicDatanodeInfo.Builder(
DatanodeDetails.getFromProtoBuf(node.getNodeID()),
- node.getNodeOperationalStates(0),
- node.getNodeStates(0),
- used,
- capacity,
- percentUsed);
+ node.getNodeOperationalStates(0), node.getNodeStates(0))
+ .withUsageInfo(used, capacity, percentUsed)
+ .withVolumeCounts(totalVolumeCount, healthyVolumeCount).build();
} catch (Exception e) {
String reason = "Could not process info for an unknown datanode";
if (p != null && p.getNode() != null && !Strings.isNullOrEmpty(p.getNode().getUuid())) {
@@ -174,9 +177,12 @@ private List<BasicDatanodeInfo> getAllNodes(ScmClient scmClient)
List<HddsProtos.Node> nodes = scmClient.queryNode(null, null, HddsProtos.QueryScope.CLUSTER, "");
- return nodes.stream().map(p -> new BasicDatanodeInfo(
- DatanodeDetails.getFromProtoBuf(p.getNodeID()),
- p.getNodeOperationalStates(0), p.getNodeStates(0)))
+ return nodes.stream().map(p -> {
+ Integer totalVolumeCount = p.hasTotalVolumeCount() ? p.getTotalVolumeCount() : null;
+ Integer healthyVolumeCount = p.hasHealthyVolumeCount() ? p.getHealthyVolumeCount() : null;
+ return new BasicDatanodeInfo.Builder(
+ DatanodeDetails.getFromProtoBuf(p.getNodeID()), p.getNodeOperationalStates(0), p.getNodeStates(0))
+ .withVolumeCounts(totalVolumeCount, healthyVolumeCount).build(); })
.sorted(Comparator.comparing(BasicDatanodeInfo::getHealthState))
.collect(Collectors.toList());
}
@@ -211,6 +217,10 @@ private void printDatanodeInfo(BasicDatanodeInfo dn) {
" pipelines)");
System.out.println("Operational State: " + dn.getOpState());
System.out.println("Health State: " + dn.getHealthState());
+ if (dn.getTotalVolumeCount() != null && dn.getHealthyVolumeCount() != null) {
+ System.out.println("Total volume count: " + dn.getTotalVolumeCount() + "\n" +
+ "Healthy volume count: " + dn.getHealthyVolumeCount());
+ }
System.out.println("Related pipelines:\n" + pipelineListInfo);
if (dn.getUsed() != null && dn.getCapacity() != null && dn.getUsed() >= 0 && dn.getCapacity() > 0) {
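
With counts present, the human-readable `ozone admin datanode list` output gains two lines per node, printed after the health state and before the pipeline list; when either count is absent the block is skipped. An illustrative excerpt (values made up):

    Operational State: IN_SERVICE
    Health State: HEALTHY
    Total volume count: 10
    Healthy volume count: 8
    Related pipelines:
    ...
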
diff --git a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
index fd6834068ee..13ae6a35f10 100644
--- a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
+++ b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java
@@ -312,6 +312,51 @@ public void testNodeSelectionAndUsageSortingAreMutuallyExclusive(String sortingF
"Exception message should contain '" + expectedErrorMessagePart + "' but was: " + thrown.getMessage());
}
+ @Test
+ public void testVolumeCounters() throws Exception {
+ ScmClient scmClient = mock(ScmClient.class);
+ List<HddsProtos.Node> nodes = getNodeDetails();
+
+ // Create nodes with volume counts
+ List<HddsProtos.Node> nodesWithVolumeCounts = new ArrayList<>();
+ for (int i = 0; i < nodes.size(); i++) {
+ HddsProtos.Node originalNode = nodes.get(i);
+ HddsProtos.Node nodeWithVolumes = HddsProtos.Node.newBuilder(originalNode)
+ .setTotalVolumeCount(10 + i)
+ .setHealthyVolumeCount(8 + i)
+ .build();
+ nodesWithVolumeCounts.add(nodeWithVolumes);
+ }
+
+ when(scmClient.queryNode(any(), any(), any(), any())).thenReturn(nodesWithVolumeCounts);
+ when(scmClient.listPipelines()).thenReturn(new ArrayList<>());
+
+ // ----- JSON output test -----
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("--json");
+ cmd.execute(scmClient);
+ JsonNode root = mapper.readTree(outContent.toString(DEFAULT_ENCODING));
+
+ assertTrue(root.isArray(), "JSON output should be an array");
+ assertEquals(4, root.size(), "Expected 4 nodes in JSON output");
+
+ for (JsonNode node : root) {
+ assertTrue(node.has("totalVolumeCount"), "JSON should include totalVolumeCount field");
+ assertTrue(node.has("healthyVolumeCount"), "JSON should include healthyVolumeCount field");
+ }
+
+ outContent.reset();
+
+ // ----- Text output test -----
+ c = new CommandLine(cmd);
+ c.parseArgs();
+ cmd.execute(scmClient);
+ String output = outContent.toString(DEFAULT_ENCODING);
+
+ assertTrue(output.contains("Total volume count:"), "Should display total volume count");
+ assertTrue(output.contains("Healthy volume count:"), "Should display healthy volume count");
+ }
+
private void validateOrdering(JsonNode root, String orderDirection) {
for (int i = 0; i < root.size() - 1; i++) {
long usedCurrent = root.get(i).get("used").asLong();
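
For reference, each element of the JSON array the test asserts against carries the two new fields, which are annotated with @JsonProperty(index = 110) and (index = 111) so they are placed after the existing properties. Roughly, per node (other fields elided, values made up):

    {
      ...,
      "totalVolumeCount": 10,
      "healthyVolumeCount": 8
    }
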