This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new e5a55655686 HDDS-13945. Show datanode reserved space in StorageDistributionEndpoint (#9488)
e5a55655686 is described below

commit e5a556556865925bfbd6a924c90412d9fa8e4139
Author: Priyesh Karatha <[email protected]>
AuthorDate: Fri Dec 19 22:47:14 2025 +0530

    HDDS-13945. Show datanode reserved space in StorageDistributionEndpoint (#9488)
---
 .../container/balancer/ContainerBalancerTask.java  |  2 +-
 .../scm/container/placement/metrics/NodeStat.java  |  8 ++++-
 .../container/placement/metrics/SCMNodeMetric.java |  6 ++--
 .../container/placement/metrics/SCMNodeStat.java   | 36 ++++++++++++++++------
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  8 +++--
 .../hadoop/hdds/scm/container/MockNodeManager.java |  2 +-
 .../balancer/TestContainerBalancerTask.java        |  2 +-
 .../container/balancer/TestFindTargetStrategy.java | 22 ++++++-------
 .../scm/container/balancer/TestableCluster.java    |  2 +-
 .../TestSCMContainerPlacementCapacity.java         |  8 ++---
 .../replication/TestReplicationManagerUtil.java    |  6 ++--
 .../TestCapacityPipelineChoosePolicy.java          |  8 ++---
 .../container/placement/TestDatanodeMetrics.java   |  8 ++---
 .../recon/TestStorageDistributionEndpoint.java     | 18 ++++++++++-
 .../recon/api/StorageDistributionEndpoint.java     |  2 ++
 .../recon/api/types/DatanodeStorageReport.java     | 22 +++++++++++++
 .../recon/api/TestNSSummaryEndpointWithFSO.java    |  2 +-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  2 +-
 .../api/TestNSSummaryEndpointWithOBSAndLegacy.java |  2 +-
 19 files changed, 116 insertions(+), 50 deletions(-)
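
For reference, SCMNodeStat now tracks reserved space as a sixth metric next to capacity, used, remaining, committed and freeSpaceToSpare. A minimal sketch of the new constructor and getter (the values are illustrative, not taken from the patch):

    // Argument order per the updated constructor in the diff below:
    // capacity, used, remaining, committed, freeSpaceToSpare, reserved
    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 80L, 5L, 20L, 10L);
    long reservedBytes = stat.getReserved().get();   // 10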

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 6d8614ecc80..59c010e091c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -1063,7 +1063,7 @@ public static double calculateAvgUtilization(List<DatanodeUsageInfo> nodes) {
       return 0;
     }
     SCMNodeStat aggregatedStats = new SCMNodeStat(
-        0, 0, 0, 0, 0);
+        0, 0, 0, 0, 0, 0);
     for (DatanodeUsageInfo node : nodes) {
       aggregatedStats.add(node.getScmNodeStat());
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
index 1d09e01e533..dacc1487b54 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
@@ -53,6 +53,12 @@ interface NodeStat {
    */
   LongMetric getFreeSpaceToSpare();
 
+  /**
+   * Get the reserved space on the node.
+   * @return the reserved space on the node
+   */
+  LongMetric getReserved();
+
   /**
    * Set the total/used/remaining space.
    * @param capacity - total space.
@@ -61,7 +67,7 @@ interface NodeStat {
    */
   @VisibleForTesting
   void set(long capacity, long used, long remain, long committed,
-           long freeSpaceToSpare);
+           long freeSpaceToSpare, long reserved);
 
   /**
    * Adding of the stat.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 184dd715c20..2349c059472 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -46,9 +46,9 @@ public SCMNodeMetric(SCMNodeStat stat) {
    */
   @VisibleForTesting
   public SCMNodeMetric(long capacity, long used, long remaining,
-                       long committed, long freeSpaceToSpare) {
+                       long committed, long freeSpaceToSpare, long reserved) {
     this.stat = new SCMNodeStat();
-    this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare);
+    this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare, reserved);
   }
 
   /**
@@ -159,7 +159,7 @@ public SCMNodeStat get() {
   public void set(SCMNodeStat value) {
     stat.set(value.getCapacity().get(), value.getScmUsed().get(),
         value.getRemaining().get(), value.getCommitted().get(),
-        value.getFreeSpaceToSpare().get());
+        value.getFreeSpaceToSpare().get(), value.getReserved().get());
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index ef547b70260..d7b8b5892c0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -29,18 +29,19 @@ public class SCMNodeStat implements NodeStat {
   private LongMetric remaining;
   private LongMetric committed;
   private LongMetric freeSpaceToSpare;
+  private LongMetric reserved;
 
   public SCMNodeStat() {
-    this(0L, 0L, 0L, 0L, 0L);
+    this(0L, 0L, 0L, 0L, 0L, 0L);
   }
 
   public SCMNodeStat(SCMNodeStat other) {
     this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(),
-        other.committed.get(), other.freeSpaceToSpare.get());
+        other.committed.get(), other.freeSpaceToSpare.get(), other.reserved.get());
   }
 
   public SCMNodeStat(long capacity, long used, long remaining, long committed,
-                     long freeSpaceToSpare) {
+                     long freeSpaceToSpare, long reserved) {
     Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " +
         "negative.");
     Preconditions.checkArgument(used >= 0, "used space cannot be " +
@@ -52,6 +53,7 @@ public SCMNodeStat(long capacity, long used, long remaining, long committed,
     this.remaining = new LongMetric(remaining);
     this.committed = new LongMetric(committed);
     this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare);
+    this.reserved = new LongMetric(reserved);
   }
 
   /**
@@ -96,6 +98,15 @@ public LongMetric getFreeSpaceToSpare() {
     return freeSpaceToSpare;
   }
 
+  /**
+   * Get the reserved space on the node.
+   * @return the reserved space on the node
+   */
+  @Override
+  public LongMetric getReserved() {
+    return reserved;
+  }
+
   /**
    * Set the capacity, used and remaining space on a datanode.
    *
@@ -106,7 +117,7 @@ public LongMetric getFreeSpaceToSpare() {
   @Override
   @VisibleForTesting
   public void set(long newCapacity, long newUsed, long newRemaining,
-                  long newCommitted, long newFreeSpaceToSpare) {
+                  long newCommitted, long newFreeSpaceToSpare, long newReserved) {
     Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
         "negative.");
     Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +
@@ -119,6 +130,7 @@ public void set(long newCapacity, long newUsed, long newRemaining,
     this.remaining = new LongMetric(newRemaining);
     this.committed = new LongMetric(newCommitted);
     this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare);
+    this.reserved = new LongMetric(newReserved);
   }
 
   /**
@@ -133,8 +145,8 @@ public SCMNodeStat add(NodeStat stat) {
     this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
     this.remaining.set(this.getRemaining().get() + stat.getRemaining().get());
     this.committed.set(this.getCommitted().get() + stat.getCommitted().get());
-    this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() +
-        stat.getFreeSpaceToSpare().get());
+    this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() + stat.getFreeSpaceToSpare().get());
+    this.reserved.set(this.reserved.get() + stat.getReserved().get());
     return this;
   }
 
@@ -150,8 +162,8 @@ public SCMNodeStat subtract(NodeStat stat) {
     this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
     this.remaining.set(this.getRemaining().get() - stat.getRemaining().get());
     this.committed.set(this.getCommitted().get() - stat.getCommitted().get());
-    this.freeSpaceToSpare.set(freeSpaceToSpare.get() -
-        stat.getFreeSpaceToSpare().get());
+    this.freeSpaceToSpare.set(freeSpaceToSpare.get() - stat.getFreeSpaceToSpare().get());
+    this.reserved.set(reserved.get() - stat.getReserved().get());
     return this;
   }
 
@@ -163,7 +175,8 @@ public boolean equals(Object to) {
           scmUsed.isEqual(tempStat.getScmUsed().get()) &&
           remaining.isEqual(tempStat.getRemaining().get()) &&
           committed.isEqual(tempStat.getCommitted().get()) &&
-          freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get());
+          freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get()) &&
+          reserved.isEqual(tempStat.reserved.get());
     }
     return false;
   }
@@ -171,7 +184,7 @@ public boolean equals(Object to) {
   @Override
   public int hashCode() {
     return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^
-        committed.get() ^ freeSpaceToSpare.get());
+        committed.get() ^ freeSpaceToSpare.get() ^ reserved.get());
   }
 
   @Override
@@ -180,6 +193,9 @@ public String toString() {
         "capacity=" + capacity.get() +
         ", scmUsed=" + scmUsed.get() +
         ", remaining=" + remaining.get() +
+        ", committed=" + committed.get() +
+        ", freeSpaceToSpare=" + freeSpaceToSpare.get() +
+        ", reserved=" + reserved.get() +
         '}';
   }
 }
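
As a rough usage sketch of the aggregation change above (node values are made up, not from the patch), add() and subtract() now also fold the reserved metric into cluster-wide totals:

    SCMNodeStat aggregate = new SCMNodeStat(0, 0, 0, 0, 0, 0);
    aggregate.add(new SCMNodeStat(100, 40, 60, 0, 10, 5));
    aggregate.add(new SCMNodeStat(200, 50, 150, 0, 10, 15));
    long totalReserved = aggregate.getReserved().get();   // 20; subtract() reverses this
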
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index da0e82f69d0..4096c35f3b8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -940,6 +940,7 @@ public SCMNodeStat getStats() {
     long remaining = 0L;
     long committed = 0L;
     long freeSpaceToSpare = 0L;
+    long reserved = 0L;
 
     for (SCMNodeStat stat : getNodeStats().values()) {
       capacity += stat.getCapacity().get();
@@ -947,9 +948,10 @@ public SCMNodeStat getStats() {
       remaining += stat.getRemaining().get();
       committed += stat.getCommitted().get();
       freeSpaceToSpare += stat.getFreeSpaceToSpare().get();
+      reserved += stat.getReserved().get();
     }
     return new SCMNodeStat(capacity, used, remaining, committed,
-        freeSpaceToSpare);
+        freeSpaceToSpare, reserved);
   }
 
   /**
@@ -1057,6 +1059,7 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
       long remaining = 0L;
       long committed = 0L;
       long freeSpaceToSpare = 0L;
+      long reserved = 0L;
 
       final DatanodeInfo datanodeInfo = nodeStateManager
           .getNode(datanodeDetails);
@@ -1068,9 +1071,10 @@ private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
         remaining += reportProto.getRemaining();
         committed += reportProto.getCommitted();
         freeSpaceToSpare += reportProto.getFreeSpaceToSpare();
+        reserved += reportProto.getReserved();
       }
       return new SCMNodeStat(capacity, used, remaining, committed,
-          freeSpaceToSpare);
+          freeSpaceToSpare, reserved);
     } catch (NodeNotFoundException e) {
       LOG.warn("Cannot generate NodeStat, datanode {} not found.", 
datanodeDetails);
       return null;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 210409dc6ae..e87f9a66478 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -218,7 +218,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) {
         NODES[x % NODES.length].capacity - NODES[x % NODES.length].used;
     newStat.set(
         (NODES[x % NODES.length].capacity),
-        (NODES[x % NODES.length].used), remaining, 0, 100000);
+        (NODES[x % NODES.length].used), remaining, 0, 100000, 0);
     this.nodeMetricMap.put(datanodeDetails, newStat);
     aggregateStat.add(newStat);
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index 85947e27d48..8205f1c7206 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -460,7 +460,7 @@ private void createCluster(int[] sizeArray) {
       }
       SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace,
           datanodeCapacity - datanodeUsedSpace, 0,
-          datanodeCapacity - datanodeUsedSpace - 1);
+          datanodeCapacity - datanodeUsedSpace - 1, 0);
       nodesInCluster.get(i).setScmNodeStat(stat);
     }
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
index 7426fae11ba..776bb5d1434 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
@@ -53,11 +53,11 @@ public void testFindTargetGreedyByUsage() {
 
     //create three datanodes with different usageinfo
     DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30, 0));
     DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30, 0));
     DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30, 0));
 
     //insert in ascending order
     overUtilizedDatanodes.add(dui1);
@@ -92,11 +92,11 @@ public void testFindTargetGreedyByUsage() {
   public void testResetPotentialTargets() {
     // create three datanodes with different usage infos
     DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50, 0));
     DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60, 0));
     DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70, 0));
 
     List<DatanodeUsageInfo> potentialTargets = new ArrayList<>();
     potentialTargets.add(dui1);
@@ -171,18 +171,18 @@ public void testFindTargetGreedyByNetworkTopology() {
     List<DatanodeUsageInfo> overUtilizedDatanodes = new ArrayList<>();
     //set the farthest target with the lowest usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80)));
+        new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80, 0)));
     //set the tree targets, which have the same network topology distance
     //to source , with different usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10)));
+        new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10, 0)));
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30)));
+        new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30, 0)));
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50)));
+        new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50, 0)));
     //set the nearest target with the highest usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5)));
+        new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5, 0)));
 
 
     FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java
index 0e86dea2696..1e9591ab194 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestableCluster.java
@@ -80,7 +80,7 @@ public final class TestableCluster {
 
       SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace,
           datanodeCapacity - datanodeUsedSpace, 0,
-          datanodeCapacity - datanodeUsedSpace - 1);
+          datanodeCapacity - datanodeUsedSpace - 1, 0);
       nodesInCluster[i].setScmNodeStat(stat);
       clusterUsedSpace += datanodeUsedSpace;
       clusterCapacity += datanodeCapacity;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 885ba552825..5b940fc543f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -105,13 +105,13 @@ public void chooseDatanodes() throws SCMException {
         .thenReturn(new ArrayList<>(datanodes));
 
     when(mockNodeManager.getNodeStat(any()))
-        .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90));
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9));
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19));
+        .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(4)))
-        .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20));
+        .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20, 0));
     when(mockNodeManager.getNode(any(DatanodeID.class))).thenAnswer(
             invocation -> datanodes.stream()
                 .filter(dn -> dn.getID().equals(invocation.getArgument(0)))
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
index d5c465aa238..ffca82e231b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerUtil.java
@@ -317,13 +317,13 @@ public void testDatanodesWithInSufficientDiskSpaceAreExcluded() throws NodeNotFo
     when(replicationManager.getNodeManager()).thenReturn(nodeManagerMock);
     doReturn(fullDn).when(nodeManagerMock).getNode(fullDn.getID());
     doReturn(new SCMNodeMetric(50 * oneGb, 20 * oneGb, 30 * oneGb, 5 * oneGb,
-        20 * oneGb)).when(nodeManagerMock).getNodeStat(fullDn);
+        20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(fullDn);
     doReturn(spaceAvailableDn).when(nodeManagerMock).getNode(spaceAvailableDn.getID());
     doReturn(new SCMNodeMetric(50 * oneGb, 10 * oneGb, 40 * oneGb, 5 * oneGb,
-        20 * oneGb)).when(nodeManagerMock).getNodeStat(spaceAvailableDn);
+        20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(spaceAvailableDn);
     doReturn(expiredOpDn).when(nodeManagerMock).getNode(expiredOpDn.getID());
     doReturn(new SCMNodeMetric(50 * oneGb, 20 * oneGb, 30 * oneGb, 5 * oneGb,
-        20 * oneGb)).when(nodeManagerMock).getNodeStat(expiredOpDn);
+        20 * oneGb, 0)).when(nodeManagerMock).getNodeStat(expiredOpDn);
 
     when(replicationManager.getNodeStatus(any())).thenAnswer(
         invocation -> {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
index 352468baa5d..2cd399e6d75 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestCapacityPipelineChoosePolicy.java
@@ -53,13 +53,13 @@ public void testChoosePipeline() throws Exception {
     // used       0   10    20    30
     NodeManager mockNodeManager = mock(NodeManager.class);
     when(mockNodeManager.getNodeStat(datanodes.get(0)))
-        .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0));
+        .thenReturn(new SCMNodeMetric(100L, 0, 100L, 0, 0, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(1)))
-        .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0));
+        .thenReturn(new SCMNodeMetric(100L, 10L, 90L, 0, 0, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0));
+        .thenReturn(new SCMNodeMetric(100L, 20L, 80L, 0, 0, 0));
     when(mockNodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0));
+        .thenReturn(new SCMNodeMetric(100L, 30L, 70L, 0, 0, 0));
 
     PipelineChoosePolicy policy = new CapacityPipelineChoosePolicy().init(mockNodeManager);
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
index c997b33a2ea..33fa6376410 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
@@ -30,13 +30,13 @@
 public class TestDatanodeMetrics {
   @Test
   public void testSCMNodeMetric() {
-    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
+    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80, 0);
     assertEquals((long) stat.getCapacity().get(), 100L);
     assertEquals(10L, (long) stat.getScmUsed().get());
     assertEquals(90L, (long) stat.getRemaining().get());
     SCMNodeMetric metric = new SCMNodeMetric(stat);
 
-    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
+    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80, 0);
     assertEquals(100L, (long) stat.getCapacity().get());
     assertEquals(10L, (long) stat.getScmUsed().get());
     assertEquals(90L, (long) stat.getRemaining().get());
@@ -52,8 +52,8 @@ public void testSCMNodeMetric() {
     assertTrue(metric.isGreater(zeroMetric.get()));
 
     // Another case when nodes have similar weight
-    SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000);
-    SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000);
+    SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000, 0);
+    SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000, 0);
     assertTrue(new SCMNodeMetric(stat2).isGreater(stat1));
   }
 }
diff --git a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java
index 5ac779d6596..ea6894831a7 100644
--- a/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java
+++ b/hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -74,6 +75,7 @@
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.recon.api.DataNodeMetricsService;
 import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
 import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion;
 import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
 import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
@@ -215,7 +217,21 @@ private boolean verifyStorageDistributionAfterKeyCreation() {
      assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes());
      assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes());
       assertEquals(3, storageResponse.getDataNodeUsage().size());
-
+      List<DatanodeStorageReport> reports = storageResponse.getDataNodeUsage();
+      List<HddsProtos.DatanodeUsageInfoProto> scmReports =
+          scm.getClientProtocolServer().getDatanodeUsageInfo(true, 3, 1);
+      for (DatanodeStorageReport report : reports) {
+        for (HddsProtos.DatanodeUsageInfoProto scmReport : scmReports) {
+          if (scmReport.getNode().getUuid().equals(report.getDatanodeUuid())) {
+            assertEquals(report.getMinimumFreeSpace(), scmReport.getFreeSpaceToSpare());
+            assertEquals(report.getReserved(), scmReport.getReserved());
+            assertEquals(report.getCapacity(), scmReport.getCapacity());
+            assertEquals(report.getRemaining(), scmReport.getRemaining());
+            assertEquals(report.getUsed(), scmReport.getUsed());
+            assertEquals(report.getCommitted(), scmReport.getCommitted());
+          }
+        }
+      }
       return true;
     } catch (Exception e) {
       LOG.debug("Waiting for storage distribution assertions to pass", e);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
index 39262b24f1e..96cf36fff31 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/StorageDistributionEndpoint.java
@@ -237,6 +237,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) {
      long remaining = nodeStat.getRemaining() != null ? nodeStat.getRemaining().get() : 0L;
      long committed = nodeStat.getCommitted() != null ? nodeStat.getCommitted().get() : 0L;
      long minFreeSpace  = nodeStat.getFreeSpaceToSpare() != null ? nodeStat.getFreeSpaceToSpare().get() : 0L;
+      long reservedSpace = nodeStat.getReserved() != null ? nodeStat.getReserved().get() : 0L;
 
       return DatanodeStorageReport.newBuilder()
           .setCapacity(capacity)
@@ -244,6 +245,7 @@ private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) {
           .setRemaining(remaining)
           .setCommitted(committed)
           .setMinimumFreeSpace(minFreeSpace)
+          .setReserved(reservedSpace)
           .setDatanodeUuid(datanode.getUuidString())
           .setHostName(datanode.getHostName())
           .build();
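
The endpoint change above propagates the same value into the per-datanode report. A hedged sketch of building a DatanodeStorageReport with the new field (the UUID and host name are hypothetical, and the setUsed(...) call is assumed since it falls between the two hunks shown):

    DatanodeStorageReport report = DatanodeStorageReport.newBuilder()
        .setDatanodeUuid("datanode-uuid")     // hypothetical UUID
        .setHostName("dn1.example.com")       // hypothetical host
        .setCapacity(100L)
        .setUsed(40L)                         // setter assumed, not shown in this hunk
        .setRemaining(55L)
        .setCommitted(2L)
        .setMinimumFreeSpace(10L)
        .setReserved(5L)
        .build();
    long reserved = report.getReserved();     // 5
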
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
index 97100bac9cf..e26a761eb5b 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
@@ -32,6 +32,7 @@ public final class DatanodeStorageReport {
   private long remaining;
   private long committed;
   private long minimumFreeSpace;
+  private long reserved;
 
   public DatanodeStorageReport() {
   }
@@ -44,6 +45,8 @@ private DatanodeStorageReport(Builder builder) {
     this.remaining = builder.remaining;
     this.committed = builder.committed;
     this.minimumFreeSpace = builder.minimumFreeSpace;
+    this.reserved = builder.reserved;
+    builder.validate();
   }
 
   public String getDatanodeUuid() {
@@ -74,6 +77,10 @@ public long getMinimumFreeSpace() {
     return minimumFreeSpace;
   }
 
+  public long getReserved() {
+    return reserved;
+  }
+
   public static Builder newBuilder() {
     return new Builder();
   }
@@ -89,6 +96,7 @@ public static final class Builder {
     private long remaining = 0;
     private long committed = 0;
     private long minimumFreeSpace = 0;
+    private long reserved = 0;
 
     private static final Logger LOG =
         LoggerFactory.getLogger(Builder.class);
@@ -131,6 +139,11 @@ public Builder setMinimumFreeSpace(long minimumFreeSpace) {
       return this;
     }
 
+    public Builder setReserved(long reserved) {
+      this.reserved = reserved;
+      return this;
+    }
+
     public void validate() {
       Objects.requireNonNull(hostName, "hostName cannot be null");
 
@@ -146,6 +159,15 @@ public void validate() {
       if (committed < 0) {
         throw new IllegalArgumentException("committed cannot be negative");
       }
+
+      if (minimumFreeSpace < 0) {
+        throw new IllegalArgumentException("minimumFreeSpace cannot be negative");
+      }
+
+      if (reserved < 0) {
+        throw new IllegalArgumentException("reserved cannot be negative");
+      }
+
       // Logical consistency checks
       if (used + remaining > capacity) {
         LOG.warn("Inconsistent storage report for {}: used({}) + remaining({}) 
> capacity({})",
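
With the checks added above, a negative reserved (or minimumFreeSpace) value is rejected when the report is validated. A small sketch, assuming Builder.build() delegates to the private constructor that now invokes validate():

    DatanodeStorageReport.newBuilder()
        .setHostName("dn1.example.com")   // hypothetical host
        .setCapacity(100L)
        .setReserved(-1L)
        .build();                         // expected to throw IllegalArgumentException
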
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index 5b15235431f..f08a8131d4e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -1573,6 +1573,6 @@ private static BucketLayout getBucketLayout() {
 
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
-        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1);
+        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1, 0);
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index 8bf0e9ba3cb..7ac14a9eb4d 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -1486,6 +1486,6 @@ private static BucketLayout getBucketLayout() {
 
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, 
-        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1);
+        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1, 0);
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index ba190bee4c3..146d84b400e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -1446,7 +1446,7 @@ private static BucketLayout getLegacyBucketLayout() {
 
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE,
-        ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L);
+        ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L, 0);
   }
 
 }

