This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 0e07225dbd HDDS-9807. Consider volume committed space when checking if 
datanode can host new container (#5721)
0e07225dbd is described below

commit 0e07225dbd20eeaf3a9c67a7facf91baeb105bb7
Author: Slava Tutrinov <[email protected]>
AuthorDate: Wed Dec 20 20:19:56 2023 +0300

    HDDS-9807. Consider volume committed space when checking if datanode can 
host new container (#5721)
---
 .../common/impl/StorageLocationReport.java         | 56 ++++++++++++++++-
 .../interfaces/StorageLocationReportMXBean.java    |  4 ++
 .../common/volume/AvailableSpaceFilter.java        |  8 +--
 .../container/common/volume/MutableVolumeSet.java  |  4 ++
 .../container/common/volume/VolumeInfoMetrics.java |  5 ++
 .../ozone/container/common/volume/VolumeUsage.java |  8 +++
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  2 +-
 .../static/swagger-resources/recon-api.yaml        |  6 ++
 .../interface-client/src/main/proto/hdds.proto     |  2 +
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |  2 +
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java  | 13 ++--
 .../container/balancer/ContainerBalancerTask.java  |  2 +-
 .../scm/container/placement/metrics/NodeStat.java  | 15 ++++-
 .../container/placement/metrics/SCMNodeMetric.java | 16 +++--
 .../container/placement/metrics/SCMNodeStat.java   | 48 ++++++++++++--
 .../hadoop/hdds/scm/node/DatanodeUsageInfo.java    |  2 +
 .../hadoop/hdds/scm/node/SCMNodeManager.java       | 14 ++++-
 .../hadoop/hdds/scm/pipeline/PipelineProvider.java |  8 ++-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  2 +-
 .../hdds/scm/TestSCMCommonPlacementPolicy.java     | 73 ++++++++++++++++++++--
 .../hadoop/hdds/scm/container/MockNodeManager.java |  2 +-
 .../balancer/TestContainerBalancerTask.java        |  3 +-
 .../container/balancer/TestFindTargetStrategy.java | 22 +++----
 .../TestSCMContainerPlacementCapacity.java         |  8 +--
 .../container/placement/TestDatanodeMetrics.java   |  8 +--
 .../hdds/scm/cli/datanode/UsageInfoSubcommand.java | 24 ++++++-
 .../scm/cli/datanode/TestUsageInfoSubcommand.java  | 33 ++++++++++
 .../ozone/recon/api/ClusterStateEndpoint.java      |  3 +-
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |  3 +-
 .../recon/api/types/DatanodeStorageReport.java     |  9 ++-
 .../webapps/recon/ozone-recon-web/api/db.json      | 57 +++++++++++------
 .../src/components/storageBar/storageBar.less      |  5 ++
 .../src/components/storageBar/storageBar.tsx       |  5 +-
 .../ozone-recon-web/src/types/datanode.types.tsx   |  1 +
 .../src/views/datanodes/datanodes.tsx              |  4 +-
 .../recon/api/TestNSSummaryEndpointWithFSO.java    |  2 +-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  2 +-
 37 files changed, 398 insertions(+), 83 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 0222050da5..f31d45a778 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.ozone.container.common.interfaces
     .StorageLocationReportMXBean;
+import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
 
 import java.io.IOException;
 
@@ -42,17 +44,22 @@ public final class StorageLocationReport implements
   private final long capacity;
   private final long scmUsed;
   private final long remaining;
+  private final long committed;
+  private final long freeSpaceToSpare;
   private final StorageType storageType;
   private final String storageLocation;
 
+  @SuppressWarnings("checkstyle:parameternumber")
   private StorageLocationReport(String id, boolean failed, long capacity,
-      long scmUsed, long remaining, StorageType storageType,
-      String storageLocation) {
+      long scmUsed, long remaining, long committed, long freeSpaceToSpare,
+      StorageType storageType, String storageLocation) {
     this.id = id;
     this.failed = failed;
     this.capacity = capacity;
     this.scmUsed = scmUsed;
     this.remaining = remaining;
+    this.committed = committed;
+    this.freeSpaceToSpare = freeSpaceToSpare;
     this.storageType = storageType;
     this.storageLocation = storageLocation;
   }
@@ -82,6 +89,16 @@ public final class StorageLocationReport implements
     return remaining;
   }
 
+  @Override
+  public long getCommitted() {
+    return committed;
+  }
+
+  @Override
+  public long getFreeSpaceToSpare() {
+    return freeSpaceToSpare;
+  }
+
   @Override
   public String getStorageLocation() {
     return storageLocation;
@@ -157,14 +174,22 @@ public final class StorageLocationReport implements
    * @throws IOException In case, the storage type specified is invalid.
    */
   public StorageReportProto getProtoBufMessage() throws IOException {
+    return getProtoBufMessage(null);
+  }
+
+  public StorageReportProto getProtoBufMessage(ConfigurationSource conf)
+      throws IOException {
     StorageReportProto.Builder srb = StorageReportProto.newBuilder();
     return srb.setStorageUuid(getId())
         .setCapacity(getCapacity())
         .setScmUsed(getScmUsed())
         .setRemaining(getRemaining())
+        .setCommitted(getCommitted())
         .setStorageType(getStorageTypeProto())
         .setStorageLocation(getStorageLocation())
         .setFailed(isFailed())
+        .setFreeSpaceToSpare(conf != null ?
+            VolumeUsage.getMinVolumeFreeSpace(conf, getCapacity()) : 0)
         .build();
   }
 
@@ -266,6 +291,8 @@ public final class StorageLocationReport implements
     private long capacity;
     private long scmUsed;
     private long remaining;
+    private long committed;
+    private long freeSpaceToSpare;
     private StorageType storageType;
     private String storageLocation;
 
@@ -334,6 +361,29 @@ public final class StorageLocationReport implements
       return this;
     }
 
+    /**
+     * Sets the committed bytes count.
+     * (bytes for previously created containers)
+     * @param committed previously created containers size
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setCommitted(long committed) {
+      this.committed = committed;
+      return this;
+    }
+
+    /**
+     * Sets the free space available to spare.
+     * (depends on datanode volume config,
+     * consider 'hdds.datanode.volume.min.*' configuration properties)
+     * @param freeSpaceToSpare the size of free volume space available to spare
+     * @return StorageLocationReport.Builder
+     */
+    public Builder setFreeSpaceToSpare(long freeSpaceToSpare) {
+      this.freeSpaceToSpare = freeSpaceToSpare;
+      return this;
+    }
+
     /**
      * Sets the storageLocation.
      *
@@ -352,7 +402,7 @@ public final class StorageLocationReport implements
      */
     public StorageLocationReport build() {
       return new StorageLocationReport(id, failed, capacity, scmUsed,
-          remaining, storageType, storageLocation);
+          remaining, committed, freeSpaceToSpare, storageType, 
storageLocation);
     }
 
   }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
index fd06367813..74c4336bc6 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/StorageLocationReportMXBean.java
@@ -33,6 +33,10 @@ public interface StorageLocationReportMXBean {
 
   long getRemaining();
 
+  long getCommitted();
+
+  long getFreeSpaceToSpare();
+
   String getStorageLocation();
 
   String getStorageTypeName();
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
index 13041eb4d6..622c85a52f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java
@@ -25,7 +25,7 @@ import java.util.function.Predicate;
  * Filter for selecting volumes with enough space for a new container.
  * Keeps track of ineligible volumes for logging/debug purposes.
  */
-class AvailableSpaceFilter implements Predicate<HddsVolume> {
+public class AvailableSpaceFilter implements Predicate<HddsVolume> {
 
   private final long requiredSpace;
   private final Map<HddsVolume, AvailableSpace> fullVolumes =
@@ -42,10 +42,10 @@ class AvailableSpaceFilter implements Predicate<HddsVolume> 
{
     long free = vol.getAvailable();
     long committed = vol.getCommittedBytes();
     long available = free - committed;
-    long volumeFreeSpace =
+    long volumeFreeSpaceToSpare =
         VolumeUsage.getMinVolumeFreeSpace(vol.getConf(), volumeCapacity);
-    boolean hasEnoughSpace =
-        available > Math.max(requiredSpace, volumeFreeSpace);
+    boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed,
+        requiredSpace, volumeFreeSpaceToSpare);
 
     mostAvailableSpace = Math.max(available, mostAvailableSpace);
 
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 985ddea8de..3c0b6e618e 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -464,6 +464,7 @@ public class MutableVolumeSet implements VolumeSet {
         long scmUsed = 0;
         long remaining = 0;
         long capacity = 0;
+        long committed = 0;
         String rootDir = "";
         failed = true;
         if (volumeInfo.isPresent()) {
@@ -472,6 +473,8 @@ public class MutableVolumeSet implements VolumeSet {
             scmUsed = volumeInfo.get().getScmUsed();
             remaining = volumeInfo.get().getAvailable();
             capacity = volumeInfo.get().getCapacity();
+            committed = (volume instanceof HddsVolume) ?
+                ((HddsVolume) volume).getCommittedBytes() : 0;
             failed = false;
           } catch (UncheckedIOException ex) {
             LOG.warn("Failed to get scmUsed and remaining for container " +
@@ -491,6 +494,7 @@ public class MutableVolumeSet implements VolumeSet {
             .setCapacity(capacity)
             .setRemaining(remaining)
             .setScmUsed(scmUsed)
+            .setCommitted(committed)
             .setStorageType(volume.getStorageType());
         StorageLocationReport r = builder.build();
         reports[counter++] = r;
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index c90dcea81f..18e7354ec1 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -142,4 +142,9 @@ public class VolumeInfoMetrics {
     return (getUsed() + getAvailable() + getReserved());
   }
 
+  @Metric("Returns the Committed bytes of the Volume")
+  public long getCommitted() {
+    return volume.getCommittedBytes();
+  }
+
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index e7a06abc9e..57cf0a8b9d 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -162,6 +162,14 @@ public class VolumeUsage implements SpaceUsageSource {
 
   }
 
+  public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace,
+                                             long volumeCommittedBytesCount,
+                                             long requiredSpace,
+                                             long volumeFreeSpaceToSpare) {
+    return (volumeAvailableSpace - volumeCommittedBytesCount) >
+        Math.max(requiredSpace, volumeFreeSpaceToSpare);
+  }
+
   /**
    * Class representing precomputed space values of a volume.
    * This class is intended to store precomputed values, such as capacity
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 1e34fb1049..277ab4464e 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -524,7 +524,7 @@ public class OzoneContainer {
             = StorageContainerDatanodeProtocolProtos.
             NodeReportProto.newBuilder();
     for (int i = 0; i < reports.length; i++) {
-      nrb.addStorageReport(reports[i].getProtoBufMessage());
+      nrb.addStorageReport(reports[i].getProtoBufMessage(config));
     }
 
     StorageLocationReport[] metaReports = metaVolumeSet.getStorageReport();
diff --git 
a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml 
b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml
index 3b41132f5f..9ff3287766 100644
--- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml
+++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml
@@ -1433,6 +1433,9 @@ components:
             remaining:
               type: number
               example: 1080410456064
+            committed:
+              type: number
+              example: 1080410456
         containers:
           type: integer
           example: 26
@@ -1480,6 +1483,9 @@ components:
                   remaining:
                     type: number
                     example: 270071111680
+                  committed:
+                    type: number
+                    example: 27007111
               pipelines:
                 type: array
                 items:
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto 
b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 5c20745c06..3f346300b3 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -187,6 +187,8 @@ message DatanodeUsageInfoProto {
     optional int64 remaining = 3;
     optional DatanodeDetailsProto node = 4;
     optional int64 containerCount = 5;
+    optional int64 committed = 6;
+    optional int64 freeSpaceToSpare = 7;
 }
 
 /**
diff --git 
a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
 
b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index de9e39789b..2994073c02 100644
--- 
a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ 
b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -179,6 +179,8 @@ message StorageReportProto {
   optional uint64 remaining = 5 [default = 0];
   optional StorageTypeProto storageType = 6 [default = DISK];
   optional bool failed = 7 [default = false];
+  optional uint64 committed = 8 [default = 0];
+  optional uint64 freeSpaceToSpare = 9 [default = 0];
 }
 
 message MetadataStorageReportProto {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 4c96175b6c..46cb142bb1 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.net.Node;
 import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeStatus;
+import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -274,7 +275,7 @@ public abstract class SCMCommonPlacementPolicy implements
       int nodesRequired, long metadataSizeRequired, long dataSizeRequired)
       throws SCMException {
     List<DatanodeDetails> nodesWithSpace = nodes.stream().filter(d ->
-        hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired))
+        hasEnoughSpace(d, metadataSizeRequired, dataSizeRequired, conf))
         .collect(Collectors.toList());
 
     if (nodesWithSpace.size() < nodesRequired) {
@@ -298,7 +299,9 @@ public abstract class SCMCommonPlacementPolicy implements
    * @return true if we have enough space.
    */
   public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
-      long metadataSizeRequired, long dataSizeRequired) {
+                                       long metadataSizeRequired,
+                                       long dataSizeRequired,
+                                       ConfigurationSource conf) {
     Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
 
     boolean enoughForData = false;
@@ -308,7 +311,9 @@ public abstract class SCMCommonPlacementPolicy implements
 
     if (dataSizeRequired > 0) {
       for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
-        if (reportProto.getRemaining() > dataSizeRequired) {
+        if (VolumeUsage.hasVolumeEnoughSpace(reportProto.getRemaining(),
+              reportProto.getCommitted(), dataSizeRequired,
+              reportProto.getFreeSpaceToSpare())) {
           enoughForData = true;
           break;
         }
@@ -494,7 +499,7 @@ public abstract class SCMCommonPlacementPolicy implements
     NodeStatus nodeStatus = datanodeInfo.getNodeStatus();
     if (nodeStatus.isNodeWritable() &&
         (hasEnoughSpace(datanodeInfo, metadataSizeRequired,
-            dataSizeRequired))) {
+            dataSizeRequired, conf))) {
       LOG.debug("Datanode {} is chosen. Required metadata size is {} and " +
               "required data size is {} and NodeStatus is {}",
           datanodeDetails, metadataSizeRequired, dataSizeRequired, nodeStatus);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index 6541d75d27..abbc50ac86 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -932,7 +932,7 @@ public class ContainerBalancerTask implements Runnable {
       return 0;
     }
     SCMNodeStat aggregatedStats = new SCMNodeStat(
-        0, 0, 0);
+        0, 0, 0, 0, 0);
     for (DatanodeUsageInfo node : nodes) {
       aggregatedStats.add(node.getScmNodeStat());
     }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
index d6857d395c..eedc89dfc5 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java
@@ -42,6 +42,18 @@ interface NodeStat {
    */
   LongMetric getRemaining();
 
+  /**
+   * Get the committed space of the node.
+   * @return the committed space of the node
+   */
+  LongMetric getCommitted();
+
+  /**
+   * Get a min free space available to spare on the node.
+   * @return a min free space available to spare
+   */
+  LongMetric getFreeSpaceToSpare();
+
   /**
    * Set the total/used/remaining space.
    * @param capacity - total space.
@@ -49,7 +61,8 @@ interface NodeStat {
    * @param remain - remaining space.
    */
   @VisibleForTesting
-  void set(long capacity, long used, long remain);
+  void set(long capacity, long used, long remain, long committed,
+           long freeSpaceToSpare);
 
   /**
    * Adding of the stat.
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
index 2f5c6f33f7..330bf67416 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java
@@ -36,16 +36,19 @@ public class SCMNodeMetric  implements 
DatanodeMetric<SCMNodeStat, Long> {
   }
 
   /**
-   * Set the capacity, used and remaining space on a datanode.
+   * Set the capacity, used, remaining and committed space on a datanode.
    *
-   * @param capacity in bytes
-   * @param used in bytes
+   * @param capacity  in bytes
+   * @param used      in bytes
    * @param remaining in bytes
+   * @param committed in bytes
+   * @param freeSpaceToSpare in bytes
    */
   @VisibleForTesting
-  public SCMNodeMetric(long capacity, long used, long remaining) {
+  public SCMNodeMetric(long capacity, long used, long remaining,
+                       long committed, long freeSpaceToSpare) {
     this.stat = new SCMNodeStat();
-    this.stat.set(capacity, used, remaining);
+    this.stat.set(capacity, used, remaining, committed, freeSpaceToSpare);
   }
 
   /**
@@ -156,7 +159,8 @@ public class SCMNodeMetric  implements 
DatanodeMetric<SCMNodeStat, Long> {
   @Override
   public void set(SCMNodeStat value) {
     stat.set(value.getCapacity().get(), value.getScmUsed().get(),
-        value.getRemaining().get());
+        value.getRemaining().get(), value.getCommitted().get(),
+        value.getFreeSpaceToSpare().get());
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
index 962bbb464e..2a848a04ef 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java
@@ -28,16 +28,20 @@ public class SCMNodeStat implements NodeStat {
   private LongMetric capacity;
   private LongMetric scmUsed;
   private LongMetric remaining;
+  private LongMetric committed;
+  private LongMetric freeSpaceToSpare;
 
   public SCMNodeStat() {
-    this(0L, 0L, 0L);
+    this(0L, 0L, 0L, 0L, 0L);
   }
 
   public SCMNodeStat(SCMNodeStat other) {
-    this(other.capacity.get(), other.scmUsed.get(), other.remaining.get());
+    this(other.capacity.get(), other.scmUsed.get(), other.remaining.get(),
+        other.committed.get(), other.freeSpaceToSpare.get());
   }
 
-  public SCMNodeStat(long capacity, long used, long remaining) {
+  public SCMNodeStat(long capacity, long used, long remaining, long committed,
+                     long freeSpaceToSpare) {
     Preconditions.checkArgument(capacity >= 0, "Capacity cannot be " +
         "negative.");
     Preconditions.checkArgument(used >= 0, "used space cannot be " +
@@ -47,6 +51,8 @@ public class SCMNodeStat implements NodeStat {
     this.capacity = new LongMetric(capacity);
     this.scmUsed = new LongMetric(used);
     this.remaining = new LongMetric(remaining);
+    this.committed = new LongMetric(committed);
+    this.freeSpaceToSpare = new LongMetric(freeSpaceToSpare);
   }
 
   /**
@@ -73,6 +79,24 @@ public class SCMNodeStat implements NodeStat {
     return remaining;
   }
 
+  /**
+   * Get the total committed space on the node.
+   * @return the total committed space on the node
+   */
+  @Override
+  public LongMetric getCommitted() {
+    return committed;
+  }
+
+  /**
+   * Get a min space available to spare on the node.
+   * @return a min free space available to spare on the node
+   */
+  @Override
+  public LongMetric getFreeSpaceToSpare() {
+    return freeSpaceToSpare;
+  }
+
   /**
    * Set the capacity, used and remaining space on a datanode.
    *
@@ -82,7 +106,8 @@ public class SCMNodeStat implements NodeStat {
    */
   @Override
   @VisibleForTesting
-  public void set(long newCapacity, long newUsed, long newRemaining) {
+  public void set(long newCapacity, long newUsed, long newRemaining,
+                  long newCommitted, long newFreeSpaceToSpare) {
     Preconditions.checkArgument(newCapacity >= 0, "Capacity cannot be " +
         "negative.");
     Preconditions.checkArgument(newUsed >= 0, "used space cannot be " +
@@ -93,6 +118,8 @@ public class SCMNodeStat implements NodeStat {
     this.capacity = new LongMetric(newCapacity);
     this.scmUsed = new LongMetric(newUsed);
     this.remaining = new LongMetric(newRemaining);
+    this.committed = new LongMetric(newCommitted);
+    this.freeSpaceToSpare = new LongMetric(newFreeSpaceToSpare);
   }
 
   /**
@@ -106,6 +133,9 @@ public class SCMNodeStat implements NodeStat {
     this.capacity.set(this.getCapacity().get() + stat.getCapacity().get());
     this.scmUsed.set(this.getScmUsed().get() + stat.getScmUsed().get());
     this.remaining.set(this.getRemaining().get() + stat.getRemaining().get());
+    this.committed.set(this.getCommitted().get() + stat.getCommitted().get());
+    this.freeSpaceToSpare.set(this.freeSpaceToSpare.get() +
+        stat.getFreeSpaceToSpare().get());
     return this;
   }
 
@@ -120,6 +150,9 @@ public class SCMNodeStat implements NodeStat {
     this.capacity.set(this.getCapacity().get() - stat.getCapacity().get());
     this.scmUsed.set(this.getScmUsed().get() - stat.getScmUsed().get());
     this.remaining.set(this.getRemaining().get() - stat.getRemaining().get());
+    this.committed.set(this.getCommitted().get() - stat.getCommitted().get());
+    this.freeSpaceToSpare.set(freeSpaceToSpare.get() -
+        stat.getFreeSpaceToSpare().get());
     return this;
   }
 
@@ -129,13 +162,16 @@ public class SCMNodeStat implements NodeStat {
       SCMNodeStat tempStat = (SCMNodeStat) to;
       return capacity.isEqual(tempStat.getCapacity().get()) &&
           scmUsed.isEqual(tempStat.getScmUsed().get()) &&
-          remaining.isEqual(tempStat.getRemaining().get());
+          remaining.isEqual(tempStat.getRemaining().get()) &&
+          committed.isEqual(tempStat.getCommitted().get()) &&
+          freeSpaceToSpare.isEqual(tempStat.freeSpaceToSpare.get());
     }
     return false;
   }
 
   @Override
   public int hashCode() {
-    return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get());
+    return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get() ^
+        committed.get() ^ freeSpaceToSpare.get());
   }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
index 14353cfa7e..4f7df49690 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
@@ -205,6 +205,8 @@ public class DatanodeUsageInfo {
       builder.setCapacity(scmNodeStat.getCapacity().get());
       builder.setUsed(scmNodeStat.getScmUsed().get());
       builder.setRemaining(scmNodeStat.getRemaining().get());
+      builder.setCommitted(scmNodeStat.getCommitted().get());
+      builder.setFreeSpaceToSpare(scmNodeStat.getFreeSpaceToSpare().get());
     }
 
     builder.setContainerCount(containerCount);
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index b34f5819f6..e2cce7ac09 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -876,13 +876,18 @@ public class SCMNodeManager implements NodeManager {
     long capacity = 0L;
     long used = 0L;
     long remaining = 0L;
+    long committed = 0L;
+    long freeSpaceToSpare = 0L;
 
     for (SCMNodeStat stat : getNodeStats().values()) {
       capacity += stat.getCapacity().get();
       used += stat.getScmUsed().get();
       remaining += stat.getRemaining().get();
+      committed += stat.getCommitted().get();
+      freeSpaceToSpare += stat.getFreeSpaceToSpare().get();
     }
-    return new SCMNodeStat(capacity, used, remaining);
+    return new SCMNodeStat(capacity, used, remaining, committed,
+        freeSpaceToSpare);
   }
 
   /**
@@ -987,6 +992,8 @@ public class SCMNodeManager implements NodeManager {
       long capacity = 0L;
       long used = 0L;
       long remaining = 0L;
+      long committed = 0L;
+      long freeSpaceToSpare = 0L;
 
       final DatanodeInfo datanodeInfo = nodeStateManager
           .getNode(datanodeDetails);
@@ -996,8 +1003,11 @@ public class SCMNodeManager implements NodeManager {
         capacity += reportProto.getCapacity();
         used += reportProto.getScmUsed();
         remaining += reportProto.getRemaining();
+        committed += reportProto.getCommitted();
+        freeSpaceToSpare += reportProto.getFreeSpaceToSpare();
       }
-      return new SCMNodeStat(capacity, used, remaining);
+      return new SCMNodeStat(capacity, used, remaining, committed,
+          freeSpaceToSpare);
     } catch (NodeNotFoundException e) {
       LOG.warn("Cannot generate NodeStat, datanode {} not found.",
           datanodeDetails.getUuidString());
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
index 4adcd53eb3..0ec74d2405 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
@@ -85,12 +86,15 @@ public abstract class PipelineProvider<REPLICATION_CONFIG
   protected abstract void shutdown();
 
   List<DatanodeDetails> pickNodesNotUsed(REPLICATION_CONFIG replicationConfig,
-      long metadataSizeRequired, long dataSizeRequired) throws SCMException {
+                                         long metadataSizeRequired,
+                                         long dataSizeRequired,
+                                         ConfigurationSource conf)
+      throws SCMException {
     int nodesRequired = replicationConfig.getRequiredNodes();
     List<DatanodeDetails> healthyDNs = pickAllNodesNotUsed(replicationConfig);
     List<DatanodeDetails> healthyDNsWithSpace = healthyDNs.stream()
         .filter(dn -> SCMCommonPlacementPolicy
-            .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired))
+            .hasEnoughSpace(dn, metadataSizeRequired, dataSizeRequired, conf))
         .limit(nodesRequired)
         .collect(Collectors.toList());
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 1b62120c1e..8336bce5ea 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -163,7 +163,7 @@ public class RatisPipelineProvider
     switch (factor) {
     case ONE:
       dns = pickNodesNotUsed(replicationConfig, minRatisVolumeSizeBytes,
-          containerSizeBytes);
+          containerSizeBytes, conf);
       break;
     case THREE:
       List<DatanodeDetails> excludeDueToEngagement = 
filterPipelineEngagement();
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index ffefc7c5f5..87497a9f07 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -24,30 +24,40 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.net.Node;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 
-import org.mockito.Mockito;
-
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
+import static 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.util.function.Function;
 import java.util.stream.Stream;
 
@@ -448,11 +458,66 @@ public class TestSCMCommonPlacementPolicy {
           }
         };
     dummyPlacementPolicy.chooseDatanodes(null, null, 1, 1, 1);
-    Assertions.assertFalse(usedNodesIdentity.get());
+    assertFalse(usedNodesIdentity.get());
     dummyPlacementPolicy.chooseDatanodes(null, null, null, 1, 1, 1);
     Assertions.assertTrue(usedNodesIdentity.get());
   }
 
+  @Test
+  public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() {
+    NodeManager nodeMngr = mock(NodeManager.class);
+    UUID datanodeUuid = UUID.randomUUID();
+    DummyPlacementPolicy placementPolicy =
+        new DummyPlacementPolicy(nodeMngr, conf, 1);
+    DatanodeDetails datanodeDetails = mock(DatanodeDetails.class);
+    when(datanodeDetails.getUuid()).thenReturn(datanodeUuid);
+
+    DatanodeInfo datanodeInfo = mock(DatanodeInfo.class);
+    NodeStatus nodeStatus = mock(NodeStatus.class);
+    when(nodeStatus.isNodeWritable()).thenReturn(true);
+    when(datanodeInfo.getNodeStatus()).thenReturn(nodeStatus);
+    when(nodeMngr.getNodeByUuid(eq(datanodeUuid))).thenReturn(datanodeInfo);
+
+    // capacity = 200000, used = 90000, remaining = 101000, committed = 500
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
+        HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
+                200000, 90000, 101000, DISK).toBuilder()
+            .setCommitted(500)
+            .setFreeSpaceToSpare(10000)
+            .build();
+    // capacity = 200000, used = 90000, remaining = 101000, committed = 1000
+    StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 =
+        HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
+                200000, 90000, 101000, DISK).toBuilder()
+            .setCommitted(1000)
+            .setFreeSpaceToSpare(100000)
+            .build();
+    StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto
+        metaReport = 
HddsTestUtils.createMetadataStorageReport("/data/metadata",
+          200);
+    when(datanodeInfo.getStorageReports())
+        .thenReturn(Collections.singletonList(storageReport1))
+        .thenReturn(Collections.singletonList(storageReport2));
+    when(datanodeInfo.getMetadataStorageReports())
+        .thenReturn(Collections.singletonList(metaReport));
+
+
+    // 500 committed bytes:
+    //
+    //   101000       500
+    //     |           |
+    // (remaining - committed) > Math.max(4000, freeSpaceToSpare)
+    //                                                    |
+    //                                                  100000
+    //
+    // Summary: 101000 - 500 > 100000 == true
+    assertTrue(placementPolicy.isValidNode(datanodeDetails, 100, 4000));
+
+    // 1000 committed bytes:
+    // Summary: 101000 - 1000 > 100000 == false
+    assertFalse(placementPolicy.isValidNode(datanodeDetails, 100, 4000));
+  }
+
   private static class DummyPlacementPolicy extends SCMCommonPlacementPolicy {
     private Map<DatanodeDetails, Node> rackMap;
     private List<Node> racks;
@@ -485,7 +550,7 @@ public class TestSCMCommonPlacementPolicy {
       super(nodeManager, conf);
       this.rackCnt = rackCnt;
       this.racks = IntStream.range(0, rackCnt)
-      .mapToObj(i -> Mockito.mock(Node.class)).collect(Collectors.toList());
+      .mapToObj(i -> mock(Node.class)).collect(Collectors.toList());
       List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
       rackMap = datanodeRackMap.entrySet().stream()
               .collect(Collectors.toMap(
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 98638ebe00..794dedceef 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -227,7 +227,7 @@ public class MockNodeManager implements NodeManager {
         NODES[x % NODES.length].capacity - NODES[x % NODES.length].used;
     newStat.set(
         (NODES[x % NODES.length].capacity),
-        (NODES[x % NODES.length].used), remaining);
+        (NODES[x % NODES.length].used), remaining, 0, 100000);
     this.nodeMetricMap.put(datanodeDetails, newStat);
     aggregateStat.add(newStat);
 
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index 4bc3cf43cf..56d02dabb5 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -1207,7 +1207,8 @@ public class TestContainerBalancerTask {
         datanodeCapacity = (long) (datanodeUsedSpace / 
nodeUtilizations.get(i));
       }
       SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace,
-          datanodeCapacity - datanodeUsedSpace);
+          datanodeCapacity - datanodeUsedSpace, 0,
+          datanodeCapacity - datanodeUsedSpace - 1);
       nodesInCluster.get(i).setScmNodeStat(stat);
       clusterUsedSpace += datanodeUsedSpace;
       clusterCapacity += datanodeCapacity;
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
index 7e734042d8..bb6f17bcc1 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
@@ -56,11 +56,11 @@ public class TestFindTargetStrategy {
 
     //create three datanodes with different usageinfo
     DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30));
     DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30));
     DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30));
 
     //insert in ascending order
     overUtilizedDatanodes.add(dui1);
@@ -98,11 +98,11 @@ public class TestFindTargetStrategy {
   public void testResetPotentialTargets() {
     // create three datanodes with different usage infos
     DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50));
     DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60));
     DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
-        .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90));
+        .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70));
 
     List<DatanodeUsageInfo> potentialTargets = new ArrayList<>();
     potentialTargets.add(dui1);
@@ -179,18 +179,18 @@ public class TestFindTargetStrategy {
     List<DatanodeUsageInfo> overUtilizedDatanodes = new ArrayList<>();
     //set the farthest target with the lowest usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90)));
+        new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80)));
     //set the tree targets, which have the same network topology distance
     //to source , with different usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20)));
+        new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10)));
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40)));
+        new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30)));
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60)));
+        new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50)));
     //set the nearest target with the highest usage info
     overUtilizedDatanodes.add(
-        new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10)));
+        new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5)));
 
 
     FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology =
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 910fe75ede..e51f9731ad 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -103,13 +103,13 @@ public class TestSCMContainerPlacementCapacity {
         .thenReturn(new ArrayList<>(datanodes));
 
     when(mockNodeManager.getNodeStat(any()))
-        .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+        .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90));
     when(mockNodeManager.getNodeStat(datanodes.get(2)))
-        .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+        .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9));
     when(mockNodeManager.getNodeStat(datanodes.get(3)))
-        .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
+        .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19));
     when(mockNodeManager.getNodeStat(datanodes.get(4)))
-        .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
+        .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20));
     when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer(
             invocation -> datanodes.stream()
                 .filter(dn -> dn.getUuid().equals(invocation.getArgument(0)))
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
index 6ba2fc440a..9c9bfad582 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
@@ -31,13 +31,13 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 public class TestDatanodeMetrics {
   @Test
   public void testSCMNodeMetric() {
-    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
+    SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
     assertEquals((long) stat.getCapacity().get(), 100L);
     assertEquals(10L, (long) stat.getScmUsed().get());
     assertEquals(90L, (long) stat.getRemaining().get());
     SCMNodeMetric metric = new SCMNodeMetric(stat);
 
-    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
+    SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
     assertEquals(100L, (long) stat.getCapacity().get());
     assertEquals(10L, (long) stat.getScmUsed().get());
     assertEquals(90L, (long) stat.getRemaining().get());
@@ -53,8 +53,8 @@ public class TestDatanodeMetrics {
     assertTrue(metric.isGreater(zeroMetric.get()));
 
     // Another case when nodes have similar weight
-    SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L);
-    SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L);
+    SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000);
+    SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000);
     assertTrue(new SCMNodeMetric(stat2).isGreater(stat1));
   }
 }
diff --git 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index d46513b24b..b967fa0658 100644
--- 
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -155,8 +155,16 @@ public class UsageInfoSubcommand extends ScmSubcommand {
         + " B", StringUtils.byteDesc(info.getRemaining()));
     System.out.printf("%-13s: %s %n", "Remaining %",
         PERCENT_FORMAT.format(info.getRemainingRatio()));
-    System.out.printf("%-13s: %d %n%n", "Container(s)",
+    System.out.printf("%-13s: %d %n", "Container(s)",
             info.getContainerCount());
+    System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated",
+        info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted()));
+    System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable",
+        (info.getRemaining() - info.getCommitted()) + " B",
+        StringUtils.byteDesc((info.getRemaining() - info.getCommitted())));
+    System.out.printf("%-24s: %s (%s) %n%n", "Free Space To Spare",
+        info.getFreeSpaceToSpare() + " B",
+        StringUtils.byteDesc(info.getFreeSpaceToSpare()));
   }
 
   /**
@@ -181,6 +189,8 @@ public class UsageInfoSubcommand extends ScmSubcommand {
     private long capacity = 0;
     private long used = 0;
     private long remaining = 0;
+    private long committed = 0;
+    private long freeSpaceToSpare = 0;
     private long containerCount = 0;
 
     DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) {
@@ -196,9 +206,15 @@ public class UsageInfoSubcommand extends ScmSubcommand {
       if (proto.hasRemaining()) {
         remaining = proto.getRemaining();
       }
+      if (proto.hasCommitted()) {
+        committed = proto.getCommitted();
+      }
       if (proto.hasContainerCount()) {
         containerCount = proto.getContainerCount();
       }
+      if (proto.hasFreeSpaceToSpare()) {
+        freeSpaceToSpare = proto.getFreeSpaceToSpare();
+      }
     }
 
     public DatanodeDetails getDatanodeDetails() {
@@ -220,6 +236,12 @@ public class UsageInfoSubcommand extends ScmSubcommand {
     public long getRemaining() {
       return remaining;
     }
+    public long getCommitted() {
+      return committed;
+    }
+    public long getFreeSpaceToSpare() {
+      return freeSpaceToSpare;
+    }
 
     public long getContainerCount() {
       return containerCount;
diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index 0cc8ed9be6..a52a0a7ed8 100644
--- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.cli.datanode;
 
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.commons.codec.CharEncoding;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -97,6 +98,38 @@ public class TestUsageInfoSubcommand {
             json.get(0).get("containerCount").longValue());
   }
 
+  @Test
+  public void testOutputDataFieldsAligning() throws IOException {
+    // given
+    ScmClient scmClient = mock(ScmClient.class);
+    Mockito.when(scmClient.getDatanodeUsageInfo(
+            Mockito.anyBoolean(), Mockito.anyInt()))
+        .thenAnswer(invocation -> getUsageProto());
+
+    CommandLine c = new CommandLine(cmd);
+    c.parseArgs("-m");
+
+    // when
+    cmd.execute(scmClient);
+
+    // then
+    String output = outContent.toString(CharEncoding.UTF_8);
+    Assertions.assertTrue(output.contains("UUID         :"));
+    Assertions.assertTrue(output.contains("IP Address   :"));
+    Assertions.assertTrue(output.contains("Hostname     :"));
+    Assertions.assertTrue(output.contains("Capacity     :"));
+    Assertions.assertTrue(output.contains("Total Used   :"));
+    Assertions.assertTrue(output.contains("Total Used % :"));
+    Assertions.assertTrue(output.contains("Ozone Used   :"));
+    Assertions.assertTrue(output.contains("Ozone Used % :"));
+    Assertions.assertTrue(output.contains("Remaining    :"));
+    Assertions.assertTrue(output.contains("Remaining %  :"));
+    Assertions.assertTrue(output.contains("Container(s) :"));
+    Assertions.assertTrue(output.contains("Container Pre-allocated :"));
+    Assertions.assertTrue(output.contains("Remaining Allocatable   :"));
+    Assertions.assertTrue(output.contains("Free Space To Spare     :"));
+  }
+
   private List<HddsProtos.DatanodeUsageInfoProto> getUsageProto() {
     List<HddsProtos.DatanodeUsageInfoProto> result = new ArrayList<>();
     result.add(HddsProtos.DatanodeUsageInfoProto.newBuilder()
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
index bc87c402eb..b074e5ba56 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ClusterStateEndpoint.java
@@ -120,7 +120,8 @@ public class ClusterStateEndpoint {
     SCMNodeStat stats = nodeManager.getStats();
     DatanodeStorageReport storageReport =
         new DatanodeStorageReport(stats.getCapacity().get(),
-            stats.getScmUsed().get(), stats.getRemaining().get());
+            stats.getScmUsed().get(), stats.getRemaining().get(),
+            stats.getCommitted().get());
 
     ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder();
     GlobalStats volumeRecord = globalStatsDao.findById(
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 33df0ca1bd..968bfbc463 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -168,6 +168,7 @@ public class NodeEndpoint {
     long capacity = nodeStat.getCapacity().get();
     long used = nodeStat.getScmUsed().get();
     long remaining = nodeStat.getRemaining().get();
-    return new DatanodeStorageReport(capacity, used, remaining);
+    long committed = nodeStat.getCommitted().get();
+    return new DatanodeStorageReport(capacity, used, remaining, committed);
   }
 }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
index d3fbb598c1..43a20317a2 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
@@ -24,11 +24,14 @@ public class DatanodeStorageReport {
   private long capacity;
   private long used;
   private long remaining;
+  private long committed;
 
-  public DatanodeStorageReport(long capacity, long used, long remaining) {
+  public DatanodeStorageReport(long capacity, long used, long remaining,
+                               long committed) {
     this.capacity = capacity;
     this.used = used;
     this.remaining = remaining;
+    this.committed = committed;
   }
 
   public long getCapacity() {
@@ -42,4 +45,8 @@ public class DatanodeStorageReport {
   public long getRemaining() {
     return remaining;
   }
+
+  public long getCommitted() {
+    return committed;
+  }
 }
diff --git 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
index 60362299fa..204609f66f 100644
--- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
+++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
@@ -29,7 +29,8 @@
         "storageReport": {
           "capacity": 62725623808,
           "used": 488288256,
-          "remaining": 21005319168
+          "remaining": 21005319168,
+          "committed": 10240000
         },
         "pipelines": [
           {
@@ -62,7 +63,8 @@
         "storageReport": {
           "capacity": 549755813888,
           "used": 450971566080,
-          "remaining": 95784247808
+          "remaining": 95784247808,
+          "committed": 34563456
         },
         "pipelines": [
           {
@@ -95,7 +97,8 @@
         "storageReport": {
           "capacity": 549755813888,
           "used": 450971566080,
-          "remaining": 95784247808
+          "remaining": 95784247808,
+          "committed": 34562
         },
         "pipelines": [
           {
@@ -128,7 +131,8 @@
         "storageReport": {
           "capacity": 549755813888,
           "used": 450971566080,
-          "remaining": 95784247808
+          "remaining": 95784247808,
+          "committed": 4576435
         },
         "pipelines": [
           {
@@ -161,7 +165,8 @@
         "storageReport": {
           "capacity": 549755813888,
           "used": 450971566080,
-          "remaining": 95784247808
+          "remaining": 95784247808,
+          "committed": 3453121
         },
         "pipelines": [
           {
@@ -194,7 +199,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 86757023244288
+          "remaining": 86757023244288,
+          "committed": 3457623435
         },
         "pipelines": [
           {
@@ -233,7 +239,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 86757023244288
+          "remaining": 86757023244288,
+          "committed": 345624
         },
         "pipelines": [
           {
@@ -272,7 +279,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 86757023244288
+          "remaining": 86757023244288,
+          "committed": 123464574
         },
         "pipelines": [
           {
@@ -311,7 +319,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 86757023244288
+          "remaining": 86757023244288,
+          "committed": 556721345
         },
         "pipelines": [
           {
@@ -350,7 +359,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 86757023244288
+          "remaining": 86757023244288,
+          "committed": 45671235234
         },
         "pipelines": [
           {
@@ -389,7 +399,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 0,
-          "remaining": 110737488355328
+          "remaining": 110737488355328,
+          "committed": 0
         },
         "pipelines": [],
         "containers": 0,
@@ -409,7 +420,8 @@
         "storageReport": {
           "capacity": 805306368000,
           "used": 644245094400,
-          "remaining": 121061273600
+          "remaining": 121061273600,
+          "committed": 4572345234
         },
         "pipelines": [
           {
@@ -442,7 +454,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 92757023244288
+          "remaining": 92757023244288,
+          "committed": 34563453
         },
         "pipelines": [
           {
@@ -475,7 +488,8 @@
         "storageReport": {
           "capacity": 549755813888,
           "used": 450971566080,
-          "remaining": 94784247808
+          "remaining": 94784247808,
+          "committed": 7234234
         },
         "pipelines": [
           {
@@ -514,7 +528,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 92757023244288
+          "remaining": 92757023244288,
+          "committed": 34562346
         },
         "pipelines": [
           {
@@ -547,7 +562,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 76757023244288
+          "remaining": 76757023244288,
+          "committed": 834324523
         },
         "pipelines": [
           {
@@ -580,7 +596,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 66757023244288
+          "remaining": 66757023244288,
+          "committed": 346467345
         },
         "pipelines": [
           {
@@ -619,7 +636,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 96157023244288
+          "remaining": 96157023244288,
+          "committed": 45245456
         },
         "pipelines": [
           {
@@ -652,7 +670,8 @@
         "storageReport": {
           "capacity": 140737488355328,
           "used": 43980465111040,
-          "remaining": 94757023244288
+          "remaining": 94757023244288,
+          "committed": 45673234
         },
         "pipelines": [
           {
diff --git 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less
 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less
index b2dddbcaa3..ecba534cc0 100644
--- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less
+++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.less
@@ -19,6 +19,7 @@
 @progress-gray: #d0d0d0;
 @progress-blue: #1890ff;
 @progress-green: #52c41a;
+@progress-dark-grey: #424242;
 
 .storage-cell-container {
   position: relative;
@@ -45,3 +46,7 @@
 .remaining-bg {
   color: @progress-gray;
 }
+
+.committed-bg {
+  color: @progress-dark-grey;
+}
diff --git 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx
 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx
index 10decce103..9263c6817b 100644
--- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx
+++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/storageBar/storageBar.tsx
@@ -32,6 +32,7 @@ interface IStorageBarProps extends RouteComponentProps<object> {
   total: number;
   used: number;
   remaining: number;
+  committed: number;
   showMeta?: boolean;
 }
 
@@ -39,6 +40,7 @@ const defaultProps = {
   total: 0,
   used: 0,
   remaining: 0,
+  committed: 0,
   showMeta: true
 };
 
@@ -46,7 +48,7 @@ class StorageBar extends React.Component<IStorageBarProps> {
   static defaultProps = defaultProps;
 
   render() {
-    const {total, used, remaining, showMeta} = this.props;
+    const {total, used, remaining, committed, showMeta} = this.props;
     const nonOzoneUsed = total - remaining - used;
     const totalUsed = total - remaining;
     const tooltip = (
@@ -54,6 +56,7 @@ class StorageBar extends React.Component<IStorageBarProps> {
         <div><Icon component={FilledIcon} className='ozone-used-bg'/> Ozone Used ({size(used)})</div>
         <div><Icon component={FilledIcon} className='non-ozone-used-bg'/> Non Ozone Used ({size(nonOzoneUsed)})</div>
         <div><Icon component={FilledIcon} className='remaining-bg'/> Remaining ({size(remaining)})</div>
+        <div><Icon component={FilledIcon} className='committed-bg'/> Container Pre-allocated ({size(committed)})</div>
       </div>
     );
     const metaElement = showMeta ? <div>{size(used)} + {size(nonOzoneUsed)} / {size(total)}</div> : null;
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
index 8f92742916..d69466ac0f 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
@@ -30,4 +30,5 @@ export interface IStorageReport {
   capacity: number;
   used: number;
   remaining: number;
+  committed: number;
 }
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
index 6a6118494f..19f306ae4f 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
@@ -69,6 +69,7 @@ interface IDatanode {
   storageUsed: number;
   storageTotal: number;
   storageRemaining: number;
+  storageCommitted: number;
   pipelines: IPipeline[];
   containers: number;
   openContainers: number;
@@ -173,7 +174,7 @@ const COLUMNS = [
     render: (text: string, record: IDatanode) => (
       <StorageBar
         total={record.storageTotal} used={record.storageUsed}
-        remaining={record.storageRemaining}/>
+        remaining={record.storageRemaining} committed={record.storageCommitted}/>
     )},
   {
     title: 'Last Heartbeat',
@@ -378,6 +379,7 @@ export class Datanodes extends React.Component<Record<string, object>, IDatanode
           storageUsed: datanode.storageReport.used,
           storageTotal: datanode.storageReport.capacity,
           storageRemaining: datanode.storageReport.remaining,
+          storageCommitted: datanode.storageReport.committed,
           pipelines: datanode.pipelines,
           containers: datanode.containers,
           openContainers: datanode.openContainers,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index d3bee19ba6..cbe850b918 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -1248,6 +1248,6 @@ public class TestNSSummaryEndpointWithFSO {
 
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, 
-        ROOT_QUOTA - ROOT_DATA_SIZE);
+        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1);
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index b324bd6b42..ba00f843f4 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -1286,6 +1286,6 @@ public class TestNSSummaryEndpointWithLegacy {
 
   private static SCMNodeStat getMockSCMRootStat() {
     return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, 
-        ROOT_QUOTA - ROOT_DATA_SIZE);
+        ROOT_QUOTA - ROOT_DATA_SIZE, 0, ROOT_QUOTA - ROOT_DATA_SIZE - 1);
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to