This is an automated email from the ASF dual-hosted git repository.

siddhant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 1f4b3e06ff6 HDDS-14446. Clarify Datanode disk space related commands 
and metrics (#9646)
1f4b3e06ff6 is described below

commit 1f4b3e06ff65ff463d964291fea3ea0772dc3ec2
Author: Siddhant Sangwan <[email protected]>
AuthorDate: Tue Jan 27 15:59:28 2026 +0530

    HDDS-14446. Clarify Datanode disk space related commands and metrics (#9646)
---
 .../common/impl/StorageLocationReport.java         |  45 +++++-
 .../container/common/volume/StorageVolume.java     |   7 +-
 .../container/common/volume/VolumeInfoMetrics.java |  22 ++-
 .../ozone/container/common/volume/VolumeUsage.java |   6 +-
 .../webapps/hddsDatanode/dn-overview.html          |  18 ++-
 .../src/main/resources/webapps/hddsDatanode/dn.js  |   8 +-
 .../common/impl/TestStorageLocationReport.java     |  60 ++++++++
 .../common/volume/TestVolumeInfoMetrics.java       |  91 ++++++++++++
 .../interface-client/src/main/proto/hdds.proto     |   9 +-
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |   6 +
 .../hadoop/hdds/scm/node/DatanodeUsageInfo.java    |  13 ++
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  75 +++++++++-
 .../hadoop/hdds/scm/node/SCMNodeMetrics.java       |  19 ++-
 .../hdds/scm/node/TestDatanodeUsageInfo.java       |  70 +++++++++
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  49 ++++++-
 .../hadoop/hdds/scm/node/TestSCMNodeMetrics.java   |  16 +-
 .../hdds/scm/cli/datanode/UsageInfoSubcommand.java | 161 ++++++++++++++-------
 .../scm/cli/datanode/TestUsageInfoSubcommand.java  |  43 +++---
 .../src/main/smoketest/admincli/datanode.robot     |  13 +-
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |  15 +-
 .../recon/api/types/DatanodeStorageReport.java     |  48 ++++++
 .../__tests__/datanodes/DatanodesTable.test.tsx    |  28 ++++
 .../mocks/datanodeMocks/datanodeResponseMocks.ts   |  25 +++-
 .../src/v2/components/storageBar/storageBar.tsx    |  48 ++++--
 .../src/v2/components/tables/datanodesTable.tsx    |   5 +-
 .../src/v2/pages/datanodes/datanodes.tsx           |   1 +
 .../ozone-recon-web/src/v2/types/datanode.types.ts |   5 +-
 .../src/v2/types/datanodeStorageReport.types.ts    |  36 +++++
 .../hadoop/ozone/recon/api/TestEndpoints.java      |  30 ++++
 29 files changed, 829 insertions(+), 143 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 7e4b4995b2e..34643cc6161 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -43,6 +43,8 @@ public final class StorageLocationReport implements 
StorageLocationReportMXBean
   private final StorageType storageType;
   private final String storageLocation;
   private final long reserved;
+  private final long fsCapacity;
+  private final long fsAvailable;
 
   private StorageLocationReport(Builder builder) {
     this.id = builder.id;
@@ -55,6 +57,8 @@ private StorageLocationReport(Builder builder) {
     this.storageType = builder.storageType;
     this.storageLocation = builder.storageLocation;
     this.reserved = builder.reserved;
+    this.fsCapacity = builder.fsCapacity;
+    this.fsAvailable = builder.fsAvailable;
   }
 
   public long getUsableSpace() {
@@ -143,6 +147,14 @@ public long getReserved() {
     return reserved;
   }
 
+  public long getFsCapacity() {
+    return fsCapacity;
+  }
+
+  public long getFsAvailable() {
+    return fsAvailable;
+  }
+
   private static StorageType getStorageType(StorageTypeProto proto) throws
       IOException {
     StorageType storageType;
@@ -186,6 +198,8 @@ public StorageReportProto getProtoBufMessage() throws 
IOException {
         .setFailed(isFailed())
         .setFreeSpaceToSpare(getFreeSpaceToSpare())
         .setReserved(getReserved())
+        .setFsCapacity(getFsCapacity())
+        .setFsAvailable(getFsAvailable())
         .build();
   }
 
@@ -240,6 +254,12 @@ public static StorageLocationReport 
getFromProtobuf(StorageReportProto report)
     if (report.hasReserved()) {
       builder.setReserved(report.getReserved());
     }
+    if (report.hasFsCapacity()) {
+      builder.setFsCapacity(report.getFsCapacity());
+    }
+    if (report.hasFsAvailable()) {
+      builder.setFsAvailable(report.getFsAvailable());
+    }
     return builder.build();
   }
 
@@ -254,11 +274,16 @@ public String toString() {
     if (failed) {
       sb.append(" failed");
     } else {
-      sb.append(" capacity=").append(capacity)
-          .append(" used=").append(scmUsed)
-          .append(" available=").append(remaining)
+      long fsUsed = fsCapacity - fsAvailable;
+      sb.append(" ozoneCapacity=").append(capacity)
+          .append(" ozoneUsed=").append(scmUsed)
+          .append(" ozoneAvailable=").append(remaining)
           .append(" minFree=").append(freeSpaceToSpare)
-          .append(" committed=").append(committed);
+          .append(" committed=").append(committed)
+          .append(" reserved=").append(reserved)
+          .append(" fsCapacity=").append(fsCapacity)
+          .append(" fsAvailable=").append(fsAvailable)
+          .append(" fsUsed=").append(fsUsed);
     }
 
     return sb.append(" }").toString();
@@ -287,6 +312,8 @@ public static class Builder {
     private StorageType storageType;
     private String storageLocation;
     private long reserved;
+    private long fsCapacity;
+    private long fsAvailable;
 
     /**
      * Sets the storageId.
@@ -405,6 +432,16 @@ public long getReserved() {
       return reserved;
     }
 
+    public Builder setFsCapacity(long fsCapacity) {
+      this.fsCapacity = fsCapacity;
+      return this;
+    }
+
+    public Builder setFsAvailable(long fsAvailable) {
+      this.fsAvailable = fsAvailable;
+      return this;
+    }
+
     /**
      * Builds and returns StorageLocationReport instance.
      *
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index 4e2053f860c..b3946831831 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -480,11 +480,14 @@ protected StorageLocationReport.Builder reportBuilder() {
         .setStorageType(storageType);
 
     if (!builder.isFailed()) {
-      SpaceUsageSource usage = volumeUsage.getCurrentUsage();
+      SpaceUsageSource.Fixed fsUsage = volumeUsage.realUsage();
+      SpaceUsageSource usage = volumeUsage.getCurrentUsage(fsUsage);
       builder.setCapacity(usage.getCapacity())
           .setRemaining(usage.getAvailable())
           .setScmUsed(usage.getUsedSpace())
-          .setReserved(volumeUsage.getReservedInBytes());
+          .setReserved(volumeUsage.getReservedInBytes())
+          .setFsCapacity(fsUsage.getCapacity())
+          .setFsAvailable(fsUsage.getAvailable());
     }
 
     return builder;
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
index 7478e25a671..8340c1c4f7f 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java
@@ -44,15 +44,21 @@ public class VolumeInfoMetrics implements MetricsSource {
       VolumeInfoMetrics.class.getSimpleName();
 
   private static final MetricsInfo CAPACITY =
-      Interns.info("Capacity", "Capacity");
+      Interns.info("OzoneCapacity", "Ozone usable capacity (after reserved 
space adjustment)");
   private static final MetricsInfo AVAILABLE =
-      Interns.info("Available", "Available Space");
+      Interns.info("OzoneAvailable", "Ozone available space (after reserved 
space adjustment)");
   private static final MetricsInfo USED =
-      Interns.info("Used", "Used Space");
+      Interns.info("OzoneUsed", "Ozone used space");
   private static final MetricsInfo RESERVED =
       Interns.info("Reserved", "Reserved Space");
   private static final MetricsInfo TOTAL_CAPACITY =
-      Interns.info("TotalCapacity", "Total Capacity");
+      Interns.info("TotalCapacity", "Ozone capacity + reserved space");
+  private static final MetricsInfo FS_CAPACITY =
+      Interns.info("FilesystemCapacity", "Filesystem capacity as reported by 
the local filesystem");
+  private static final MetricsInfo FS_AVAILABLE =
+      Interns.info("FilesystemAvailable", "Filesystem available space as 
reported by the local filesystem");
+  private static final MetricsInfo FS_USED =
+      Interns.info("FilesystemUsed", "Filesystem used space 
(FilesystemCapacity - FilesystemAvailable)");
 
   private final MetricsRegistry registry;
   private final String metricsSourceName;
@@ -185,14 +191,18 @@ public void getMetrics(MetricsCollector collector, 
boolean all) {
     registry.snapshot(builder, all);
     VolumeUsage volumeUsage = volume.getVolumeUsage();
     if (volumeUsage != null) {
-      SpaceUsageSource usage = volumeUsage.getCurrentUsage();
+      SpaceUsageSource.Fixed fsUsage = volumeUsage.realUsage();
+      SpaceUsageSource usage = volumeUsage.getCurrentUsage(fsUsage);
       long reserved = volumeUsage.getReservedInBytes();
       builder
           .addGauge(CAPACITY, usage.getCapacity())
           .addGauge(AVAILABLE, usage.getAvailable())
           .addGauge(USED, usage.getUsedSpace())
           .addGauge(RESERVED, reserved)
-          .addGauge(TOTAL_CAPACITY, usage.getCapacity() + reserved);
+          .addGauge(TOTAL_CAPACITY, usage.getCapacity() + reserved)
+          .addGauge(FS_CAPACITY, fsUsage.getCapacity())
+          .addGauge(FS_AVAILABLE, fsUsage.getAvailable())
+          .addGauge(FS_USED, fsUsage.getUsedSpace());
     }
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 15275fcd6c5..280d5cf6c1b 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -124,7 +124,11 @@ SpaceUsageSource.Fixed realUsage() {
    * B) avail = fsAvail - Max(reserved - other, 0);
    */
   public SpaceUsageSource.Fixed getCurrentUsage() {
-    final SpaceUsageSource.Fixed real = realUsage();
+    return getCurrentUsage(realUsage());
+  }
+
+  // use this variant if real usage values are also needed at the caller
+  public SpaceUsageSource.Fixed getCurrentUsage(SpaceUsageSource.Fixed real) {
     return reservedInBytes == 0
         ? real
         : new SpaceUsageSource.Fixed(
diff --git 
a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
 
b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
index 4f51b423e8a..f1a89b779ee 100644
--- 
a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
+++ 
b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn-overview.html
@@ -53,10 +53,14 @@ <h2>Volume Information</h2>
         <th>Directory</th>
         <th>Storage Type</th>
         <th>Volume Type</th>
-        <th>Used Space</th>
-        <th>Available Space</th>
+        <th>Ozone Capacity</th>
+        <th>Ozone Used</th>
+        <th>Ozone Available</th>
         <th>Reserved</th>
-        <th>Total Capacity</th>
+        <th>Total Capacity (Ozone Capacity + Reserved)</th>
+        <th>Filesystem Capacity</th>
+        <th>Filesystem Available</th>
+        <th>Filesystem Used</th>
         <th>Containers</th>
         <th>State</th>
     </tr>
@@ -66,10 +70,14 @@ <h2>Volume Information</h2>
         <td>{{volumeInfo["tag.StorageDirectory"]}}</td>
         <td>{{volumeInfo["tag.StorageType"]}}</td>
         <td>{{volumeInfo["tag.VolumeType"]}}</td>
-        <td>{{volumeInfo.Used}}</td>
-        <td>{{volumeInfo.Available}}</td>
+        <td>{{volumeInfo.OzoneCapacity}}</td>
+        <td>{{volumeInfo.OzoneUsed}}</td>
+        <td>{{volumeInfo.OzoneAvailable}}</td>
         <td>{{volumeInfo.Reserved}}</td>
         <td>{{volumeInfo.TotalCapacity}}</td>
+        <td>{{volumeInfo.FilesystemCapacity}}</td>
+        <td>{{volumeInfo.FilesystemAvailable}}</td>
+        <td>{{volumeInfo.FilesystemUsed}}</td>
         <td>{{volumeInfo.Containers}}</td>
         <td>{{volumeInfo["tag.VolumeState"]}}</td>
     </tr>
diff --git 
a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js 
b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
index 547e566ef8a..cd3a238a883 100644
--- 
a/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
+++ 
b/hadoop-hdds/container-service/src/main/resources/webapps/hddsDatanode/dn.js
@@ -30,10 +30,14 @@
                 .then(function (result) {
                     ctrl.dnmetrics = result.data.beans;
                     ctrl.dnmetrics.forEach(volume => {
-                 volume.Used = transform(volume.Used);
-                 volume.Available = transform(volume.Available);
+                 volume.OzoneCapacity = transform(volume.OzoneCapacity);
+                 volume.OzoneUsed = transform(volume.OzoneUsed);
+                 volume.OzoneAvailable = transform(volume.OzoneAvailable);
                  volume.Reserved = transform(volume.Reserved);
                  volume.TotalCapacity = transform(volume.TotalCapacity);
+                 volume.FilesystemCapacity = 
transform(volume.FilesystemCapacity);
+                 volume.FilesystemAvailable = 
transform(volume.FilesystemAvailable);
+                 volume.FilesystemUsed = transform(volume.FilesystemUsed);
                 })
                 });
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestStorageLocationReport.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestStorageLocationReport.java
new file mode 100644
index 00000000000..76840b0a2f3
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestStorageLocationReport.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.impl;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.io.IOException;
+import org.apache.hadoop.fs.StorageType;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.junit.jupiter.api.Test;
+
+class TestStorageLocationReport {
+
+  @Test
+  void testStorageReportProtoIncludesFilesystemFieldsAndRoundTrips() throws 
IOException {
+    StorageLocationReport report = StorageLocationReport.newBuilder()
+        .setId("vol-1")
+        .setStorageLocation("/data/hdds/vol-1")
+        .setStorageType(StorageType.DISK)
+        .setCapacity(1000L)
+        .setScmUsed(100L)
+        .setRemaining(900L)
+        .setCommitted(10L)
+        .setFreeSpaceToSpare(5L)
+        .setReserved(50L)
+        .setFsCapacity(2000L)
+        .setFsAvailable(1500L)
+        .build();
+
+    StorageReportProto proto = report.getProtoBufMessage();
+    assertThat(proto.hasFsCapacity()).isTrue();
+    assertThat(proto.hasFsAvailable()).isTrue();
+    assertThat(proto.getFsCapacity()).isEqualTo(2000L);
+    assertThat(proto.getFsAvailable()).isEqualTo(1500L);
+
+    StorageLocationReport parsed = 
StorageLocationReport.getFromProtobuf(proto);
+    assertThat(parsed.getCapacity()).isEqualTo(1000L);
+    assertThat(parsed.getScmUsed()).isEqualTo(100L);
+    assertThat(parsed.getRemaining()).isEqualTo(900L);
+    assertThat(parsed.getReserved()).isEqualTo(50L);
+    assertThat(parsed.getFsCapacity()).isEqualTo(2000L);
+    assertThat(parsed.getFsAvailable()).isEqualTo(1500L);
+  }
+}
+
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeInfoMetrics.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeInfoMetrics.java
new file mode 100644
index 00000000000..7dc96458fdb
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeInfoMetrics.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.volume;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
+import org.apache.hadoop.metrics2.impl.MetricsRecordImpl;
+import org.junit.jupiter.api.Test;
+
+class TestVolumeInfoMetrics {
+
+  @Test
+  void testVolumeInfoMetricsExposeOzoneAndFilesystemGauges() {
+    HddsVolume volume = mock(HddsVolume.class);
+    when(volume.getStorageType()).thenReturn(StorageType.DISK);
+    when(volume.getStorageDir()).thenReturn(new File("/tmp/vol-1"));
+    when(volume.getDatanodeUuid()).thenReturn("dn-1");
+    when(volume.getLayoutVersion()).thenReturn(1);
+    when(volume.getStorageState()).thenReturn(HddsVolume.VolumeState.NORMAL);
+    when(volume.getType()).thenReturn(HddsVolume.VolumeType.DATA_VOLUME);
+    when(volume.getCommittedBytes()).thenReturn(10L);
+    when(volume.getContainers()).thenReturn(3L);
+
+    VolumeUsage volumeUsage = mock(VolumeUsage.class);
+    when(volume.getVolumeUsage()).thenReturn(volumeUsage);
+
+    // Ozone-usable usage and reserved
+    when(volumeUsage.getCurrentUsage(any())).thenReturn(new 
SpaceUsageSource.Fixed(
+        1000L,
+        900L,
+        100L
+    ));
+    when(volumeUsage.getReservedInBytes()).thenReturn(50L);
+
+    // Raw filesystem stats
+    when(volumeUsage.realUsage()).thenReturn(new SpaceUsageSource.Fixed(2000L, 
1500L, 500L));
+
+    VolumeInfoMetrics metrics = new VolumeInfoMetrics("test-vol-1", volume);
+    try {
+      MetricsCollectorImpl collector = new MetricsCollectorImpl();
+      metrics.getMetrics(collector, true);
+      assertThat(collector.getRecords()).hasSize(1);
+
+      MetricsRecordImpl rec = collector.getRecords().get(0);
+      Iterable<AbstractMetric> all = rec.metrics();
+
+      assertThat(findMetric(all, "OzoneCapacity")).isEqualTo(1000L);
+      assertThat(findMetric(all, "OzoneAvailable")).isEqualTo(900L);
+      assertThat(findMetric(all, "OzoneUsed")).isEqualTo(100L);
+
+      assertThat(findMetric(all, "FilesystemCapacity")).isEqualTo(2000L);
+      assertThat(findMetric(all, "FilesystemAvailable")).isEqualTo(1500L);
+      assertThat(findMetric(all, "FilesystemUsed")).isEqualTo(500L);
+    } finally {
+      metrics.unregister();
+    }
+  }
+
+  private static long findMetric(Iterable<AbstractMetric> metrics, String 
name) {
+    for (AbstractMetric m : metrics) {
+      if (name.equals(m.name())) {
+        return m.value().longValue();
+      }
+    }
+    throw new AssertionError("Missing metric: " + name);
+  }
+}
+
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto 
b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index d542a01b7da..eb819b80a3e 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -209,15 +209,18 @@ message NodePool {
 }
 
 message DatanodeUsageInfoProto {
-    optional int64 capacity = 1;
-    optional int64 used = 2;
-    optional int64 remaining = 3;
+    optional int64 capacity = 1; // ozone capacity after subtracting reserved 
space from fs capacity
+    optional int64 used = 2; // space used by ozone as calculated by DU
+    optional int64 remaining = 3; // space available to ozone after adjusting 
reserved space
     optional DatanodeDetailsProto node = 4;
     optional int64 containerCount = 5;
     optional int64 committed = 6;
     optional int64 freeSpaceToSpare = 7;
     optional int64 pipelineCount = 8;
     optional int64 reserved = 9;
+    // Raw filesystem stats
+    optional int64 fsCapacity = 10;
+    optional int64 fsAvailable = 11;
 }
 
 /**
diff --git 
a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
 
b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 6379a7ed89f..b5f6c9e80ad 100644
--- 
a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ 
b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -183,6 +183,12 @@ message StorageReportProto {
   optional uint64 committed = 8 [default = 0];
   optional uint64 freeSpaceToSpare = 9 [default = 0];
   optional uint64 reserved = 10;
+  /*
+   Raw filesystem stats (as reported by the local filesystem). These represent 
the real device
+   capacity/available, independent of Ozone's reserved-space adjustment.
+   */
+  optional uint64 fsCapacity = 11 [default = 0];
+  optional uint64 fsAvailable = 12 [default = 0];
 }
 
 message MetadataStorageReportProto {
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
index 1a77e1a08f3..d6ac9edfaeb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeUsageInfo.java
@@ -33,6 +33,9 @@ public class DatanodeUsageInfo {
   private int containerCount;
   private int pipelineCount;
   private long reserved;
+  private boolean fsUsagePresent;
+  private long fsCapacity;
+  private long fsAvailable;
 
   /**
    * Constructs a DatanodeUsageInfo with DatanodeDetails and SCMNodeStat.
@@ -167,6 +170,12 @@ public void setReserved(long reserved) {
     this.reserved = reserved; 
   }
 
+  public void setFilesystemUsage(long capacity, long available) {
+    this.fsUsagePresent = true;
+    this.fsCapacity = capacity;
+    this.fsAvailable = available;
+  }
+
   /**
    * Gets Comparator that compares two DatanodeUsageInfo on the basis of
    * their utilization values. Utilization is (capacity - remaining) divided
@@ -234,6 +243,10 @@ private DatanodeUsageInfoProto.Builder toProtoBuilder(int 
clientVersion) {
     builder.setContainerCount(containerCount);
     builder.setPipelineCount(pipelineCount);
     builder.setReserved(reserved);
+    if (fsUsagePresent) {
+      builder.setFsCapacity(fsCapacity);
+      builder.setFsAvailable(fsAvailable);
+    }
     return builder;
   }
 }
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 66aeacee73c..3289e7b312a 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
@@ -1036,6 +1037,10 @@ public DatanodeUsageInfo getUsageInfo(DatanodeDetails 
dn) {
       usageInfo.setContainerCount(getContainerCount(dn));
       usageInfo.setPipelineCount(getPipeLineCount(dn));
       usageInfo.setReserved(getTotalReserved(dn));
+      SpaceUsageSource.Fixed fs = getTotalFilesystemUsage(dn);
+      if (fs != null) {
+        usageInfo.setFilesystemUsage(fs.getCapacity(), fs.getAvailable());
+      }
     } catch (NodeNotFoundException ex) {
       LOG.error("Unknown datanode {}.", dn, ex);
     }
@@ -1131,8 +1136,21 @@ public Map<String, Long> getNodeInfo() {
         nodeInfo.put(s.label + stat.name(), 0L);
       }
     }
-    nodeInfo.put("TotalCapacity", 0L);
-    nodeInfo.put("TotalUsed", 0L);
+    nodeInfo.put("TotalOzoneCapacity", 0L);
+    nodeInfo.put("TotalOzoneUsed", 0L);
+    // Raw filesystem totals across non-dead nodes. -1 means an older-version 
DN did not send fs stats.
+    nodeInfo.put("TotalFilesystemCapacity", -1L);
+    nodeInfo.put("TotalFilesystemUsed", -1L);
+    nodeInfo.put("TotalFilesystemAvailable", -1L);
+
+    long totalFsCapacity = 0L;
+    long totalFsAvailable = 0L;
+    /*
+    If any storage report is missing fs stats, this is a rolling upgrade 
scenario in which some older dn versions
+    aren't reporting fs stats. Better to not report aggregated fs stats at all 
in this case?
+     */
+    boolean fsPresent = false;
+    boolean fsMissing = false;
 
     for (DatanodeInfo node : nodeStateManager.getAllNodes()) {
       String keyPrefix = "";
@@ -1165,10 +1183,27 @@ public Map<String, Long> getNodeInfo() {
           nodeInfo.compute(keyPrefix + UsageMetrics.SSDUsed.name(),
               (k, v) -> v + reportProto.getScmUsed());
         }
-        nodeInfo.compute("TotalCapacity", (k, v) -> v + 
reportProto.getCapacity());
-        nodeInfo.compute("TotalUsed", (k, v) -> v + reportProto.getScmUsed());
+        nodeInfo.compute("TotalOzoneCapacity", (k, v) -> v + 
reportProto.getCapacity());
+        nodeInfo.compute("TotalOzoneUsed", (k, v) -> v + 
reportProto.getScmUsed());
+
+        if (reportProto.hasFailed() && reportProto.getFailed()) {
+          continue;
+        }
+        if (reportProto.hasFsCapacity() && reportProto.hasFsAvailable()) {
+          fsPresent = true;
+          totalFsCapacity += reportProto.getFsCapacity();
+          totalFsAvailable += reportProto.getFsAvailable();
+        } else {
+          fsMissing = true;
+        }
       }
     }
+    if (fsPresent && !fsMissing) {
+      // only report aggregated fs stats when every storage report included 
them
+      nodeInfo.put("TotalFilesystemCapacity", totalFsCapacity);
+      nodeInfo.put("TotalFilesystemUsed", totalFsCapacity - totalFsAvailable);
+      nodeInfo.put("TotalFilesystemAvailable", totalFsAvailable);
+    }
     return nodeInfo;
   }
 
@@ -1737,6 +1772,38 @@ public long getTotalReserved(DatanodeDetails 
datanodeDetails)
     return reserved;
   }
 
+  /**
+   * Compute aggregated raw filesystem capacity/available/used for a datanode
+   * from the storage reports.
+   */
+  public SpaceUsageSource.Fixed getTotalFilesystemUsage(DatanodeDetails 
datanodeDetails) {
+    final DatanodeInfo datanodeInfo;
+    try {
+      datanodeInfo = nodeStateManager.getNode(datanodeDetails);
+    } catch (NodeNotFoundException exception) {
+      LOG.error("Node not found when calculating fs usage for {}.", 
datanodeDetails, exception);
+      return null;
+    }
+
+    long capacity = 0L;
+    long available = 0L;
+    boolean hasFsReport = false;
+    for (StorageReportProto r : datanodeInfo.getStorageReports()) {
+      if (r.hasFailed() && r.getFailed()) {
+        continue;
+      }
+      if (r.hasFsCapacity() && r.hasFsAvailable()) {
+        hasFsReport = true;
+        capacity += r.getFsCapacity();
+        available += r.getFsAvailable();
+      }
+    }
+    if (!hasFsReport) {
+      LOG.debug("Datanode {} does not have filesystem storage stats in its 
storage reports.", datanodeDetails);
+    }
+    return hasFsReport ? new SpaceUsageSource.Fixed(capacity, available, 
capacity - available) : null;
+  }
+
   @Override
   public void addDatanodeCommand(DatanodeID datanodeID, SCMCommand<?> command) 
{
     writeLock().lock();
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
index 323b7dbe1e0..0dfb1206bb4 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java
@@ -182,18 +182,25 @@ private String diskMetricDescription(String metric) {
     } else if (metric.indexOf("Decommissioned") >= 0) {
       sb.append(" decommissioned");
     }
+    if ("TotalFilesystemCapacity".equals(metric)) {
+      return "Total raw filesystem capacity";
+    } else if ("TotalFilesystemUsed".equals(metric)) {
+      return "Total raw filesystem used space";
+    } else if ("TotalFilesystemAvailable".equals(metric)) {
+      return "Total raw filesystem available space";
+    }
     if (metric.indexOf("DiskCapacity") >= 0) {
-      sb.append(" disk capacity");
+      sb.append(" disk capacity (Ozone usable)");
     } else if (metric.indexOf("DiskUsed") >= 0) {
-      sb.append(" disk capacity used");
+      sb.append(" disk capacity used (Ozone)");
     } else if (metric.indexOf("DiskRemaining") >= 0) {
-      sb.append(" disk capacity remaining");
+      sb.append(" disk capacity remaining (Ozone)");
     } else if (metric.indexOf("SSDCapacity") >= 0) {
-      sb.append(" SSD capacity");
+      sb.append(" SSD capacity (Ozone)");
     } else if (metric.indexOf("SSDUsed") >= 0) {
-      sb.append(" SSD capacity used");
+      sb.append(" SSD capacity used (Ozone)");
     } else if (metric.indexOf("SSDRemaining") >= 0) {
-      sb.append(" SSD capacity remaining");
+      sb.append(" SSD capacity remaining (Ozone)");
     }
     return sb.toString();
   }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
new file mode 100644
index 00000000000..60eb21850ec
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeUsageInfo.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import static 
org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeUsageInfoProto;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
+import org.apache.hadoop.ozone.ClientVersion;
+import org.junit.jupiter.api.Test;
+
+class TestDatanodeUsageInfo {
+
+  @Test
+  void testToProtoDoesNotIncludeFilesystemFieldsByDefault() {
+    DatanodeDetails dn = randomDatanodeDetails();
+    SCMNodeStat stat = new SCMNodeStat(
+        1000L,  // capacity
+        100L,   // scmUsed
+        900L,   // remaining
+        10L,    // committed
+        5L,     // freeSpaceToSpare
+        0L      // reserved
+    );
+
+    DatanodeUsageInfo info = new DatanodeUsageInfo(dn, stat);
+    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT_VERSION);
+
+    assertThat(proto.hasFsCapacity()).isFalse();
+    assertThat(proto.hasFsAvailable()).isFalse();
+
+    assertThat(proto.getCapacity()).isEqualTo(1000L);
+    assertThat(proto.getUsed()).isEqualTo(100L);
+    assertThat(proto.getRemaining()).isEqualTo(900L);
+  }
+
+  @Test
+  void testToProtoIncludesFilesystemFieldsWhenPresent() {
+    DatanodeDetails dn = randomDatanodeDetails();
+    SCMNodeStat stat = new SCMNodeStat(1000L, 100L, 900L, 10L, 5L, 0L);
+
+    DatanodeUsageInfo info = new DatanodeUsageInfo(dn, stat);
+    info.setFilesystemUsage(2000L, 1500L);
+
+    DatanodeUsageInfoProto proto = info.toProto(ClientVersion.CURRENT_VERSION);
+
+    assertThat(proto.hasFsCapacity()).isTrue();
+    assertThat(proto.hasFsAvailable()).isTrue();
+    assertThat(proto.getFsCapacity()).isEqualTo(2000L);
+    assertThat(proto.getFsAvailable()).isEqualTo(1500L);
+  }
+}
+
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 928e38295f5..59a4938e8d7 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -72,6 +72,7 @@
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -1908,7 +1909,11 @@ public void testGetNodeInfo()
       String storagePath = testDir.getAbsolutePath() + "/" + dnId;
       StorageReportProto report = HddsTestUtils
           .createStorageReport(dnId, storagePath, capacity, used,
-              remaining, null);
+              remaining, null)
+          .toBuilder()
+          .setFsCapacity(3000L)
+          .setFsAvailable(2400L)
+          .build();
 
       nodeManager.register(datanodeDetails, HddsTestUtils.createNodeReport(
           Arrays.asList(report), emptyList()),
@@ -1947,8 +1952,46 @@ public void testGetNodeInfo()
     assertEquals(1900, stats.get("MaintenanceDiskRemaining").longValue());
 
     // All nodes
-    assertEquals(12000, stats.get("TotalCapacity").longValue());
-    assertEquals(600, stats.get("TotalUsed").longValue());
+    assertEquals(12000, stats.get("TotalOzoneCapacity").longValue());
+    assertEquals(600, stats.get("TotalOzoneUsed").longValue());
+    assertEquals(18000, stats.get("TotalFilesystemCapacity").longValue());
+    assertEquals(14400, stats.get("TotalFilesystemAvailable").longValue());
+    assertEquals(3600, stats.get("TotalFilesystemUsed").longValue());
+  }
+
+  @Test
+  public void testGetTotalFilesystemUsage()
+      throws IOException, AuthenticationException {
+    OzoneConfiguration conf = getConf();
+    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
+      DatanodeDetails datanodeDetails = 
MockDatanodeDetails.randomDatanodeDetails();
+      DatanodeID dnId = datanodeDetails.getID();
+
+      StorageReportProto report1 = HddsTestUtils
+          .createStorageReport(dnId, testDir.getAbsolutePath() + "/vol-1",
+              500L, 100L, 400L, null)
+          .toBuilder()
+          .setFsCapacity(1000L)
+          .setFsAvailable(600L)
+          .build();
+      StorageReportProto report2 = HddsTestUtils
+          .createStorageReport(dnId, testDir.getAbsolutePath() + "/vol-2",
+              700L, 200L, 500L, null)
+          .toBuilder()
+          .setFsCapacity(2000L)
+          .setFsAvailable(1500L)
+          .build();
+
+      nodeManager.register(datanodeDetails,
+          HddsTestUtils.createNodeReport(Arrays.asList(report1, report2), 
emptyList()), null);
+      nodeManager.processHeartbeat(datanodeDetails);
+
+      SpaceUsageSource.Fixed totals = 
nodeManager.getTotalFilesystemUsage(datanodeDetails);
+      assertNotNull(totals);
+      assertEquals(3000L, totals.getCapacity());
+      assertEquals(2100L, totals.getAvailable());
+      assertEquals(900L, totals.getUsedSpace());
+    }
   }
 
   /**
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
index 81f95677809..ac2c1e4c51e 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeMetrics.java
@@ -160,7 +160,11 @@ public void testNodeReportProcessingFailure() {
   public void testNodeCountAndInfoMetricsReported() throws Exception {
 
     StorageReportProto storageReport = HddsTestUtils.createStorageReport(
-        registeredDatanode.getID(), "/tmp", 100, 10, 90, null);
+        registeredDatanode.getID(), "/tmp", 100, 10, 90, null)
+        .toBuilder()
+        .setFsCapacity(200)
+        .setFsAvailable(150)
+        .build();
     NodeReportProto nodeReport = NodeReportProto.newBuilder()
         .addStorageReport(storageReport).build();
 
@@ -229,9 +233,15 @@ public void testNodeCountAndInfoMetricsReported() throws 
Exception {
     // The DN has no metadata volumes, so hasEnoughSpace() returns false 
indicating the DN is out of space.
     assertGauge("NonWritableNodes", 1,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("TotalCapacity", 100L,
+    assertGauge("TotalOzoneCapacity", 100L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("TotalOzoneUsed", 10L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("TotalFilesystemCapacity", 200L,
+        getMetrics(SCMNodeMetrics.class.getSimpleName()));
+    assertGauge("TotalFilesystemUsed", 50L,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
-    assertGauge("TotalUsed", 10L,
+    assertGauge("TotalFilesystemAvailable", 150L,
         getMetrics(SCMNodeMetrics.class.getSimpleName()));
     nodeManager.processHeartbeat(registeredDatanode);
     sleep(4000);
diff --git 
a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
 
b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index 5d6ebb11cdd..e01de959790 100644
--- 
a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ 
b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -42,9 +42,14 @@
  */
 @Command(
     name = "usageinfo",
-    description = "List usage information " +
-        "(such as Capacity, SCMUsed, Remaining) of a datanode by IP address " +
-        "or Host name or UUID",
+    description = "List usage information of a datanode by IP address, 
hostname or UUID.\n\n" +
+        "Legend (bytes):\n" +
+        "  Filesystem Capacity/Used/Available: raw filesystem stats for the 
Datanode, aggregated across volumes.\n" +
+        "  Ozone Capacity/Used/Available: Ozone-usable stats after 
reserved-space adjustment, aggregated across " +
+        "volumes (see hdds.datanode.dir.du.reserved / 
hdds.datanode.dir.du.reserved.percent).\n" +
+        "  Reserved (hdds.datanode.dir.du.reserved): configured reserved space 
for non-Ozone usage.\n" +
+        "  Committed: space pre-allocated for containers.\n" +
+        "  Free Space To Spare: minimum free space to keep before closing 
containers.\n",
     mixinStandardHelpOptions = true,
     versionProvider = HddsVersionProvider.class)
 public class UsageInfoSubcommand extends ScmSubcommand {
@@ -111,46 +116,51 @@ public void execute(ScmClient scmClient) throws 
IOException {
    * @param info Information such as Capacity, SCMUsed etc.
    */
   private void printInfo(DatanodeUsage info) {
-    System.out.printf("%-13s: %s %n", "UUID",
+    System.out.printf("%-24s: %s %n", "UUID",
         info.getDatanodeDetails().getUuid());
-    System.out.printf("%-13s: %s %n", "IP Address",
+    System.out.printf("%-24s: %s %n", "IP Address",
         info.getDatanodeDetails().getIpAddress());
-    System.out.printf("%-13s: %s %n", "Hostname",
+    System.out.printf("%-24s: %s %n", "Hostname",
         info.getDatanodeDetails().getHostName());
-    // print capacity in a readable format
-    System.out.printf("%-13s: %s (%s) %n", "Capacity", info.getCapacity()
-        + " B", StringUtils.byteDesc(info.getCapacity()));
-
-    // print total used space and its percentage in a readable format
-    System.out.printf("%-13s: %s (%s) %n", "Total Used", info.getTotalUsed()
-        + " B", StringUtils.byteDesc(info.getTotalUsed()));
-    System.out.printf("%-13s: %s %n", "Total Used %",
-        PERCENT_FORMAT.format(info.getTotalUsedRatio()));
-
-    // print space used by ozone and its percentage in a readable format
-    System.out.printf("%-13s: %s (%s) %n", "Ozone Used", info.getOzoneUsed()
-        + " B", StringUtils.byteDesc(info.getOzoneUsed()));
-    System.out.printf("%-13s: %s %n", "Ozone Used %",
-        PERCENT_FORMAT.format(info.getUsedRatio()));
-
-    // print total remaining space and its percentage in a readable format
-    System.out.printf("%-13s: %s (%s) %n", "Remaining", info.getRemaining()
-        + " B", StringUtils.byteDesc(info.getRemaining()));
-    System.out.printf("%-13s: %s %n", "Remaining %",
-        PERCENT_FORMAT.format(info.getRemainingRatio()));
-    System.out.printf("%-13s: %d %n", "Pipeline(s)",
+
+    if (info.hasFilesystemUsage()) {
+      System.out.printf("%-24s: %s (%s) %n", "Filesystem Capacity",
+          info.getFilesystemCapacity() + " B", 
StringUtils.byteDesc(info.getFilesystemCapacity()));
+      System.out.printf("%-24s: %s (%s) %n", "Filesystem Used",
+          info.getFilesystemUsed() + " B", 
StringUtils.byteDesc(info.getFilesystemUsed()));
+      System.out.printf("%-24s: %s (Filesystem Used/Filesystem Capacity) %n", 
"Filesystem Used %",
+          PERCENT_FORMAT.format(info.getFilesystemUsedRatio()));
+      System.out.printf("%-24s: %s (%s) %n", "Filesystem Available",
+          info.getFilesystemAvailable() + " B", 
StringUtils.byteDesc(info.getFilesystemAvailable()));
+      System.out.printf("%-24s: %s (Filesystem Available/Filesystem Capacity) 
%n", "Filesystem Available %",
+          PERCENT_FORMAT.format(info.getFilesystemAvailableRatio()));
+    }
+
+    System.out.printf("%-24s: %s (%s) %n", "Ozone Capacity",
+        info.getOzoneCapacity() + " B", 
StringUtils.byteDesc(info.getOzoneCapacity()));
+    System.out.printf("%-24s: %s (%s) %n", "Ozone Used",
+        info.getOzoneUsed() + " B", StringUtils.byteDesc(info.getOzoneUsed()));
+    System.out.printf("%-24s: %s (Ozone Used/Ozone Capacity) %n", "Ozone Used 
%",
+        PERCENT_FORMAT.format(info.getOzoneUsedRatio()));
+    System.out.printf("%-24s: %s (%s) %n", "Ozone Available",
+        info.getOzoneAvailable() + " B", 
StringUtils.byteDesc(info.getOzoneAvailable()));
+    System.out.printf("%-24s: %s (Ozone Available/Ozone Capacity) %n", "Ozone 
Available %",
+        PERCENT_FORMAT.format(info.getOzoneAvailableRatio()));
+
+
+    System.out.printf("%-24s: %d %n", "Pipeline(s)",
             info.getPipelineCount());
-    System.out.printf("%-13s: %d %n", "Container(s)",
+    System.out.printf("%-24s: %d %n", "Container(s)",
             info.getContainerCount());
     System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated",
         info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted()));
     System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable",
-        (info.getRemaining() - info.getCommitted()) + " B",
-        StringUtils.byteDesc((info.getRemaining() - info.getCommitted())));
+        (info.getOzoneAvailable() - info.getCommitted()) + " B",
+        StringUtils.byteDesc((info.getOzoneAvailable() - 
info.getCommitted())));
     System.out.printf("%-24s: %s (%s) %n", "Free Space To Spare",
         info.getFreeSpaceToSpare() + " B",
         StringUtils.byteDesc(info.getFreeSpaceToSpare()));
-    System.out.printf("%-13s: %s (%s) %n", "Reserved",
+    System.out.printf("%-24s: %s (%s) %n", "Reserved",
         info.getReserved() + " B", 
         StringUtils.byteDesc(info.getReserved()));
     System.out.println();
@@ -175,9 +185,13 @@ public void serialize(Double value, JsonGenerator jgen,
   private static class DatanodeUsage {
 
     private DatanodeDetails datanodeDetails = null;
-    private long capacity = 0;
-    private long used = 0;
-    private long remaining = 0;
+    private boolean filesystemUsagePresent = false;
+    private long filesystemCapacity = 0;
+    private long filesystemAvailable = 0;
+    private long filesystemUsed = 0;
+    private long ozoneCapacity = 0;
+    private long ozoneUsed = 0;
+    private long ozoneAvailable = 0;
     private long committed = 0;
     private long freeSpaceToSpare = 0;
     private long containerCount = 0;
@@ -188,14 +202,21 @@ private static class DatanodeUsage {
       if (proto.hasNode()) {
         datanodeDetails = DatanodeDetails.getFromProtoBuf(proto.getNode());
       }
+      if (proto.hasFsCapacity() && proto.hasFsAvailable()) {
+        filesystemUsagePresent = true;
+        filesystemCapacity = proto.getFsCapacity();
+        filesystemAvailable = proto.getFsAvailable();
+        filesystemUsed = filesystemCapacity - filesystemAvailable;
+      }
+
       if (proto.hasCapacity()) {
-        capacity = proto.getCapacity();
+        ozoneCapacity = proto.getCapacity();
       }
       if (proto.hasUsed()) {
-        used = proto.getUsed();
+        ozoneUsed = proto.getUsed();
       }
       if (proto.hasRemaining()) {
-        remaining = proto.getRemaining();
+        ozoneAvailable = proto.getRemaining();
       }
       if (proto.hasCommitted()) {
         committed = proto.getCommitted();
@@ -218,20 +239,32 @@ public DatanodeDetails getDatanodeDetails() {
       return datanodeDetails;
     }
 
-    public long getCapacity() {
-      return capacity;
+    public boolean hasFilesystemUsage() {
+      return filesystemUsagePresent;
+    }
+
+    public long getFilesystemCapacity() {
+      return filesystemCapacity;
+    }
+
+    public long getFilesystemUsed() {
+      return filesystemUsed;
     }
 
-    public long getTotalUsed() {
-      return capacity - remaining;
+    public long getFilesystemAvailable() {
+      return filesystemAvailable;
+    }
+
+    public long getOzoneCapacity() {
+      return ozoneCapacity;
     }
 
     public long getOzoneUsed() {
-      return used;
+      return ozoneUsed;
     }
 
-    public long getRemaining() {
-      return remaining;
+    public long getOzoneAvailable() {
+      return ozoneAvailable;
     }
 
     public long getCommitted() {
@@ -247,33 +280,49 @@ public long getContainerCount() {
     }
 
     @JsonSerialize(using = DecimalJsonSerializer.class)
-    public double getTotalUsedPercent() {
-      return getTotalUsedRatio() * 100;
+    public double getFilesystemUsedPercent() {
+      return getFilesystemUsedRatio() * 100;
+    }
+
+    @JsonSerialize(using = DecimalJsonSerializer.class)
+    public double getFilesystemAvailablePercent() {
+      return getFilesystemAvailableRatio() * 100;
     }
 
     @JsonSerialize(using = DecimalJsonSerializer.class)
     public double getOzoneUsedPercent() {
-      return getUsedRatio() * 100;
+      return getOzoneUsedRatio() * 100;
     }
 
     @JsonSerialize(using = DecimalJsonSerializer.class)
-    public double getRemainingPercent() {
-      return getRemainingRatio() * 100;
+    public double getOzoneAvailablePercent() {
+      return getOzoneAvailableRatio() * 100;
     }
 
     @JsonIgnore
-    public double getTotalUsedRatio() {
-      return 1 - getRemainingRatio();
+    public double getFilesystemUsedRatio() {
+      if (!filesystemUsagePresent || filesystemCapacity == 0) {
+        return 0;
+      }
+      return filesystemUsed / (double) filesystemCapacity;
+    }
+
+    @JsonIgnore
+    public double getFilesystemAvailableRatio() {
+      if (!filesystemUsagePresent || filesystemCapacity == 0) {
+        return 0;
+      }
+      return filesystemAvailable / (double) filesystemCapacity;
     }
 
     @JsonIgnore
-    public double getUsedRatio() {
-      return used / (double) capacity;
+    public double getOzoneUsedRatio() {
+      return ozoneUsed / (double) ozoneCapacity;
     }
 
     @JsonIgnore
-    public double getRemainingRatio() {
-      return remaining / (double) capacity;
+    public double getOzoneAvailableRatio() {
+      return ozoneAvailable / (double) ozoneCapacity;
     }
 
     public long getPipelineCount() {
diff --git 
a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
 
b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index 89fff68fb29..b104db6ef98 100644
--- 
a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++ 
b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -84,13 +84,17 @@ public void testCorrectJsonValuesInReport() throws 
IOException {
     assertEquals(ARRAY, json.getNodeType());
     assertNotNull(json.get(0).get("datanodeDetails"));
     assertEquals(10, json.get(0).get("ozoneUsed").longValue());
-    assertEquals(100, json.get(0).get("capacity").longValue());
-    assertEquals(80, json.get(0).get("remaining").longValue());
-    assertEquals(20, json.get(0).get("totalUsed").longValue());
+    assertEquals(100, json.get(0).get("ozoneCapacity").longValue());
+    assertEquals(80, json.get(0).get("ozoneAvailable").longValue());
 
-    assertEquals(20.00, json.get(0).get("totalUsedPercent").doubleValue(), 
0.001);
     assertEquals(10.00, json.get(0).get("ozoneUsedPercent").doubleValue(), 
0.001);
-    assertEquals(80.00, json.get(0).get("remainingPercent").doubleValue(), 
0.001);
+    assertEquals(80.00, 
json.get(0).get("ozoneAvailablePercent").doubleValue(), 0.001);
+
+    assertEquals(1000, json.get(0).get("filesystemCapacity").longValue());
+    assertEquals(700, json.get(0).get("filesystemAvailable").longValue());
+    assertEquals(300, json.get(0).get("filesystemUsed").longValue());
+    assertEquals(30.00, 
json.get(0).get("filesystemUsedPercent").doubleValue(), 0.001);
+    assertEquals(70.00, 
json.get(0).get("filesystemAvailablePercent").doubleValue(), 0.001);
 
     assertEquals(5, json.get(0).get("containerCount").longValue());
     assertEquals(10, json.get(0).get("pipelineCount").longValue());
@@ -111,21 +115,24 @@ public void testOutputDataFieldsAligning() throws 
IOException {
 
     // then
     String output = outContent.toString(CharEncoding.UTF_8);
-    assertThat(output).contains("UUID         :");
-    assertThat(output).contains("IP Address   :");
-    assertThat(output).contains("Hostname     :");
-    assertThat(output).contains("Capacity     :");
-    assertThat(output).contains("Total Used   :");
-    assertThat(output).contains("Total Used % :");
-    assertThat(output).contains("Ozone Used   :");
-    assertThat(output).contains("Ozone Used % :");
-    assertThat(output).contains("Remaining    :");
-    assertThat(output).contains("Remaining %  :");
-    assertThat(output).contains("Container(s) :");
-    assertThat(output).contains("Pipeline(s)  :");
+    assertThat(output).contains("UUID                    :");
+    assertThat(output).contains("IP Address              :");
+    assertThat(output).contains("Hostname                :");
+    assertThat(output).contains("Ozone Capacity          :");
+    assertThat(output).contains("Ozone Used              :");
+    assertThat(output).contains("Ozone Used %            :");
+    assertThat(output).contains("Ozone Available         :");
+    assertThat(output).contains("Ozone Available %       :");
+    assertThat(output).contains("Container(s)            :");
+    assertThat(output).contains("Pipeline(s)             :");
     assertThat(output).contains("Container Pre-allocated :");
     assertThat(output).contains("Remaining Allocatable   :");
     assertThat(output).contains("Free Space To Spare     :");
+    assertThat(output).contains("Filesystem Capacity     :");
+    assertThat(output).contains("Filesystem Used         :");
+    assertThat(output).contains("Filesystem Available    :");
+    assertThat(output).contains("Filesystem Used %       :");
+    assertThat(output).contains("Filesystem Available %  :");
   }
 
   private List<HddsProtos.DatanodeUsageInfoProto> getUsageProto() {
@@ -137,6 +144,8 @@ private List<HddsProtos.DatanodeUsageInfoProto> 
getUsageProto() {
         .setUsed(10)
         .setContainerCount(5)
         .setPipelineCount(10)
+        .setFsCapacity(1000)
+        .setFsAvailable(700)
         .build());
     return result;
   }
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot 
b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot
index d73d8465b28..4ea8167ebe1 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot
@@ -106,15 +106,18 @@ List datanodes as JSON
 
 Get usage info as JSON
     ${output} =         Execute          ozone admin datanode usageinfo -m 
--json | jq -r '.'
-                        Should contain   ${output}  capacity
+                        Should contain   ${output}  ozoneCapacity
                         Should contain   ${output}  committed
                         Should contain   ${output}  containerCount
                         Should contain   ${output}  datanodeDetails
                         Should contain   ${output}  freeSpaceToSpare
                         Should contain   ${output}  ozoneUsed
                         Should contain   ${output}  ozoneUsedPercent
-                        Should contain   ${output}  remaining
-                        Should contain   ${output}  remainingPercent
-                        Should contain   ${output}  totalUsed
-                        Should contain   ${output}  totalUsedPercent
+                        Should contain   ${output}  ozoneAvailable
+                        Should contain   ${output}  ozoneAvailablePercent
+                        Should contain   ${output}  filesystemUsed
+                        Should contain   ${output}  filesystemUsedPercent
+                        Should contain   ${output}  filesystemAvailable
+                        Should contain   ${output}  filesystemAvailablePercent
+                        Should contain   ${output}  filesystemCapacity
                         Should contain   ${output}  reserved
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 114ded36d0b..672becc08ca 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -46,6 +46,7 @@
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.DecommissionUtils;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -174,14 +175,20 @@ public Response getDatanodes() {
   private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) {
     SCMNodeStat nodeStat =
         nodeManager.getNodeStat(datanode).get();
-    DatanodeStorageReport storageReport = DatanodeStorageReport.newBuilder()
+    SpaceUsageSource.Fixed fsUsage = 
nodeManager.getTotalFilesystemUsage(datanode);
+    DatanodeStorageReport.Builder builder = DatanodeStorageReport.newBuilder()
         .setCapacity(nodeStat.getCapacity().get())
         .setUsed(nodeStat.getScmUsed().get())
         .setRemaining(nodeStat.getRemaining().get())
         .setCommitted(nodeStat.getCommitted().get())
-        .setMinimumFreeSpace(nodeStat.getFreeSpaceToSpare().get())
-        .build();
-    return storageReport;
+        .setMinimumFreeSpace(nodeStat.getFreeSpaceToSpare().get());
+
+    if (fsUsage != null) {
+      builder.setFilesystemCapacity(fsUsage.getCapacity())
+          .setFilesystemAvailable(fsUsage.getAvailable())
+          .setFilesystemUsed(fsUsage.getUsedSpace());
+    }
+    return builder.build();
   }
 
   /**
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
index e26a761eb5b..390228d983f 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeStorageReport.java
@@ -33,6 +33,9 @@ public final class DatanodeStorageReport {
   private long committed;
   private long minimumFreeSpace;
   private long reserved;
+  private long filesystemCapacity;
+  private long filesystemUsed;
+  private long filesystemAvailable;
 
   public DatanodeStorageReport() {
   }
@@ -46,6 +49,9 @@ private DatanodeStorageReport(Builder builder) {
     this.committed = builder.committed;
     this.minimumFreeSpace = builder.minimumFreeSpace;
     this.reserved = builder.reserved;
+    this.filesystemCapacity = builder.filesystemCapacity;
+    this.filesystemUsed = builder.filesystemUsed;
+    this.filesystemAvailable = builder.filesystemAvailable;
     builder.validate();
   }
 
@@ -85,6 +91,18 @@ public static Builder newBuilder() {
     return new Builder();
   }
 
+  public long getFilesystemCapacity() {
+    return filesystemCapacity;
+  }
+
+  public long getFilesystemUsed() {
+    return filesystemUsed;
+  }
+
+  public long getFilesystemAvailable() {
+    return filesystemAvailable;
+  }
+
   /**
    * Builder class for DataNodeStorage Report.
    */
@@ -97,6 +115,9 @@ public static final class Builder {
     private long committed = 0;
     private long minimumFreeSpace = 0;
     private long reserved = 0;
+    private long filesystemCapacity = 0;
+    private long filesystemUsed = 0;
+    private long filesystemAvailable = 0;
 
     private static final Logger LOG =
         LoggerFactory.getLogger(Builder.class);
@@ -144,6 +165,21 @@ public Builder setReserved(long reserved) {
       return this;
     }
 
+    public Builder setFilesystemCapacity(long filesystemCapacity) {
+      this.filesystemCapacity = filesystemCapacity;
+      return this;
+    }
+
+    public Builder setFilesystemUsed(long filesystemUsed) {
+      this.filesystemUsed = filesystemUsed;
+      return this;
+    }
+
+    public Builder setFilesystemAvailable(long filesystemAvailable) {
+      this.filesystemAvailable = filesystemAvailable;
+      return this;
+    }
+
     public void validate() {
       Objects.requireNonNull(hostName, "hostName cannot be null");
 
@@ -168,6 +204,18 @@ public void validate() {
         throw new IllegalArgumentException("reserved cannot be negative");
       }
 
+      if (filesystemCapacity < 0) {
+        throw new IllegalArgumentException("filesystemCapacity cannot be 
negative");
+      }
+
+      if (filesystemAvailable < 0) {
+        throw new IllegalArgumentException("filesystemAvailable cannot be 
negative");
+      }
+
+      if (filesystemUsed < 0) {
+        throw new IllegalArgumentException("filesystemUsed cannot be 
negative");
+      }
+
       // Logical consistency checks
       if (used + remaining > capacity) {
         LOG.warn("Inconsistent storage report for {}: used({}) + remaining({}) 
> capacity({})",
diff --git 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx
 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx
index 06b878055c9..b3c3ae20450 100644
--- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx
+++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx
@@ -46,6 +46,15 @@ function getDataWith(name: string, state: "HEALTHY" | 
"STALE" | "DEAD", uuid: nu
     state: state,
     opState: 'IN_SERVICE',
     lastHeartbeat: 1728280581608,
+    storageReport: {
+      capacity: 125645656770,
+      used: 4096,
+      remaining: 114225606656,
+      committed: 0,
+      filesystemCapacity: 150000000000,
+      filesystemUsed: 30000000000,
+      filesystemAvailable: 120000000000
+    },
     storageUsed: 4096,
     storageTotal: 125645656770,
     storageCommitted: 0,
@@ -143,4 +152,23 @@ describe('DatanodesTable Component', () => {
     expect(checkboxes[1]).toBeDisabled(); // HEALTHY node
     expect(checkboxes[2]).not.toBeDisabled(); // DEAD node
   });
+
+  test('shows filesystem rows in storage tooltip when provided', async () => {
+    render(
+      <DatanodesTable
+        {...defaultProps}
+        data={[
+          getDataWith('ozone-datanode-1', 'HEALTHY', 1)
+        ]}
+      />
+    );
+
+    const storageBar = document.querySelector('.capacity-bar-v2');
+    expect(storageBar).not.toBeNull();
+    fireEvent.mouseOver(storageBar as HTMLElement);
+
+    expect(await screen.findByText('Filesystem Capacity')).toBeInTheDocument();
+    expect(screen.getByText('Filesystem Used')).toBeInTheDocument();
+    expect(screen.getByText('Filesystem Available')).toBeInTheDocument();
+  });
 });
diff --git 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts
 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts
index 887d0b4a27a..bc382991f0c 100644
--- 
a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts
+++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts
@@ -29,7 +29,10 @@ export const DatanodeResponse = {
               "capacity": 125645656770,
               "used": 4096,
               "remaining": 114225606656,
-              "committed": 0
+              "committed": 0,
+              "filesystemCapacity": 150000000000,
+              "filesystemUsed": 30000000000,
+              "filesystemAvailable": 120000000000
           },
           "pipelines": [
               {
@@ -63,7 +66,10 @@ export const DatanodeResponse = {
               "capacity": 125645656770,
               "used": 4096,
               "remaining": 114225623040,
-              "committed": 0
+              "committed": 0,
+              "filesystemCapacity": 150000000000,
+              "filesystemUsed": 30000000000,
+              "filesystemAvailable": 120000000000
           },
           "pipelines": [
               {
@@ -97,7 +103,10 @@ export const DatanodeResponse = {
               "capacity": 125645656770,
               "used": 4096,
               "remaining": 114225541120,
-              "committed": 0
+              "committed": 0,
+              "filesystemCapacity": 150000000000,
+              "filesystemUsed": 30000000000,
+              "filesystemAvailable": 120000000000
           },
           "pipelines": [
               {
@@ -125,7 +134,10 @@ export const DatanodeResponse = {
               "capacity": 125645656770,
               "used": 4096,
               "remaining": 114225573888,
-              "committed": 0
+              "committed": 0,
+              "filesystemCapacity": 150000000000,
+              "filesystemUsed": 30000000000,
+              "filesystemAvailable": 120000000000
           },
           "pipelines": [
               {
@@ -153,7 +165,10 @@ export const DatanodeResponse = {
               "capacity": 125645656770,
               "used": 4096,
               "remaining": 114225614848,
-              "committed": 0
+              "committed": 0,
+              "filesystemCapacity": 150000000000,
+              "filesystemUsed": 30000000000,
+              "filesystemAvailable": 120000000000
           },
           "pipelines": [
               {
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx
index 086a8eaa103..cece4d214a2 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/storageBar/storageBar.tsx
@@ -22,7 +22,7 @@ import filesize from 'filesize';
 import Tooltip from 'antd/lib/tooltip';
 
 import { getCapacityPercent } from '@/utils/common';
-import type { StorageReport } from '@/v2/types/overview.types';
+import type { DatanodeStorageReport } from '@/v2/types/datanodeStorageReport.types';
 
 import './storageBar.less';
 
@@ -34,7 +34,7 @@ const size = filesize.partial({
 type StorageReportProps = {
   showMeta?: boolean;
   strokeWidth?: number;
-} & StorageReport
+} & DatanodeStorageReport
 
 
 const StorageBar: React.FC<StorageReportProps> = ({
@@ -42,26 +42,52 @@ const StorageBar: React.FC<StorageReportProps> = ({
   used = 0,
   remaining = 0,
   committed = 0,
+  filesystemCapacity,
+  filesystemUsed,
+  filesystemAvailable,
   showMeta = false,
   strokeWidth = 3
 }) => {
 
-  const nonOzoneUsed = capacity - remaining - used;
-  const totalUsed = capacity - remaining;
+  const hasFilesystemView = (filesystemCapacity ?? 0) > 0 &&
+    filesystemAvailable !== undefined &&
+    filesystemAvailable !== null;
+
+  const fsCap = hasFilesystemView ? (filesystemCapacity as number) : undefined;
+  const fsAvail = hasFilesystemView ? (filesystemAvailable as number) : undefined;
+  const fsUsed = hasFilesystemView
+    ? (filesystemUsed !== undefined ? filesystemUsed : (fsCap as number) - (fsAvail as number))
+    : undefined;
   const tooltip = (
     <>
       <table cellPadding={5}>
         <tbody>
+          {hasFilesystemView && (
+            <>
+              <tr>
+                <td>Filesystem Capacity</td>
+                <td><strong>{size(fsCap as number)}</strong></td>
+              </tr>
+              <tr>
+                <td>Filesystem Used</td>
+                <td><strong>{size(fsUsed as number)}</strong></td>
+              </tr>
+              <tr>
+                <td>Filesystem Available</td>
+                <td><strong>{size(fsAvail as number)}</strong></td>
+              </tr>
+            </>
+          )}
           <tr>
-            <td>Ozone Used</td>
-            <td><strong>{size(used)}</strong></td>
+            <td>Ozone Capacity</td>
+            <td><strong>{size(capacity)}</strong></td>
           </tr>
           <tr>
-            <td>Non Ozone Used</td>
-            <td><strong>{size(nonOzoneUsed)}</strong></td>
+            <td>Ozone Used</td>
+            <td><strong>{size(used)}</strong></td>
           </tr>
           <tr>
-            <td>Remaining</td>
+            <td>Ozone Available</td>
             <td><strong>{size(remaining)}</strong></td>
           </tr>
           <tr>
@@ -73,7 +99,7 @@ const StorageBar: React.FC<StorageReportProps> = ({
     </>
   );
 
-  const percentage = getCapacityPercent(totalUsed, capacity)
+  const percentage = getCapacityPercent((capacity - remaining), capacity)
 
   return (
       <Tooltip
@@ -82,7 +108,7 @@ const StorageBar: React.FC<StorageReportProps> = ({
         className='storage-cell-container-v2' >
         {(showMeta) &&
           <div>
-            {size(used + nonOzoneUsed)} / {size(capacity)}
+            {`${size(capacity - remaining)} / ${size(capacity)}`}
           </div>
         }
         <Progress
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx
index 17e6048f7e3..1f4ede6ee51 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx
@@ -130,10 +130,7 @@ export const COLUMNS: ColumnsType<Datanode> = [
     render: (_: string, record: Datanode) => (
       <StorageBar
         strokeWidth={6}
-        capacity={record.storageTotal}
-        used={record.storageUsed}
-        remaining={record.storageRemaining}
-        committed={record.storageCommitted} />
+        {...record.storageReport} />
     )
   },
   {
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx
index 101db9d4b03..0283a1872f1 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx
@@ -165,6 +165,7 @@ const Datanodes: React.FC<{}> = () => {
             ? COLUMN_UPDATE_DECOMMISSIONING
             : datanode.opState,
           lastHeartbeat: datanode.lastHeartbeat,
+          storageReport: datanode.storageReport,
           storageUsed: datanode.storageReport.used,
           storageTotal: datanode.storageReport.capacity,
           storageCommitted: datanode.storageReport.committed,
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts
index 96a37020153..809c0424962 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanode.types.ts
@@ -17,7 +17,7 @@
  */
 
 import { Pipeline } from "@/v2/types/pipelines.types";
-import { StorageReport } from "@/v2/types/overview.types";
+import { DatanodeStorageReport } from "@/v2/types/datanodeStorageReport.types";
 import { Option as MultiOption } from "@/v2/components/select/multiSelect";
 
 // Corresponds to HddsProtos.NodeState
@@ -40,7 +40,7 @@ export type DatanodeResponse = {
   state: DatanodeState;
   opState: DatanodeOpState;
   lastHeartbeat: string;
-  storageReport: StorageReport;
+  storageReport: DatanodeStorageReport;
   pipelines: Pipeline[];
   containers: number;
   openContainers: number;
@@ -63,6 +63,7 @@ export type Datanode = {
   state: DatanodeState;
   opState: DatanodeOpState;
   lastHeartbeat: string;
+  storageReport: DatanodeStorageReport;
   storageUsed: number;
   storageTotal: number;
   storageRemaining: number;
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanodeStorageReport.types.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanodeStorageReport.types.ts
new file mode 100644
index 00000000000..cbc22f2ecbc
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/types/datanodeStorageReport.types.ts
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Storage report used by the Datanodes page.
+ */
+export type DatanodeStorageReport = {
+  // Ozone-usable stats (after reserved-space adjustment).
+  capacity: number;
+  used: number;
+  remaining: number;
+  committed: number;
+
+  reserved?: number;
+  minimumFreeSpace?: number;
+
+  // Raw filesystem stats aggregated across volumes.
+  filesystemCapacity?: number;
+  filesystemUsed?: number;
+  filesystemAvailable?: number;
+}
+
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 280ff4210cd..42907339879 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -379,12 +379,16 @@ public void setUp() throws Exception {
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk1").setScmUsed(10000).setRemaining(5400)
             .setCapacity(25000)
+            .setFsCapacity(40000)
+            .setFsAvailable(15000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto2 =
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk2").setScmUsed(25000).setRemaining(10000)
             .setCapacity(50000)
+            .setFsCapacity(60000)
+            .setFsAvailable(25000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     NodeReportProto nodeReportProto =
@@ -409,12 +413,16 @@ public void setUp() throws Exception {
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800)
             .setCapacity(50000)
+            .setFsCapacity(70000)
+            .setFsAvailable(20000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto4 =
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000)
             .setCapacity(80000)
+            .setFsCapacity(80000)
+            .setFsAvailable(30000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     NodeReportProto nodeReportProto2 =
@@ -440,12 +448,16 @@ public void setUp() throws Exception {
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800)
             .setCapacity(50000)
+            .setFsCapacity(60000)
+            .setFsAvailable(25000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto6 =
         StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
             .setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000)
             .setCapacity(80000)
+            .setFsCapacity(80000)
+            .setFsAvailable(35000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     NodeReportProto nodeReportProto3 =
@@ -588,6 +600,12 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata)
           datanodeMetadata.getDatanodeStorageReport().getRemaining());
       assertEquals(35000,
           datanodeMetadata.getDatanodeStorageReport().getUsed());
+      assertEquals(100000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemCapacity());
+      assertEquals(40000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemAvailable());
+      assertEquals(60000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemUsed());
 
       assertEquals(1, datanodeMetadata.getPipelines().size());
       assertEquals(pipelineId,
@@ -607,6 +625,12 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata)
           datanodeMetadata.getDatanodeStorageReport().getRemaining());
       assertEquals(80000,
           datanodeMetadata.getDatanodeStorageReport().getUsed());
+      assertEquals(150000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemCapacity());
+      assertEquals(50000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemAvailable());
+      assertEquals(100000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemUsed());
 
       assertEquals(0, datanodeMetadata.getPipelines().size());
       assertEquals(0, datanodeMetadata.getLeaderCount());
@@ -618,6 +642,12 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata)
           datanodeMetadata.getDatanodeStorageReport().getRemaining());
       assertEquals(80000,
           datanodeMetadata.getDatanodeStorageReport().getUsed());
+      assertEquals(140000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemCapacity());
+      assertEquals(60000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemAvailable());
+      assertEquals(80000,
+          datanodeMetadata.getDatanodeStorageReport().getFilesystemUsed());
 
       assertEquals(0, datanodeMetadata.getPipelines().size());
       assertEquals(0, datanodeMetadata.getLeaderCount());


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to