This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch HDDS-5713
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-5713 by this push:
     new b8bff8ae11 HDDS-12546. [DiskBalancer] Display bytesMoved instead of VolumeDensity in status output (#8150)
b8bff8ae11 is described below

commit b8bff8ae114b91d012569e471f2fc815a68cbd8f
Author: Gargi Jaiswal <[email protected]>
AuthorDate: Thu Apr 3 12:07:01 2025 +0530

    HDDS-12546. [DiskBalancer] Display bytesMoved instead of VolumeDensity in status output (#8150)
---
 .../container/diskbalancer/DiskBalancerInfo.java   |  5 +-
 .../diskbalancer/DiskBalancerService.java          |  3 +-
 .../interface-client/src/main/proto/hdds.proto     |  1 +
 .../hadoop/hdds/scm/node/DiskBalancerManager.java  | 11 +++--
 .../hadoop/hdds/scm/node/DiskBalancerStatus.java   |  8 +++-
 .../cli/datanode/DiskBalancerStatusSubcommand.java | 11 +++--
 .../cli/datanode/TestDiskBalancerSubCommand.java   | 53 ++++++++++++++++++++++
 7 files changed, 79 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerInfo.java
index 0a296d5d9f..fcb4090225 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerInfo.java
@@ -35,6 +35,7 @@ public class DiskBalancerInfo {
   private long successCount;
   private long failureCount;
   private long bytesToMove;
+  private long balancedBytes;
 
   public DiskBalancerInfo(boolean shouldRun, double threshold,
       long bandwidthInMB, int parallelThread) {
@@ -54,7 +55,7 @@ public DiskBalancerInfo(boolean shouldRun, double threshold,
   @SuppressWarnings("checkstyle:ParameterNumber")
   public DiskBalancerInfo(boolean shouldRun, double threshold,
       long bandwidthInMB, int parallelThread, DiskBalancerVersion version,
-      long successCount, long failureCount, long bytesToMove) {
+      long successCount, long failureCount, long bytesToMove, long balancedBytes) {
     this.shouldRun = shouldRun;
     this.threshold = threshold;
     this.bandwidthInMB = bandwidthInMB;
@@ -63,6 +64,7 @@ public DiskBalancerInfo(boolean shouldRun, double threshold,
     this.successCount = successCount;
     this.failureCount = failureCount;
     this.bytesToMove = bytesToMove;
+    this.balancedBytes = balancedBytes;
   }
 
   public DiskBalancerInfo(boolean shouldRun,
@@ -98,6 +100,7 @@ public StorageContainerDatanodeProtocolProtos.DiskBalancerReportProto toDiskBala
     builder.setSuccessMoveCount(successCount);
     builder.setFailureMoveCount(failureCount);
     builder.setBytesToMove(bytesToMove);
+    builder.setBalancedBytes(balancedBytes);
     return builder.build();
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
index d84d8cf72c..c3f882298c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/diskbalancer/DiskBalancerService.java
@@ -458,6 +458,7 @@ public BackgroundTaskResult call() {
         balancedBytesInLastWindow.addAndGet(containerSize);
         metrics.incrSuccessCount(1);
         metrics.incrSuccessBytes(containerSize);
+        totalBalancedBytes.addAndGet(containerSize);
       } catch (IOException e) {
         moveSucceeded = false;
         if (diskBalancerTmpDir != null) {
@@ -513,7 +514,7 @@ private void postCall() {
   public DiskBalancerInfo getDiskBalancerInfo() {
     return new DiskBalancerInfo(shouldRun, threshold, bandwidthInMB,
         parallelThread, version, metrics.getSuccessCount(),
-        metrics.getFailureCount(), bytesToMove);
+        metrics.getFailureCount(), bytesToMove, metrics.getSuccessBytes());
   }
 
   public long calculateBytesToMove(MutableVolumeSet inputVolumeSet) {
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 74934fe938..0b74223fd5 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -564,4 +564,5 @@ message DatanodeDiskBalancerInfoProto {
     optional uint64 successMoveCount = 5;
     optional uint64 failureMoveCount = 6;
     optional uint64 bytesToMove = 7;
+    optional uint64 bytesMoved = 8;
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
index 5567a61b03..a18d8af6d1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerManager.java
@@ -268,7 +268,8 @@ private HddsProtos.DatanodeDiskBalancerInfoProto getInfoProto(
             .setRunningStatus(status.getRunningStatus())
             .setSuccessMoveCount(status.getSuccessMoveCount())
             .setFailureMoveCount(status.getFailureMoveCount())
-            .setBytesToMove(status.getBytesToMove());
+            .setBytesToMove(status.getBytesToMove())
+            .setBytesMoved(status.getBalancedBytes());
     if (status.getRunningStatus() != DiskBalancerRunningStatus.UNKNOWN) {
       builder.setDiskBalancerConf(statusMap.get(dn)
           .getDiskBalancerConfiguration().toProtobufBuilder());
@@ -307,14 +308,13 @@ private double getVolumeDataDensitySumForDatanodeDetails(
 
   private DiskBalancerStatus getStatus(DatanodeDetails datanodeDetails) {
     return statusMap.computeIfAbsent(datanodeDetails,
-        dn -> new DiskBalancerStatus(DiskBalancerRunningStatus.UNKNOWN,
-            new DiskBalancerConfiguration(), 0, 0, 0));
+        dn -> new DiskBalancerStatus(DiskBalancerRunningStatus.UNKNOWN, new DiskBalancerConfiguration(), 0, 0, 0, 0));
   }
 
   @VisibleForTesting
   public void addRunningDatanode(DatanodeDetails datanodeDetails) {
     statusMap.put(datanodeDetails, new DiskBalancerStatus(DiskBalancerRunningStatus.RUNNING,
-        new DiskBalancerConfiguration(), 0, 0, 0));
+        new DiskBalancerConfiguration(), 0, 0, 0, 0));
   }
 
   public void processDiskBalancerReport(DiskBalancerReportProto reportProto,
@@ -328,9 +328,10 @@ public void processDiskBalancerReport(DiskBalancerReportProto reportProto,
     long successMoveCount = reportProto.getSuccessMoveCount();
     long failureMoveCount = reportProto.getFailureMoveCount();
     long bytesToMove = reportProto.getBytesToMove();
+    long balancedBytes = reportProto.getBalancedBytes();
     statusMap.put(dn, new DiskBalancerStatus(
         isRunning ? DiskBalancerRunningStatus.RUNNING : DiskBalancerRunningStatus.STOPPED,
-        diskBalancerConfiguration, successMoveCount, failureMoveCount, bytesToMove));
+        diskBalancerConfiguration, successMoveCount, failureMoveCount, bytesToMove, balancedBytes));
     if (reportProto.hasBalancedBytes()) {
       balancedBytesMap.put(dn, reportProto.getBalancedBytes());
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerStatus.java
index 0b99bfbaa3..e8df44f0b5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerStatus.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DiskBalancerStatus.java
@@ -34,14 +34,16 @@ public class DiskBalancerStatus {
   private long successMoveCount;
   private long failureMoveCount;
   private long bytesToMove;
+  private long balancedBytes;
 
   public DiskBalancerStatus(DiskBalancerRunningStatus isRunning, DiskBalancerConfiguration conf,
-      long successMoveCount, long failureMoveCount, long bytesToMove) {
+      long successMoveCount, long failureMoveCount, long bytesToMove, long balancedBytes) {
     this.isRunning = isRunning;
     this.diskBalancerConfiguration = conf;
     this.successMoveCount = successMoveCount;
     this.failureMoveCount = failureMoveCount;
     this.bytesToMove = bytesToMove;
+    this.balancedBytes = balancedBytes;
   }
 
   public DiskBalancerRunningStatus getRunningStatus() {
@@ -63,4 +65,8 @@ public long getFailureMoveCount() {
   public long getBytesToMove() {
     return bytesToMove;
   }
+
+  public long getBalancedBytes() {
+    return balancedBytes;
+  }
 }
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java
index 2f14079327..5ae2b37965 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java
@@ -60,27 +60,27 @@ public void execute(ScmClient scmClient) throws IOException {
   private String generateStatus(
       List<HddsProtos.DatanodeDiskBalancerInfoProto> protos) {
     StringBuilder formatBuilder = new StringBuilder("Status result:%n" +
-        "%-35s %-25s %-15s %-15s %-15s %-12s %-12s %-12s %-12s %-12s%n");
+        "%-35s %-15s %-15s %-15s %-12s %-12s %-12s %-15s %-15s %-15s%n");
 
     List<String> contentList = new ArrayList<>();
     contentList.add("Datanode");
-    contentList.add("VolumeDensity");
     contentList.add("Status");
     contentList.add("Threshold(%)");
     contentList.add("BandwidthInMB");
     contentList.add("Threads");
     contentList.add("SuccessMove");
     contentList.add("FailureMove");
+    contentList.add("BytesMoved(MB)");
     contentList.add("EstBytesToMove(MB)");
     contentList.add("EstTimeLeft(min)");
 
     for (HddsProtos.DatanodeDiskBalancerInfoProto proto: protos) {
-      formatBuilder.append("%-35s %-25s %-15s %-15s %-15s %-12s %-12s %-12s %-12s %-12s%n");
+      formatBuilder.append("%-35s %-15s %-15s %-15s %-12s %-12s %-12s %-15s %-15s %-15s%n");
       long estimatedTimeLeft = calculateEstimatedTimeLeft(proto);
-      long bytesToMoveMB = proto.getBytesToMove() / (1024 * 1024);
+      long bytesMovedMB = (long) Math.ceil(proto.getBytesMoved() / (1024.0 * 1024.0));
+      long bytesToMoveMB = (long) Math.ceil(proto.getBytesToMove() / (1024.0 * 1024.0));
 
       contentList.add(proto.getNode().getHostName());
-      contentList.add(String.format("%.18f", proto.getCurrentVolumeDensitySum()));
       contentList.add(proto.getRunningStatus().name());
       contentList.add(
           String.format("%.4f", proto.getDiskBalancerConf().getThreshold()));
@@ -90,6 +90,7 @@ private String generateStatus(
           String.valueOf(proto.getDiskBalancerConf().getParallelThread()));
       contentList.add(String.valueOf(proto.getSuccessMoveCount()));
       contentList.add(String.valueOf(proto.getFailureMoveCount()));
+      contentList.add(String.valueOf(bytesMovedMB));
       contentList.add(String.valueOf(bytesToMoveMB));
       contentList.add(estimatedTimeLeft >= 0 ? String.valueOf(estimatedTimeLeft) : "N/A");
     }
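
For reference, the MB values shown in the new BytesMoved(MB) and EstBytesToMove(MB) columns are rounded up to whole mebibytes. Below is a minimal standalone sketch of that rounding; the class and method names are illustrative, and only the Math.ceil expression and the sample values come from the patch and its test cases.

    public final class MbRoundingSketch {

      // Round a byte count up to whole MB, so a non-zero value below 1 MB
      // is reported as 1 rather than 0 (the behaviour the new test cases cover).
      static long toCeilMb(long bytes) {
        return (long) Math.ceil(bytes / (1024.0 * 1024.0));
      }

      public static void main(String[] args) {
        System.out.println(toCeilMb(0L));            // 0
        System.out.println(toCeilMb(512L));          // 1, rounded up from < 1 MB
        System.out.println(toCeilMb(5242880L));      // 5, exactly 5 MB
        System.out.println(toCeilMb(13774139392L));  // 13137, matches the test expectation
      }
    }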
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommand.java
index 897eccba53..ac97a27f25 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommand.java
@@ -27,15 +27,20 @@
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Random;
 import java.util.UUID;
+import java.util.stream.Stream;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.mockito.Mockito;
 
 /**
@@ -192,6 +197,54 @@ public void testDiskBalancerStopSubcommand() throws IOException  {
     stopCmd.setAllHosts(false);
   }
 
+  public static Stream<Arguments> values() {
+    return Stream.of(
+        Arguments.arguments(0L, 0L, 0L, 0L, 0L),  // bytesMovedMB = 0, bytesToMoveMB = 0, estimatedTimeLeft = 0
+        Arguments.arguments(512L, 512L, 1L, 1L, 1L), // bytesMoved and bytesToMove < 1MB should be rounded up to 1MB
+        Arguments.arguments(5242880L, 10485760L, 5L, 10L, 1L), // bytesMoved = 5MB, bytesToMove = 10MB, estTimeLeft = 1
+        Arguments.arguments(13774139392L, 3229900800L, 13137L, 3081L, 6L),
+        Arguments.arguments(7482638336L, 939524096L, 7136L, 896L, 2L)
+    );
+  }
+
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testDiskBalancerStatusCalculations(long bytesMoved, long bytesToMove, long bytesMovedMB,
+      long bytesToMoveMB, long estTimeLeft) throws IOException {
+    ScmClient scmClient = mock(ScmClient.class);
+
+    HddsProtos.DatanodeDiskBalancerInfoProto proto =
+        HddsProtos.DatanodeDiskBalancerInfoProto.newBuilder()
+            .setNode(generateDatanodeDetails())
+            .setCurrentVolumeDensitySum(random.nextDouble())
+            .setRunningStatus(HddsProtos.DiskBalancerRunningStatus.
+                valueOf(random.nextInt(2) + 1))
+            .setBytesMoved(bytesMoved)
+            .setBytesToMove(bytesToMove)
+            .setDiskBalancerConf(
+                HddsProtos.DiskBalancerConfigurationProto.newBuilder()
+                    .setDiskBandwidthInMB(10)
+                    .setThreshold(10.0)
+                    .setParallelThread(5)
+                    .build())
+            .build();
+
+    List<HddsProtos.DatanodeDiskBalancerInfoProto> resultList = Collections.singletonList(proto);
+    Mockito.when(scmClient.getDiskBalancerStatus(Mockito.any(), Mockito.any())).thenReturn(resultList);
+
+    DiskBalancerStatusSubcommand statusCmd1 = new DiskBalancerStatusSubcommand();
+    statusCmd1.execute(scmClient);
+
+    String output = outContent.toString(DEFAULT_ENCODING).trim();
+    String[] lines = output.split("\\n");
+
+    // Skip the header and find the data row
+    String[] columns = lines[2].split("\\s+");
+
+    assertEquals(String.valueOf(bytesMovedMB), columns[7]);
+    assertEquals(String.valueOf(bytesToMoveMB), columns[8]);
+    assertEquals(estTimeLeft >= 0 ? String.valueOf(estTimeLeft) : "N/A", columns[9]);
+  }
 
   private List<DatanodeAdminError> generateError(int count) {
     List<DatanodeAdminError> result = new ArrayList<>();

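With VolumeDensity dropped from the output, the status table now has ten columns. The sketch below, which assumes nothing beyond the format string and column labels shown in the patch (the class name is illustrative), prints the reworked header row:

    public final class StatusHeaderSketch {
      public static void main(String[] args) {
        // Same format string and column labels that generateStatus() now uses.
        System.out.printf("Status result:%n"
                + "%-35s %-15s %-15s %-15s %-12s %-12s %-12s %-15s %-15s %-15s%n",
            "Datanode", "Status", "Threshold(%)", "BandwidthInMB", "Threads",
            "SuccessMove", "FailureMove", "BytesMoved(MB)", "EstBytesToMove(MB)",
            "EstTimeLeft(min)");
      }
    }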
