This is an automated email from the ASF dual-hosted git repository.

devesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 93a2489ab9 HDDS-10452. Improve Recon Disk Usage to fetch and display 
Top N records based on size. (#6318)
93a2489ab9 is described below

commit 93a2489ab9eb798c5096181703fd164c0904d565
Author: Arafat2198 <[email protected]>
AuthorDate: Tue Apr 16 10:58:41 2024 +0530

    HDDS-10452. Improve Recon Disk Usage to fetch and display Top N records 
based on size. (#6318)
---
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   5 +
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |  29 ++
 .../hadoop/ozone/recon/api/NSSummaryEndpoint.java  |  12 +-
 .../recon/api/handlers/BucketEntityHandler.java    |  13 +-
 .../recon/api/handlers/DirectoryEntityHandler.java |  12 +-
 .../ozone/recon/api/handlers/EntityHandler.java    |   2 +-
 .../ozone/recon/api/handlers/KeyEntityHandler.java |   2 +-
 .../recon/api/handlers/RootEntityHandler.java      |  15 +-
 .../recon/api/handlers/UnknownEntityHandler.java   |   2 +-
 .../recon/api/handlers/VolumeEntityHandler.java    |  13 +-
 .../hadoop/ozone/recon/heatmap/HeatMapUtil.java    |   2 +-
 .../recon/api/TestNSSummaryDiskUsageOrdering.java  | 421 +++++++++++++++++++++
 .../recon/api/TestNSSummaryEndpointWithFSO.java    |  29 +-
 .../recon/api/TestNSSummaryEndpointWithLegacy.java |  28 +-
 .../api/TestNSSummaryEndpointWithOBSAndLegacy.java |  44 +--
 15 files changed, 566 insertions(+), 63 deletions(-)

diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
index 134092146e..9c79a869c4 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java
@@ -36,6 +36,11 @@ public final class ReconConstants {
   public static final String RECON_SCM_SNAPSHOT_DB = "scm.snapshot.db";
 
   // By default, limit the number of results returned
+
+  /**
+   * The maximum number of top disk usage records to return in a /du response.
+   */
+  public static final int DISK_USAGE_TOP_RECORDS_LIMIT = 30;
   public static final String DEFAULT_OPEN_KEY_INCLUDE_NON_FSO = "false";
   public static final String DEFAULT_OPEN_KEY_INCLUDE_FSO = "false";
   public static final String DEFAULT_FETCH_COUNT = "1000";
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
index 39d091ee03..f154f024fb 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java
@@ -32,6 +32,7 @@ import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
+import java.util.stream.Collectors;
 
 import com.google.common.base.Preconditions;
 import com.google.inject.Singleton;
@@ -59,6 +60,7 @@ import static org.jooq.impl.DSL.currentTimestamp;
 import static org.jooq.impl.DSL.select;
 import static org.jooq.impl.DSL.using;
 
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
 import org.apache.hadoop.ozone.recon.scm.ReconContainerReportQueue;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
@@ -322,6 +324,33 @@ public class ReconUtils {
     }
   }
 
+  /**
+   * Sorts a list of DiskUsage objects in descending order by size using 
parallel sorting and
+   * returns the top N records as specified by the limit.
+   *
+   * This method is optimized for large datasets and utilizes parallel 
processing to efficiently
+   * sort and retrieve the top N largest records by size. It's especially 
useful for reducing
+   * processing time and memory usage when only a subset of sorted records is 
needed.
+   *
+   * Advantages of this approach include:
+   * - Efficient handling of large datasets by leveraging multi-core 
processors.
+   * - Reduction in memory usage and improvement in processing time by 
limiting the
+   *   number of returned records.
+   * - Scalability and easy integration with existing systems.
+   *
+   * @param diskUsageList the list of DiskUsage objects to be sorted.
+   * @param limit the maximum number of DiskUsage objects to return.
+   * @return a list of the top N DiskUsage objects sorted in descending order 
by size,
+   *  where N is the specified limit.
+   */
+  public static List<DUResponse.DiskUsage> sortDiskUsageDescendingWithLimit(
+      List<DUResponse.DiskUsage> diskUsageList, int limit) {
+    return diskUsageList.parallelStream()
+        .sorted((du1, du2) -> Long.compare(du2.getSize(), du1.getSize()))
+        .limit(limit)
+        .collect(Collectors.toList());
+  }
+
   public static long getFileSizeUpperBound(long fileSize) {
     if (fileSize >= ReconConstants.MAX_FILE_SIZE_UPPER_BOUND) {
       return Long.MAX_VALUE;
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
index 5b104c4611..71040b9fdf 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java
@@ -101,6 +101,8 @@ public class NSSummaryEndpoint {
    * @param path request path
    * @param listFile show subpath/disk usage for each key
    * @param withReplica count actual DU with replication
+   * @param sortSubpaths determines whether to sort the subpaths by their
+   * sizes in descending order and return the N largest subpaths, limited
+   * by the constant DISK_USAGE_TOP_RECORDS_LIMIT.
    * @return DU response
    * @throws IOException
    */
@@ -108,10 +110,9 @@ public class NSSummaryEndpoint {
   @Path("/du")
   @SuppressWarnings("methodlength")
   public Response getDiskUsage(@QueryParam("path") String path,
-                               @DefaultValue("false")
-                               @QueryParam("files") boolean listFile,
-                               @DefaultValue("false")
-                               @QueryParam("replica") boolean withReplica)
+                               @DefaultValue("false") @QueryParam("files") 
boolean listFile,
+                               @DefaultValue("false") @QueryParam("replica") 
boolean withReplica,
+                               @DefaultValue("true") 
@QueryParam("sortSubPaths") boolean sortSubpaths)
       throws IOException {
     if (path == null || path.length() == 0) {
       return Response.status(Response.Status.BAD_REQUEST).build();
@@ -127,8 +128,7 @@ public class NSSummaryEndpoint {
             reconNamespaceSummaryManager,
             omMetadataManager, reconSCM, path);
 
-    duResponse = handler.getDuResponse(
-            listFile, withReplica);
+    duResponse = handler.getDuResponse(listFile, withReplica, sortSubpaths);
 
     return Response.ok(duResponse).build();
   }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
index 7ad961195e..00cd9617b5 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketEntityHandler.java
@@ -36,6 +36,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
+import static 
org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT;
+import static 
org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit;
+
 /**
  * Class for handling bucket entity type.
  */
@@ -87,7 +90,7 @@ public class BucketEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sortSubpaths)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setPath(getNormalizedPath());
@@ -142,7 +145,15 @@ public class BucketEntityHandler extends EntityHandler {
     }
     duResponse.setCount(dirDUData.size());
     duResponse.setSize(bucketDataSize);
+
+    if (sortSubpaths) {
+      // Parallel sort directory/file DU data in descending order of size
+      // and return the top N elements.
+      dirDUData = sortDiskUsageDescendingWithLimit(dirDUData,
+          DISK_USAGE_TOP_RECORDS_LIMIT);
+    }
+
     duResponse.setDuData(dirDUData);
+
     return duResponse;
   }
 
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
index fc7022e2da..ae7181af70 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/DirectoryEntityHandler.java
@@ -39,6 +39,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
+import static 
org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT;
+import static 
org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit;
+
 /**
  * Class for handling directory entity type.
  */
@@ -80,7 +83,7 @@ public class DirectoryEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sortSubPaths)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setPath(getNormalizedPath());
@@ -154,6 +157,13 @@ public class DirectoryEntityHandler extends EntityHandler {
     }
     duResponse.setCount(subdirDUData.size());
     duResponse.setSize(dirDataSize);
+
+    if (sortSubPaths) {
+      // Parallel sort subdirDUData in descending order of size
+      // and return the top N elements.
+      subdirDUData = sortDiskUsageDescendingWithLimit(subdirDUData,
+          DISK_USAGE_TOP_RECORDS_LIMIT);
+    }
+
     duResponse.setDuData(subdirDUData);
 
     return duResponse;
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
index 4f9e68ddff..f2bcb58d35 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java
@@ -79,7 +79,7 @@ public abstract class EntityHandler {
           throws IOException;
 
   public abstract DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sort)
           throws IOException;
 
   public abstract QuotaUsageResponse getQuotaResponse()
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
index a687bf3d0b..8ea26fd284 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/KeyEntityHandler.java
@@ -71,7 +71,7 @@ public class KeyEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sort)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setPath(getNormalizedPath());
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
index fd0e58f191..b67703257a 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/RootEntityHandler.java
@@ -39,6 +39,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static 
org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT;
+import static 
org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit;
+
 /**
  * Class for handling root entity type.
  */
@@ -88,7 +91,7 @@ public class RootEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sortSubPaths)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setPath(getNormalizedPath());
@@ -137,6 +140,13 @@ public class RootEntityHandler extends EntityHandler {
       duResponse.setSizeWithReplica(totalDataSizeWithReplica);
     }
     duResponse.setSize(totalDataSize);
+
+    if (sortSubPaths) {
+      // Parallel sort volumeDuData in descending order of size
+      // and return the top N elements.
+      volumeDuData = sortDiskUsageDescendingWithLimit(volumeDuData,
+          DISK_USAGE_TOP_RECORDS_LIMIT);
+    }
+
     duResponse.setDuData(volumeDuData);
 
     return duResponse;
@@ -148,7 +158,8 @@ public class RootEntityHandler extends EntityHandler {
     QuotaUsageResponse quotaUsageResponse = new QuotaUsageResponse();
     SCMNodeStat stats = getReconSCM().getScmNodeManager().getStats();
     long quotaInBytes = stats.getCapacity().get();
-    long quotaUsedInBytes = getDuResponse(true, true).getSizeWithReplica();
+    long quotaUsedInBytes =
+        getDuResponse(true, true, false).getSizeWithReplica();
     quotaUsageResponse.setQuota(quotaInBytes);
     quotaUsageResponse.setQuotaUsed(quotaUsedInBytes);
     return quotaUsageResponse;
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
index b5a5bd9a0b..ab61ec38e8 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/UnknownEntityHandler.java
@@ -51,7 +51,7 @@ public class UnknownEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sort)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setStatus(ResponseStatus.PATH_NOT_FOUND);
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
index fae508a99c..2ca9c352ce 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/VolumeEntityHandler.java
@@ -36,6 +36,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+
+import static 
org.apache.hadoop.ozone.recon.ReconConstants.DISK_USAGE_TOP_RECORDS_LIMIT;
+import static 
org.apache.hadoop.ozone.recon.ReconUtils.sortDiskUsageDescendingWithLimit;
+
 /**
  * Class for handling volume entity type.
  */
@@ -92,7 +96,7 @@ public class VolumeEntityHandler extends EntityHandler {
 
   @Override
   public DUResponse getDuResponse(
-          boolean listFile, boolean withReplica)
+      boolean listFile, boolean withReplica, boolean sortSubPaths)
           throws IOException {
     DUResponse duResponse = new DUResponse();
     duResponse.setPath(getNormalizedPath());
@@ -131,6 +135,13 @@ public class VolumeEntityHandler extends EntityHandler {
       duResponse.setSizeWithReplica(volDataSizeWithReplica);
     }
     duResponse.setSize(volDataSize);
+
+    if (sortSubPaths) {
+      // Parallel sort bucketDuData in descending order of size
+      // and return the top N elements.
+      bucketDuData = sortDiskUsageDescendingWithLimit(bucketDuData,
+          DISK_USAGE_TOP_RECORDS_LIMIT);
+    }
+
     duResponse.setDuData(bucketDuData);
     return duResponse;
   }
diff --git 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
index 2f3de1debc..57f7686263 100644
--- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
+++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/heatmap/HeatMapUtil.java
@@ -71,7 +71,7 @@ public class HeatMapUtil {
         EntityHandler.getEntityHandler(reconNamespaceSummaryManager,
             omMetadataManager, reconSCM, path);
     if (null != entityHandler) {
-      DUResponse duResponse = entityHandler.getDuResponse(false, false);
+      DUResponse duResponse = entityHandler.getDuResponse(false, false, false);
       if (null != duResponse && duResponse.getStatus() == ResponseStatus.OK) {
         return duResponse.getSize();
       }
diff --git 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
new file mode 100644
index 0000000000..a244e4ff2c
--- /dev/null
+++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryDiskUsageOrdering.java
@@ -0,0 +1,421 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.recon.api.types.DUResponse;
+
+import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithFSO;
+import org.junit.jupiter.api.BeforeEach;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
+import javax.ws.rs.core.Response;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test NSSummary Disk Usage subpath ordering.
+ */
+public class TestNSSummaryDiskUsageOrdering {
+
+  @TempDir
+  private Path temporaryFolder;
+
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private NSSummaryEndpoint nsSummaryEndpoint;
+  private OzoneConfiguration ozoneConfiguration;
+  private static final String ROOT_PATH = "/";
+  private static final String TEST_USER = "TestUser";
+  private OMMetadataManager omMetadataManager;
+  @BeforeEach
+  public void setUp() throws Exception {
+    ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.setLong(OZONE_RECON_NSSUMMARY_FLUSH_TO_DB_MAX_THRESHOLD,
+        100);
+    omMetadataManager = initializeNewOmMetadataManager(
+        Files.createDirectory(temporaryFolder.resolve("JunitOmDBDir"))
+            .toFile());
+    OzoneManagerServiceProviderImpl ozoneManagerServiceProvider =
+        getMockOzoneManagerServiceProviderWithFSO();
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        
Files.createDirectory(temporaryFolder.resolve("OmMetataDir")).toFile());
+
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(temporaryFolder.toFile())
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(ozoneManagerServiceProvider)
+            .withReconSqlDb()
+            .withContainerDB()
+            .addBinding(OzoneStorageContainerManager.class,
+                getMockReconSCM())
+            .addBinding(StorageContainerServiceProvider.class,
+                mock(StorageContainerServiceProviderImpl.class))
+            .addBinding(NSSummaryEndpoint.class)
+            .build();
+    ReconNamespaceSummaryManager reconNamespaceSummaryManager =
+        reconTestInjector.getInstance(ReconNamespaceSummaryManager.class);
+    nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class);
+
+    // populate OM DB and reprocess into Recon RocksDB
+    populateOMDB();
+    NSSummaryTaskWithFSO nSSummaryTaskWithFso =
+        new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
+            reconOMMetadataManager, ozoneConfiguration);
+    nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+  }
+
+  /**
+   * Create a new OM Metadata manager instance with one user, one vol, and two
+   * buckets.
+   * @throws IOException ioEx
+   */
+  private static OMMetadataManager initializeNewOmMetadataManager(
+      File omDbDir)
+      throws IOException {
+    OzoneConfiguration omConfiguration = new OzoneConfiguration();
+    omConfiguration.set(OZONE_OM_DB_DIRS,
+        omDbDir.getAbsolutePath());
+    OMMetadataManager omMetadataManager = new OmMetadataManagerImpl(
+        omConfiguration, null);
+    return omMetadataManager;
+  }
+
+  @Test
+  public void testDiskUsageOrderingForRoot() throws Exception {
+    // root level DU
+    // Verify the ordering of subpaths under the root
+    verifyOrdering(ROOT_PATH);
+  }
+
+  @Test
+  public void testDiskUsageOrderingForVolume() throws Exception {
+    // volume level DU
+    // Verify the ordering of subpaths under the volume
+    verifyOrdering("/volA");
+    verifyOrdering("/volB");
+  }
+
+  @Test
+  public void testDiskUsageOrderingForBucket() throws Exception {
+    // bucket level DU
+    // Verify the ordering of subpaths under the bucket
+    verifyOrdering("/volA/bucketA1");
+    verifyOrdering("/volA/bucketA2");
+    verifyOrdering("/volA/bucketA3");
+    verifyOrdering("/volB/bucketB1");
+  }
+
+  private void verifyOrdering(String path)
+      throws IOException {
+    Response response =
+        nsSummaryEndpoint.getDiskUsage(path, true, false, true);
+    DUResponse duRes = (DUResponse) response.getEntity();
+    List<DUResponse.DiskUsage> duData = duRes.getDuData();
+    List<DUResponse.DiskUsage> sortedDuData = new ArrayList<>(duData);
+    // Sort the DU data by size in descending order to compare with the 
original.
+    sortedDuData.sort(
+        Comparator.comparingLong(DUResponse.DiskUsage::getSize).reversed());
+
+    for (int i = 0; i < duData.size(); i++) {
+      assertEquals(sortedDuData.get(i).getSubpath(),
+          duData.get(i).getSubpath(),
+          "DU-Sub-Path under " + path +
+              " should be sorted by descending order of size");
+    }
+  }
+
+  /**
+   * Tests the NSSummaryEndpoint for a given volume, bucket, and directory 
structure.
+   * The test setup mimics the following filesystem structure with specified 
sizes:
+   *
+   * root
+   * ├── volA
+   * │   ├── bucketA1
+   * │   │   ├── fileA1 (Size: 600KB)
+   * │   │   ├── fileA2 (Size: 80KB)
+   * │   │   ├── dirA1 (Total Size: 1500KB)
+   * │   │   ├── dirA2 (Total Size: 1700KB)
+   * │   │   └── dirA3 (Total Size: 1300KB)
+   * │   ├── bucketA2
+   * │   │   ├── fileA3 (Size: 200KB)
+   * │   │   ├── fileA4 (Size: 4000KB)
+   * │   │   ├── dirA4 (Total Size: 1100KB)
+   * │   │   ├── dirA5 (Total Size: 1900KB)
+   * │   │   └── dirA6 (Total Size: 210KB)
+   * │   └── bucketA3
+   * │       ├── fileA5 (Size: 5000KB)
+   * │       ├── fileA6 (Size: 700KB)
+   * │       ├── dirA7 (Total Size: 1200KB)
+   * │       ├── dirA8 (Total Size: 1600KB)
+   * │       └── dirA9 (Total Size: 180KB)
+   * └── volB
+   *     └── bucketB1
+   *         ├── fileB1 (Size: 300KB)
+   *         ├── fileB2 (Size: 500KB)
+   *         ├── dirB1 (Total Size: 14000KB)
+   *         ├── dirB2 (Total Size: 1800KB)
+   *         └── dirB3 (Total Size: 2200KB)
+   *
+   * @throws Exception
+   */
+  private void populateOMDB() throws Exception {
+    // Create Volumes
+    long volAObjectId = createVolume("volA");
+    long volBObjectId = createVolume("volB");
+
+    // Create Buckets in volA
+    long bucketA1ObjectId =
+        createBucket("volA", "bucketA1", 600 + 80 + 1500 + 1700 + 1300);
+    long bucketA2ObjectId =
+        createBucket("volA", "bucketA2", 200 + 4000 + 1100 + 1900 + 210);
+    long bucketA3ObjectId =
+        createBucket("volA", "bucketA3", 5000 + 700 + 1200 + 1600 + 180);
+
+    // Create Bucket in volB
+    long bucketB1ObjectId =
+        createBucket("volB", "bucketB1", 300 + 500 + 14000 + 1800 + 2200);
+
+    // Create Directories and Files under bucketA1
+    long dirA1ObjectId =
+        createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId,
+            "dirA1");
+    long dirA2ObjectId =
+        createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId,
+            "dirA2");
+    long dirA3ObjectId =
+        createDirectory(bucketA1ObjectId, bucketA1ObjectId, volAObjectId,
+            "dirA3");
+
+    // Files directly under bucketA1
+    createFile("fileA1", "bucketA1", "volA", "fileA1", bucketA1ObjectId,
+        bucketA1ObjectId, volAObjectId, 600 * 1024);
+    createFile("fileA2", "bucketA1", "volA", "fileA2", bucketA1ObjectId,
+        bucketA1ObjectId, volAObjectId, 80 * 1024);
+
+    // Create Directories and Files under bucketA2
+    long dirA4ObjectId =
+        createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId,
+            "dirA4");
+    long dirA5ObjectId =
+        createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId,
+            "dirA5");
+    long dirA6ObjectId =
+        createDirectory(bucketA2ObjectId, bucketA2ObjectId, volAObjectId,
+            "dirA6");
+
+    // Files directly under bucketA2
+    createFile("fileA3", "bucketA2", "volA", "fileA3", bucketA2ObjectId,
+        bucketA2ObjectId, volAObjectId, 200 * 1024);
+    createFile("fileA4", "bucketA2", "volA", "fileA4", bucketA2ObjectId,
+        bucketA2ObjectId, volAObjectId, 4000 * 1024);
+
+    // Create Directories and Files under bucketA3
+    long dirA7ObjectId =
+        createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId,
+            "dirA7");
+    long dirA8ObjectId =
+        createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId,
+            "dirA8");
+    long dirA9ObjectId =
+        createDirectory(bucketA3ObjectId, bucketA3ObjectId, volAObjectId,
+            "dirA9");
+
+    // Files directly under bucketA3
+    createFile("fileA5", "bucketA3", "volA", "fileA5", bucketA3ObjectId,
+        bucketA3ObjectId, volAObjectId, 5000 * 1024);
+    createFile("fileA6", "bucketA3", "volA", "fileA6", bucketA3ObjectId,
+        bucketA3ObjectId, volAObjectId, 700 * 1024);
+
+    // Create Directories and Files under bucketB1
+    long dirB1ObjectId =
+        createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId,
+            "dirB1");
+    long dirB2ObjectId =
+        createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId,
+            "dirB2");
+    long dirB3ObjectId =
+        createDirectory(bucketB1ObjectId, bucketB1ObjectId, volBObjectId,
+            "dirB3");
+
+    // Files directly under bucketB1
+    createFile("fileB1", "bucketB1", "volB", "fileB1", bucketB1ObjectId,
+        bucketB1ObjectId, volBObjectId, 300 * 1024);
+    createFile("fileB2", "bucketB1", "volB", "fileB2", bucketB1ObjectId,
+        bucketB1ObjectId, volBObjectId, 500 * 1024);
+
+    // Create Inner files under directories
+    createFile("dirA1/innerFile", "bucketA1", "volA", "innerFile",
+        dirA1ObjectId, bucketA1ObjectId, volAObjectId, 1500 * 1024);
+    createFile("dirA2/innerFile", "bucketA1", "volA", "innerFile",
+        dirA2ObjectId, bucketA1ObjectId, volAObjectId, 1700 * 1024);
+    createFile("dirA3/innerFile", "bucketA1", "volA", "innerFile",
+        dirA3ObjectId, bucketA1ObjectId, volAObjectId, 1300 * 1024);
+    createFile("dirA4/innerFile", "bucketA2", "volA", "innerFile",
+        dirA4ObjectId, bucketA2ObjectId, volAObjectId, 1100 * 1024);
+    createFile("dirA5/innerFile", "bucketA2", "volA", "innerFile",
+        dirA5ObjectId, bucketA2ObjectId, volAObjectId, 1900 * 1024);
+    createFile("dirA6/innerFile", "bucketA2", "volA", "innerFile",
+        dirA6ObjectId, bucketA2ObjectId, volAObjectId, 210 * 1024);
+    createFile("dirA7/innerFile", "bucketA3", "volA", "innerFile",
+        dirA7ObjectId, bucketA3ObjectId, volAObjectId, 1200 * 1024);
+    createFile("dirA8/innerFile", "bucketA3", "volA", "innerFile",
+        dirA8ObjectId, bucketA3ObjectId, volAObjectId, 1600 * 1024);
+    createFile("dirA9/innerFile", "bucketA3", "volA", "innerFile",
+        dirA9ObjectId, bucketA3ObjectId, volAObjectId, 180 * 1024);
+    createFile("dirB1/innerFile", "bucketB1", "volB", "innerFile",
+        dirB1ObjectId, bucketB1ObjectId, volBObjectId, 14000 * 1024);
+    createFile("dirB2/innerFile", "bucketB1", "volB", "innerFile",
+        dirB2ObjectId, bucketB1ObjectId, volBObjectId, 1800 * 1024);
+    createFile("dirB3/innerFile", "bucketB1", "volB", "innerFile",
+        dirB3ObjectId, bucketB1ObjectId, volBObjectId, 2200 * 1024);
+  }
+
+  /**
+   * Create a volume and add it to the Volume Table.
+   * @return volume Object ID
+   * @throws Exception
+   */
+  private long createVolume(String volumeName) throws Exception {
+    String volumeKey = reconOMMetadataManager.getVolumeKey(volumeName);
+    long volumeId = UUID.randomUUID().getMostSignificantBits() &
+        Long.MAX_VALUE; // Generate positive ID
+    OmVolumeArgs args = OmVolumeArgs.newBuilder()
+        .setObjectID(volumeId)
+        .setVolume(volumeName)
+        .setAdminName(TEST_USER)
+        .setOwnerName(TEST_USER)
+        .build();
+
+    reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
+    return volumeId;
+  }
+
+  /**
+   * Create a bucket and add it to the Bucket Table.
+   * @return bucket Object ID
+   * @throws Exception
+   */
+  private long createBucket(String volumeName, String bucketName, long dataSize)
+      throws Exception {
+    String bucketKey =
+        reconOMMetadataManager.getBucketKey(volumeName, bucketName);
+    long bucketId = UUID.randomUUID().getMostSignificantBits() &
+        Long.MAX_VALUE; // Generate positive ID
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setObjectID(bucketId)
+        .setBucketLayout(getBucketLayout())
+        .setUsedBytes(dataSize)
+        .build();
+
+    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
+    return bucketId;
+  }
+
+  /**
+   * Create a directory and add it to the Directory Table.
+   * @return directory Object ID
+   * @throws IOException
+   */
+  private long createDirectory(long parentObjectId,
+                               long bucketObjectId,
+                               long volumeObjectId,
+                               String dirName) throws IOException {
+    long objectId = UUID.randomUUID().getMostSignificantBits() &
+        Long.MAX_VALUE; // Ensure positive ID
+    writeDirToOm(reconOMMetadataManager, objectId, parentObjectId,
+        bucketObjectId,
+        volumeObjectId, dirName);
+    return objectId;
+  }
+
+  /**
+   * Create a file and add it to the File Table.
+   * @return file Object ID
+   * @throws IOException
+   */
+  @SuppressWarnings("checkstyle:ParameterNumber")
+  private long createFile(String key,
+                          String bucket,
+                          String volume,
+                          String fileName,
+                          long parentObjectId,
+                          long bucketObjectId,
+                          long volumeObjectId,
+                          long dataSize) throws IOException {
+    long objectId = UUID.randomUUID().getMostSignificantBits() &
+        Long.MAX_VALUE; // Ensure positive ID
+    writeKeyToOm(reconOMMetadataManager, key, bucket, volume, fileName,
+        objectId,
+        parentObjectId, bucketObjectId, volumeObjectId, dataSize,
+        getBucketLayout());
+    return objectId;
+  }
+
+  private static ReconStorageContainerManagerFacade getMockReconSCM()
+      throws ContainerNotFoundException {
+    ReconStorageContainerManagerFacade reconSCM =
+        mock(ReconStorageContainerManagerFacade.class);
+    ContainerManager containerManager = mock(ContainerManager.class);
+
+    when(reconSCM.getContainerManager()).thenReturn(containerManager);
+    ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class);
+    when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager);
+    return reconSCM;
+  }
+
+  private static BucketLayout getBucketLayout() {
+    return BucketLayout.FILE_SYSTEM_OPTIMIZED;
+  }
+}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
index cbe850b918..a88064d565 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithFSO.java
@@ -449,7 +449,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDiskUsageRoot() throws Exception {
     // root level DU
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, false);
+        false, false, false);
     DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
     assertEquals(2, duRootRes.getCount());
     List<DUResponse.DiskUsage> duRootData = duRootRes.getDuData();
@@ -463,11 +463,12 @@ public class TestNSSummaryEndpointWithFSO {
     assertEquals(VOL_DATA_SIZE, duVol1.getSize());
     assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize());
   }
+
   @Test
   public void testDiskUsageVolume() throws Exception {
     // volume level DU
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-            false, false);
+        false, false, false);
     DUResponse duVolRes = (DUResponse) volResponse.getEntity();
     assertEquals(2, duVolRes.getCount());
     List<DUResponse.DiskUsage> duData = duVolRes.getDuData();
@@ -482,11 +483,12 @@ public class TestNSSummaryEndpointWithFSO {
     assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize());
 
   }
+
   @Test
   public void testDiskUsageBucket() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-            false, false);
+        false, false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(1, duBucketResponse.getCount());
     DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0);
@@ -494,11 +496,12 @@ public class TestNSSummaryEndpointWithFSO {
     assertEquals(DIR_ONE_DATA_SIZE, duDir1.getSize());
 
   }
+
   @Test
   public void testDiskUsageDir() throws Exception {
     // dir level DU
     Response dirResponse = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
-            false, false);
+        false, false, false);
     DUResponse duDirReponse = (DUResponse) dirResponse.getEntity();
     assertEquals(3, duDirReponse.getCount());
     List<DUResponse.DiskUsage> duSubDir = duDirReponse.getDuData();
@@ -517,21 +520,23 @@ public class TestNSSummaryEndpointWithFSO {
     assertEquals(KEY_SIX_SIZE, duDir4.getSize());
 
   }
+
   @Test
   public void testDiskUsageKey() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
-            false, false);
+        false, false, false);
     DUResponse keyObj = (DUResponse) keyResponse.getEntity();
     assertEquals(0, keyObj.getCount());
     assertEquals(KEY_FOUR_SIZE, keyObj.getSize());
 
   }
+
   @Test
   public void testDiskUsageUnknown() throws Exception {
     // invalid path check
     Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH,
-            false, false);
+        false, false, false);
     DUResponse invalidObj = (DUResponse) invalidResponse.getEntity();
     assertEquals(ResponseStatus.PATH_NOT_FOUND,
             invalidObj.getStatus());
@@ -541,7 +546,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDiskUsageWithReplication() throws Exception {
     setUpMultiBlockKey();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH,
-            false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA,
@@ -553,7 +558,7 @@ public class TestNSSummaryEndpointWithFSO {
     setUpMultiBlockReplicatedKeys();
     //   withReplica is true
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT,
@@ -567,7 +572,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDataSizeUnderVolWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL,
@@ -580,7 +585,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDataSizeUnderBucketWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1,
@@ -599,7 +604,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDataSizeUnderDirWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_DIR1,
@@ -612,7 +617,7 @@ public class TestNSSummaryEndpointWithFSO {
   public void testDataSizeUnderKeyWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
index 765399f71e..a5064ba5be 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java
@@ -245,10 +245,10 @@ public class TestNSSummaryEndpointWithLegacy {
               StandaloneReplicationConfig.getInstance(ONE));
   private static final long FILE6_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_SIX_SIZE,
-              StandaloneReplicationConfig.getInstance(ONE));;
+              StandaloneReplicationConfig.getInstance(ONE));
   private static final long FILE7_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_SEVEN_SIZE,
-              StandaloneReplicationConfig.getInstance(ONE));;
+              StandaloneReplicationConfig.getInstance(ONE));
   private static final long FILE8_SIZE_WITH_REPLICA =
       getReplicatedSize(KEY_EIGHT_SIZE,
               StandaloneReplicationConfig.getInstance(ONE));
@@ -451,7 +451,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageRoot() throws Exception {
     // root level DU
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, false);
+        false, false, false);
     DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
     assertEquals(2, duRootRes.getCount());
     List<DUResponse.DiskUsage> duRootData = duRootRes.getDuData();
@@ -470,7 +470,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageVolume() throws Exception {
     // volume level DU
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-        false, false);
+        false, false, false);
     DUResponse duVolRes = (DUResponse) volResponse.getEntity();
     assertEquals(2, duVolRes.getCount());
     List<DUResponse.DiskUsage> duData = duVolRes.getDuData();
@@ -489,7 +489,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageBucket() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(1, duBucketResponse.getCount());
     DUResponse.DiskUsage duDir1 = duBucketResponse.getDuData().get(0);
@@ -501,7 +501,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageDir() throws Exception {
     // dir level DU
     Response dirResponse = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duDirReponse = (DUResponse) dirResponse.getEntity();
     assertEquals(3, duDirReponse.getCount());
     List<DUResponse.DiskUsage> duSubDir = duDirReponse.getDuData();
@@ -524,7 +524,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageKey() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
-        false, false);
+        false, false, false);
     DUResponse keyObj = (DUResponse) keyResponse.getEntity();
     assertEquals(0, keyObj.getCount());
     assertEquals(KEY_FOUR_SIZE, keyObj.getSize());
@@ -534,7 +534,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageUnknown() throws Exception {
     // invalid path check
     Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH,
-        false, false);
+        false, false, false);
     DUResponse invalidObj = (DUResponse) invalidResponse.getEntity();
     assertEquals(ResponseStatus.PATH_NOT_FOUND,
         invalidObj.getStatus());
@@ -544,7 +544,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDiskUsageWithReplication() throws Exception {
     setUpMultiBlockKey();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA,
@@ -556,7 +556,7 @@ public class TestNSSummaryEndpointWithLegacy {
     setUpMultiBlockReplicatedKeys();
     //   withReplica is true
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT,
@@ -570,7 +570,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDataSizeUnderVolWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL,
@@ -583,7 +583,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDataSizeUnderBucketWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1,
@@ -602,7 +602,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDataSizeUnderDirWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response dir1Response = nsSummaryEndpoint.getDiskUsage(DIR_ONE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) dir1Response.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_DIR1,
@@ -615,7 +615,7 @@ public class TestNSSummaryEndpointWithLegacy {
   public void testDataSizeUnderKeyWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
index 8d8299aefc..ce8aa72963 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java
@@ -552,7 +552,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageRoot() throws Exception {
     // root level DU
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, false);
+        false, false, false);
     DUResponse duRootRes = (DUResponse) rootResponse.getEntity();
     assertEquals(2, duRootRes.getCount());
     List<DUResponse.DiskUsage> duRootData = duRootRes.getDuData();
@@ -571,7 +571,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageVolume() throws Exception {
     // volume level DU
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-        false, false);
+        false, false, false);
     DUResponse duVolRes = (DUResponse) volResponse.getEntity();
     assertEquals(2, duVolRes.getCount());
     List<DUResponse.DiskUsage> duData = duVolRes.getDuData();
@@ -590,7 +590,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageVolTwo() throws Exception {
     // volume level DU
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH,
-        false, false);
+        false, false, false);
     DUResponse duVolRes = (DUResponse) volResponse.getEntity();
     assertEquals(2, duVolRes.getCount());
     List<DUResponse.DiskUsage> duData = duVolRes.getDuData();
@@ -608,13 +608,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageBucketOne() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     // There are no sub-paths under this OBS bucket.
     assertEquals(0, duBucketResponse.getCount());
 
     Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
-        BUCKET_ONE_PATH, true, false);
+        BUCKET_ONE_PATH, true, false, false);
     DUResponse duBucketResponseWithFiles =
         (DUResponse) bucketResponseWithSubpath.getEntity();
     assertEquals(3, duBucketResponseWithFiles.getCount());
@@ -626,13 +626,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageBucketTwo() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH,
-        false, false);
+        false, false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     // There are no sub-paths under this OBS bucket.
     assertEquals(0, duBucketResponse.getCount());
 
     Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
-        BUCKET_TWO_PATH, true, false);
+        BUCKET_TWO_PATH, true, false, false);
     DUResponse duBucketResponseWithFiles =
         (DUResponse) bucketResponseWithSubpath.getEntity();
     assertEquals(2, duBucketResponseWithFiles.getCount());
@@ -644,13 +644,13 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageBucketThree() throws Exception {
     // bucket level DU
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity();
     // There are no sub-paths under this Legacy bucket.
     assertEquals(0, duBucketResponse.getCount());
 
     Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage(
-        BUCKET_THREE_PATH, true, false);
+        BUCKET_THREE_PATH, true, false, false);
     DUResponse duBucketResponseWithFiles =
         (DUResponse) bucketResponseWithSubpath.getEntity();
     assertEquals(3, duBucketResponseWithFiles.getCount());
@@ -662,7 +662,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey1() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize());
@@ -672,7 +672,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey2() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH,
-        false, false);
+        false, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize());
@@ -682,7 +682,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey4() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH,
-        true, false);
+        true, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize());
@@ -692,7 +692,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey5() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH,
-        false, false);
+        false, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize());
@@ -702,7 +702,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey8() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH,
-        false, false);
+        false, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize());
@@ -712,7 +712,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageKey11() throws Exception {
     // key level DU
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH,
-        false, false);
+        false, false, false);
     DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(0, duKeyResponse.getCount());
     assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize());
@@ -722,7 +722,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageUnknown() throws Exception {
     // invalid path check
     Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH,
-        false, false);
+        false, false, false);
     DUResponse invalidObj = (DUResponse) invalidResponse.getEntity();
     assertEquals(ResponseStatus.PATH_NOT_FOUND,
         invalidObj.getStatus());
@@ -732,7 +732,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDiskUsageWithReplication() throws Exception {
     setUpMultiBlockKey();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA,
@@ -744,7 +744,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
     setUpMultiBlockReplicatedKeys();
     //   withReplica is true
     Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT,
@@ -758,7 +758,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDataSizeUnderVolWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL,
@@ -771,7 +771,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDataSizeUnderBucketOneWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1,
@@ -782,7 +782,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDataSizeUnderBucketThreeWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3,
@@ -793,7 +793,7 @@ public class TestNSSummaryEndpointWithOBSAndLegacy {
   public void testDataSizeUnderKeyWithReplication() throws IOException {
     setUpMultiBlockReplicatedKeys();
     Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH,
-        false, true);
+        false, true, false);
     DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity();
     assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus());
     assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY,


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to