dombizita commented on code in PR #6231:
URL: https://github.com/apache/ozone/pull/6231#discussion_r1589288745


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java:
##########
@@ -0,0 +1,457 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.Map;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Set;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.recon.ReconConstants.*;
+import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath;
+
+/**
+ * REST endpoint for search implementation in OM DB Insight.
+ */
+@Path("/keys")
+@Produces(MediaType.APPLICATION_JSON)
+@AdminOnly
+public class OMDBInsightSearchEndpoint {
+
+  private OzoneStorageContainerManager reconSCM;
+  private final ReconOMMetadataManager omMetadataManager;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class);
+  private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
+
+
+  @Inject
+  public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM,
+                                   ReconOMMetadataManager omMetadataManager,
+                                   ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) {
+    this.reconSCM = reconSCM;
+    this.omMetadataManager = omMetadataManager;
+    this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
+  }
+
+
+  /**
+   * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix.
+   * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts,
+   * compiling a list of keys that match the given prefix along with their data sizes.
+   * <p>
+   * The search prefix may range from the root level ('/') to any specific directory
+   * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches
+   * the prefix against the start of keys' names within the OM DB.
+   * <p>
+   * Example Usage:
+   * 1. A startPrefix of "/" will return all keys in the database.
+   * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'.
+   * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'.
+   *
+   * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path.
+   * @param limit       Limits the number of returned keys.
+   * @return A KeyInsightInfoResponse, containing matching keys and their data sizes.
+   * @throws IOException On failure to access the OM database or process the operation.
+   */
+  @GET
+  @Path("/open/search")
+  public Response searchOpenKeys(
+      @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix")
+      String startPrefix,
+      @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit")

Review Comment:
   If I looked through the code correctly, this endpoint doesn't support 
unlimited response, right? We always need to set the limit parameter to a valid 
non-negative number? Can we highlight this in the javadoc? 
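
   For illustration, a javadoc tweak along these lines would make that explicit (the wording is only a sketch, and it assumes the `Math.max(0, limit)` clamp in the method body is the intended behaviour):

   ```java
    * @param limit Limits the number of returned keys. There is no "unlimited"
    *              value: at most this many keys are returned, and negative
    *              values are clamped to 0.
   ```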



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java:
##########
@@ -0,0 +1,457 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.Map;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Set;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.recon.ReconConstants.*;
+import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath;
+
+/**
+ * REST endpoint for search implementation in OM DB Insight.
+ */
+@Path("/keys")
+@Produces(MediaType.APPLICATION_JSON)
+@AdminOnly
+public class OMDBInsightSearchEndpoint {
+
+  private OzoneStorageContainerManager reconSCM;
+  private final ReconOMMetadataManager omMetadataManager;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class);
+  private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
+
+
+  @Inject
+  public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM,
+                                   ReconOMMetadataManager omMetadataManager,
+                                   ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) {
+    this.reconSCM = reconSCM;
+    this.omMetadataManager = omMetadataManager;
+    this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
+  }
+
+
+  /**
+   * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix.
+   * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts,
+   * compiling a list of keys that match the given prefix along with their data sizes.
+   * <p>
+   * The search prefix may range from the root level ('/') to any specific directory
+   * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches
+   * the prefix against the start of keys' names within the OM DB.
+   * <p>
+   * Example Usage:
+   * 1. A startPrefix of "/" will return all keys in the database.
+   * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'.
+   * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'.
+   *
+   * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path.
+   * @param limit       Limits the number of returned keys.
+   * @return A KeyInsightInfoResponse, containing matching keys and their data sizes.
+   * @throws IOException On failure to access the OM database or process the operation.
+   */
+  @GET
+  @Path("/open/search")
+  public Response searchOpenKeys(
+      @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix")
+      String startPrefix,
+      @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit")
+      int limit) throws IOException {
+    try {
+      limit = Math.max(0, limit); // Ensure limit is non-negative
+      KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse();
+      long replicatedTotal = 0;
+      long unreplicatedTotal = 0;
+      boolean keysFound = false; // Flag to track if any keys are found
+
+      // Search keys from non-FSO layout.
+      Map<String, OmKeyInfo> obsKeys = new LinkedHashMap<>();
+      Table<String, OmKeyInfo> openKeyTable =
+          omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY);
+      obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit);
+      for (Map.Entry<String, OmKeyInfo> entry : obsKeys.entrySet()) {
+        keysFound = true;
+        KeyEntityInfo keyEntityInfo =
+            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
+        insightResponse.getNonFSOKeyInfoList()
+            .add(keyEntityInfo); // Add to non-FSO list
+        replicatedTotal += entry.getValue().getReplicatedSize();
+        unreplicatedTotal += entry.getValue().getDataSize();
+      }
+
+      // Search keys from FSO layout.
+      Map<String, OmKeyInfo> fsoKeys = searchOpenKeysInFSO(startPrefix, limit);
+      for (Map.Entry<String, OmKeyInfo> entry : fsoKeys.entrySet()) {
+        keysFound = true;
+        KeyEntityInfo keyEntityInfo =
+            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
+        insightResponse.getFsoKeyInfoList()
+            .add(keyEntityInfo); // Add to FSO list
+        replicatedTotal += entry.getValue().getReplicatedSize();
+        unreplicatedTotal += entry.getValue().getDataSize();
+      }
+
+      // If no keys were found, return a response indicating that no keys matched
+      if (!keysFound) {
+        return noMatchedKeysResponse(startPrefix);
+      }
+
+      // Set the aggregated totals in the response
+      insightResponse.setReplicatedDataSize(replicatedTotal);
+      insightResponse.setUnreplicatedDataSize(unreplicatedTotal);
+
+      return Response.ok(insightResponse).build();
+    } catch (IOException e) {
+      return createInternalServerErrorResponse(
+          "Error searching open keys in OM DB: " + e.getMessage());
+    } catch (IllegalArgumentException e) {
+      return createBadRequestResponse(
+          "Invalid startPrefix: " + e.getMessage());
+    }
+  }
+
+  public Map<String, OmKeyInfo> searchOpenKeysInFSO(String startPrefix,
+                                                    int limit)
+      throws IOException, IllegalArgumentException {
+    Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
+    // Convert the search prefix to an object path for FSO buckets
+    String startPrefixObjectPath = convertToObjectPath(startPrefix);
+    String[] names = parseRequestPath(startPrefixObjectPath);
+    Table<String, OmKeyInfo> openFileTable =
+        omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+    // If names.length > 2, then the search prefix is at the volume or bucket level hence
+    // no need to find parent or extract id's or find subpaths as the openFileTable is
+    // suitable for volume and bucket level search
+    if (names.length > 2) {

Review Comment:
   Maybe I misunderstood something, shouldn't the check be the opposite? If the 
`names.length` is more than 2, it means that the search prefix is longer than 
2, so we are searching at `/vol/buck/key` level, so it's not at the volume or 
bucket level. 
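
   To make the concern concrete, here is a small sketch of how the component count maps to levels (the `parseRequestPath` below is a hypothetical stand-in that just splits on '/', not the real `EntityHandler` helper). If this reading is right, the guarded block is the below-bucket case, so it may be the comment above the check, rather than the check itself, that needs rewording:

   ```java
   // Illustration only: a stand-in splitter, not the real parseRequestPath.
   public class PrefixLevelSketch {
     static String[] parseRequestPath(String path) {
       return path.replaceFirst("^/", "").split("/");
     }

     public static void main(String[] args) {
       // Bucket-level prefix: 2 components -> plain openFileTable prefix scan.
       System.out.println(parseRequestPath("/volId/bucketId").length);       // 2
       // Below-bucket prefix: 3 components -> needs the parent-ID and sub-path
       // handling done inside the names.length > 2 branch.
       System.out.println(parseRequestPath("/volId/bucketId/dirId").length); // 3
     }
   }
   ```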



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightSearchEndpoint.java:
##########
@@ -0,0 +1,456 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
+import org.apache.hadoop.ozone.recon.api.types.NSSummary;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.inject.Inject;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.Map;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Set;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.recon.ReconConstants.*;
+import static org.apache.hadoop.ozone.recon.api.handlers.BucketHandler.getBucketHandler;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.normalizePath;
+import static org.apache.hadoop.ozone.recon.api.handlers.EntityHandler.parseRequestPath;
+
+/**
+ * REST endpoint for search implementation in OM DB Insight.
+ */
+@Path("/keys")
+@Produces(MediaType.APPLICATION_JSON)
+@AdminOnly
+public class OMDBInsightSearchEndpoint {
+
+  private OzoneStorageContainerManager reconSCM;
+  private final ReconOMMetadataManager omMetadataManager;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OMDBInsightSearchEndpoint.class);
+  private ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager;
+
+
+  @Inject
+  public OMDBInsightSearchEndpoint(OzoneStorageContainerManager reconSCM,
+                                   ReconOMMetadataManager omMetadataManager,
+                                   ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager) {
+    this.reconSCM = reconSCM;
+    this.omMetadataManager = omMetadataManager;
+    this.reconNamespaceSummaryManager = reconNamespaceSummaryManager;
+  }
+
+
+  /**
+   * Performs a search for open keys in the Ozone Manager (OM) database using a specified search prefix.
+   * This endpoint searches across both File System Optimized (FSO) and Object Store (non-FSO) layouts,
+   * compiling a list of keys that match the given prefix along with their data sizes.
+   * <p>
+   * The search prefix may range from the root level ('/') to any specific directory
+   * or key level (e.g., '/volA/' for everything under 'volA'). The search operation matches
+   * the prefix against the start of keys' names within the OM DB.
+   * <p>
+   * Example Usage:
+   * 1. A startPrefix of "/" will return all keys in the database.
+   * 2. A startPrefix of "/volA/" retrieves every key under volume 'volA'.
+   * 3. Specifying "/volA/bucketA/dir1" focuses the search within 'dir1' inside 'bucketA' of 'volA'.
+   *
+   * @param startPrefix The prefix for searching keys, starting from the root ('/') or any specific path.
+   * @param limit       Limits the number of returned keys.
+   * @return A KeyInsightInfoResponse, containing matching keys and their data sizes.
+   * @throws IOException On failure to access the OM database or process the operation.
+   */
+  @GET
+  @Path("/open/search")
+  public Response searchOpenKeys(
+      @DefaultValue(DEFAULT_START_PREFIX) @QueryParam("startPrefix")
+      String startPrefix,
+      @DefaultValue(RECON_OPEN_KEY_DEFAULT_SEARCH_LIMIT) @QueryParam("limit")
+      int limit) throws IOException {
+    try {
+      limit = Math.max(0, limit); // Ensure limit is non-negative
+      KeyInsightInfoResponse insightResponse = new KeyInsightInfoResponse();
+      long replicatedTotal = 0;
+      long unreplicatedTotal = 0;
+      boolean keysFound = false; // Flag to track if any keys are found
+
+      // Search keys from non-FSO layout.
+      Map<String, OmKeyInfo> obsKeys = new LinkedHashMap<>();
+      Table<String, OmKeyInfo> openKeyTable =
+          omMetadataManager.getOpenKeyTable(BucketLayout.LEGACY);
+      obsKeys = retrieveKeysFromTable(openKeyTable, startPrefix, limit);
+      for (Map.Entry<String, OmKeyInfo> entry : obsKeys.entrySet()) {
+        keysFound = true;
+        KeyEntityInfo keyEntityInfo =
+            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
+        insightResponse.getNonFSOKeyInfoList()
+            .add(keyEntityInfo); // Add to non-FSO list
+        replicatedTotal += entry.getValue().getReplicatedSize();
+        unreplicatedTotal += entry.getValue().getDataSize();
+      }
+
+      // Search keys from FSO layout.
+      Map<String, OmKeyInfo> fsoKeys = searchOpenKeysInFSO(startPrefix, limit);
+      for (Map.Entry<String, OmKeyInfo> entry : fsoKeys.entrySet()) {
+        keysFound = true;
+        KeyEntityInfo keyEntityInfo =
+            createKeyEntityInfoFromOmKeyInfo(entry.getKey(), entry.getValue());
+        insightResponse.getFsoKeyInfoList()
+            .add(keyEntityInfo); // Add to FSO list
+        replicatedTotal += entry.getValue().getReplicatedSize();
+        unreplicatedTotal += entry.getValue().getDataSize();
+      }
+
+      // If no keys were found, return a response indicating that no keys matched
+      if (!keysFound) {
+        return noMatchedKeysResponse(startPrefix);
+      }
+
+      // Set the aggregated totals in the response
+      insightResponse.setReplicatedDataSize(replicatedTotal);
+      insightResponse.setUnreplicatedDataSize(unreplicatedTotal);
+
+      return Response.ok(insightResponse).build();
+    } catch (IOException e) {
+      return createInternalServerErrorResponse(
+          "Error searching open keys in OM DB: " + e.getMessage());
+    } catch (IllegalArgumentException e) {
+      return createBadRequestResponse(
+          "Invalid startPrefix: " + e.getMessage());
+    }
+  }
+
+  public Map<String, OmKeyInfo> searchOpenKeysInFSO(String startPrefix,
+                                                    int limit)
+      throws IOException, IllegalArgumentException {
+    Map<String, OmKeyInfo> matchedKeys = new LinkedHashMap<>();
+    // Convert the search prefix to an object path for FSO buckets
+    String startPrefixObjectPath = convertToObjectPath(startPrefix);
+    String[] names = parseRequestPath(startPrefixObjectPath);
+    Table<String, OmKeyInfo> openFileTable =
+        omMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+    // If names.length > 2, then the search prefix is at the volume or bucket level hence
+    // no need to find parent or extract id's or find subpaths as the openFileTable is
+    // suitable for volume and bucket level search
+    if (names.length > 2) {
+      // Fetch the parent ID to search for
+      long parentId = Long.parseLong(names[names.length - 1]);
+
+      // Fetch the nameSpaceSummary for the parent ID
+      NSSummary parentSummary =
+          reconNamespaceSummaryManager.getNSSummary(parentId);
+      if (parentSummary == null) {
+        return matchedKeys;
+      }
+      List<String> subPaths = new ArrayList<>();
+      // Add the initial search prefix object path because it can have both openFiles
+      // and subdirectories with openFiles
+      subPaths.add(startPrefixObjectPath);
+
+      // Recursively gather all subpaths
+      gatherSubPaths(parentId, subPaths, names);
+
+      // Iterate over the subpaths and retrieve the open files
+      for (String subPath : subPaths) {
+        matchedKeys.putAll(
+            retrieveKeysFromTable(openFileTable, subPath,
+                limit - matchedKeys.size()));
+        if (matchedKeys.size() >= limit) {
+          break;
+        }
+      }
+      return matchedKeys;
+    }
+
+    // Iterate over for bucket and volume level search
+    matchedKeys.putAll(
+        retrieveKeysFromTable(openFileTable, startPrefixObjectPath, limit));
+    return matchedKeys;
+  }
+
+  /**
+   * Finds all subdirectories under a parent directory in an FSO bucket. It builds
+   * a list of paths for these subdirectories. These sub-directories are then used
+   * to search for open files in the openFileTable.
+   * <p>
+   * How it works:
+   * - Starts from a parent directory identified by parentId.
+   * - Looks through all child directories of this parent.
+   * - For each child, it creates a path that starts with volumeID/bucketID/parentId,
+   * following our openFileTable format
+   * - Adds these paths to a list and explores each child further for more subdirectories.
+   *
+   * @param parentId The ID of the directory we start exploring from.
+   * @param subPaths A list where we collect paths to all subdirectories.
+   * @param names    An array with at least two elements: the first is volumeID and
+   *                 the second is bucketID. These are used to start each path.
+   * @throws IOException If there are problems accessing directory information.
+   */
+  private void gatherSubPaths(long parentId, List<String> subPaths,
+                              String[] names) throws IOException {
+    // Fetch the NSSummary object for parentId
+    NSSummary parentSummary =
+        reconNamespaceSummaryManager.getNSSummary(parentId);
+    if (parentSummary == null) {
+      return;
+    }
+
+    Set<Long> childDirIds = parentSummary.getChildDir();
+    for (Long childId : childDirIds) {
+      // Fetch the NSSummary for each child directory
+      NSSummary childSummary =
+          reconNamespaceSummaryManager.getNSSummary(childId);
+      if (childSummary != null) {
+        long volumeID = Long.parseLong(names[0]);

Review Comment:
   Agree



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
