ArafatKhan2198 commented on code in PR #4516:
URL: https://github.com/apache/ozone/pull/4516#discussion_r1169123498


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java:
##########
@@ -0,0 +1,454 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+
+import javax.inject.Inject;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
+
+/**
+ * Endpoint to get following key level info under OM DB Insight page of Recon.
+ * 1. Number of open keys for Legacy/OBS buckets.
+ * 2. Number of open files for FSO buckets.
+ * 3. Amount of data mapped to open keys and open files.
+ * 4. Number of pending delete keys in legacy/OBS buckets and pending
+ * delete files in FSO buckets.
+ * 5. Amount of data mapped to pending delete keys in legacy/OBS buckets and
+ * pending delete files in FSO buckets.
+ */
+@Path("/omdbinsight")
+@Produces(MediaType.APPLICATION_JSON)
+@AdminOnly
+public class OMDBInsightEndpoint {
+
+  @Inject
+  private ContainerEndpoint containerEndpoint;
+  @Inject
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private final ReconOMMetadataManager omMetadataManager;
+  private final ReconContainerManager containerManager;
+
+  @Inject
+  public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM,
+                             ReconOMMetadataManager omMetadataManager) {
+    this.containerManager =
+        (ReconContainerManager) reconSCM.getContainerManager();
+    this.omMetadataManager = omMetadataManager;
+  }
+
+  /**
+   * This method retrieves set of keys/files which are open.
+   *
+   * @return the http json response wrapped in below format:
+   * {
+   *     replicatedTotal: 13824,
+   *     unreplicatedTotal: 4608,
+   *     entities: [
+   *     {
+   *         path: “/vol1/bucket1/key1”,
+   *         keyState: “Open”,
+   *         inStateSince: 1667564193026,
+   *         size: 1024,
+   *         replicatedSize: 3072,
+   *         unreplicatedSize: 1024,
+   *         replicationType: RATIS,
+   *         replicationFactor: THREE
+   *     }.
+   *    {
+   *         path: “/vol1/bucket1/key2”,
+   *         keyState: “Open”,
+   *         inStateSince: 1667564193026,
+   *         size: 512,
+   *         replicatedSize: 1536,
+   *         unreplicatedSize: 512,
+   *         replicationType: RATIS,
+   *         replicationFactor: THREE
+   *     }.
+   *     {
+   *         path: “/vol1/fso-bucket/dir1/file1”,
+   *         keyState: “Open”,
+   *         inStateSince: 1667564193026,
+   *         size: 1024,
+   *         replicatedSize: 3072,
+   *         unreplicatedSize: 1024,
+   *         replicationType: RATIS,
+   *         replicationFactor: THREE
+   *     }.
+   *     {
+   *         path: “/vol1/fso-bucket/dir1/dir2/file2”,
+   *         keyState: “Open”,
+   *         inStateSince: 1667564193026,
+   *         size: 2048,
+   *         replicatedSize: 6144,
+   *         unreplicatedSize: 2048,
+   *         replicationType: RATIS,
+   *         replicationFactor: THREE
+   *     }
+   *   ]
+   * }
+   */
+  @GET
+  @Path("/keys/open")
+  public Response getOpenKeyInfo(
+      @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
+      int limit,
+      @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
+      String prevKeyPrefix) {
+    KeyInsightInfoResp openKeyInsightInfo = new KeyInsightInfoResp();
+    List<KeyEntityInfo> nonFSOKeyInfoList =
+        openKeyInsightInfo.getNonFSOKeyInfoList();
+    boolean isLegacyBucketLayout = true;
+    boolean recordsFetchedLimitReached = false;
+    List<KeyEntityInfo> fsoKeyInfoList = 
openKeyInsightInfo.getFsoKeyInfoList();
+    for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY,
+        BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
+      isLegacyBucketLayout = (layout == BucketLayout.LEGACY);
+      Table<String, OmKeyInfo> openKeyTable =
+          omMetadataManager.getOpenKeyTable(layout);
+      try (
+          TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+              keyIter = openKeyTable.iterator()) {
+        boolean skipPrevKey = false;
+        String seekKey = prevKeyPrefix;
+        if (StringUtils.isNotBlank(prevKeyPrefix)) {
+          skipPrevKey = true;
+          Table.KeyValue<String, OmKeyInfo> seekKeyValue =
+              keyIter.seek(seekKey);
+          // check if RocksDB was able to seek correctly to the given key 
prefix
+          // if not, then return empty result
+          // In case of an empty prevKeyPrefix, all the keys are returned
+          if (seekKeyValue == null ||
+              (StringUtils.isNotBlank(prevKeyPrefix) &&
+                  !seekKeyValue.getKey().equals(prevKeyPrefix))) {
+            return Response.ok(openKeyInsightInfo).build();
+          }
+        }
+        while (keyIter.hasNext()) {
+          Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
+          String key = kv.getKey();
+          OmKeyInfo omKeyInfo = kv.getValue();
+          // skip the prev key if prev key is present
+          if (skipPrevKey && key.equals(prevKeyPrefix)) {
+            continue;
+          }
+          KeyEntityInfo keyEntityInfo = new KeyEntityInfo();
+          keyEntityInfo.setKey(key);
+          keyEntityInfo.setPath(omKeyInfo.getKeyName());
+          keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
+          keyEntityInfo.setSize(omKeyInfo.getDataSize());
+          keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+          keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
+          openKeyInsightInfo.setUnreplicatedTotal(
+              openKeyInsightInfo.getUnreplicatedTotal() +
+                  keyEntityInfo.getSize());
+          openKeyInsightInfo.setReplicatedTotal(
+              openKeyInsightInfo.getReplicatedTotal() +
+                  keyEntityInfo.getReplicatedSize());
+          boolean added =

Review Comment:
   The `added` variable is not referenced anywhere. Can we remove it?



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java:
##########
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * HTTP Response wrapped for keys insights.
+ */
+public class KeyInsightInfoResp {
+
+  /** Amount of data mapped to all keys and files in
+   * a cluster across all DNs. */
+  @JsonProperty("replicatedTotal")
+  private long replicatedTotal;
+
+  /** Amount of data mapped to all keys and files on a single DN. */
+  @JsonProperty("unreplicatedTotal")
+  private long unreplicatedTotal;
+
+  /** List of all non-fso keys. */
+  @JsonProperty("non-fso")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> nonFSOKeyInfoList;
+
+  /** List of all fso keys. */
+  @JsonProperty("fso")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> fsoKeyInfoList;
+
+  /** List of all deleted and repeatedly deleted keys.  */
+  @JsonProperty("deletedkeyinfo")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<RepeatedOmKeyInfo> repeatedOmKeyInfoList;
+
+  @JsonProperty("deleteddirinfo")

Review Comment:
   ```suggestion
     @JsonProperty("deletedDirInfo")
   ```
   Apply camelCase here.



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java:
##########
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * HTTP Response wrapped for keys insights.
+ */
+public class KeyInsightInfoResp {

Review Comment:
   To make things consistent with naming conventions, it's probably a good idea 
to rename the response class. Instead of the current name, it would be better 
to call it either `KeyInsightInfoResponse` or `KeyInsightResponse`.



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java:
##########
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.rules.TemporaryFolder;
+
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for OmDBInsightEndPoint.
+ */
+public class TestOmDBInsightEndPoint {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  private OzoneStorageContainerManager ozoneStorageContainerManager;
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private OMMetadataManager omMetadataManager;
+  private ReconContainerManager reconContainerManager;
+  private ContainerStateManager containerStateManager;
+  private ReconPipelineManager reconPipelineManager;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private OMDBInsightEndpoint omdbInsightEndpoint;
+  private Pipeline pipeline;
+  private PipelineID pipelineID;
+  private Random random = new Random();
+  private long keyCount = 5L;
+
+  @Before
+  public void setUp() throws Exception {
+    omMetadataManager = initializeNewOmMetadataManager(
+        temporaryFolder.newFolder());
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        temporaryFolder.newFolder());
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(temporaryFolder)
+            .withReconSqlDb()
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
+            // No longer using mock reconSCM as we need nodeDB in Facade
+            //  to establish datanode UUID to hostname mapping
+            .addBinding(OzoneStorageContainerManager.class,
+                ReconStorageContainerManagerFacade.class)
+            .withContainerDB()
+            .addBinding(StorageContainerServiceProvider.class,
+                mock(StorageContainerServiceProviderImpl.class))
+            .addBinding(OMDBInsightEndpoint.class)
+            .addBinding(ContainerHealthSchemaManager.class)
+            .build();
+    reconContainerMetadataManager =
+        reconTestInjector.getInstance(ReconContainerMetadataManager.class);
+    omdbInsightEndpoint = reconTestInjector.getInstance(
+        OMDBInsightEndpoint.class);
+    ozoneStorageContainerManager =
+        reconTestInjector.getInstance(OzoneStorageContainerManager.class);
+    reconContainerManager = (ReconContainerManager)
+        ozoneStorageContainerManager.getContainerManager();
+    containerStateManager = reconContainerManager
+        .getContainerStateManager();
+    reconPipelineManager = (ReconPipelineManager)
+        ozoneStorageContainerManager.getPipelineManager();
+    pipeline = getRandomPipeline();
+    pipelineID = pipeline.getId();
+    reconPipelineManager.addPipeline(pipeline);
+    setUpOmData();
+  }
+
+  private void setUpOmData() throws Exception {
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    BlockID blockID1 = new BlockID(1, 101);
+    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo1);
+
+    BlockID blockID2 = new BlockID(2, 102);
+    OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo2);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
+
+    //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_one", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup));
+
+    List<OmKeyLocationInfoGroup> infoGroups = new ArrayList<>();
+    BlockID blockID3 = new BlockID(1, 103);
+    OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3,
+        pipeline);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo3);
+    infoGroups.add(new OmKeyLocationInfoGroup(0,
+        omKeyLocationInfoListNew));
+
+    BlockID blockID4 = new BlockID(1, 104);
+    OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4,
+        pipeline);
+
+    omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo4);
+    infoGroups.add(new OmKeyLocationInfoGroup(1,
+        omKeyLocationInfoListNew));
+
+    //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_two", "bucketOne", "sampleVol", infoGroups);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoList2 = new ArrayList<>();
+    BlockID blockID5 = new BlockID(2, 2);
+    OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo5);
+
+    BlockID blockID6 = new BlockID(2, 3);
+    OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo6);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList2);
+
+    //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_three", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup2));
+
+    //Generate Recon container DB data.
+    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
+    Table tableMock = mock(Table.class);
+    when(tableMock.getName()).thenReturn("KeyTable");
+    when(omMetadataManagerMock.getKeyTable(getBucketLayout()))
+        .thenReturn(tableMock);
+    ContainerKeyMapperTask containerKeyMapperTask  =
+        new ContainerKeyMapperTask(reconContainerMetadataManager);
+    containerKeyMapperTask.reprocess(reconOMMetadataManager);
+  }
+
+  @Test
+  public void testGetOpenKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 =
+        reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, "");
+    KeyInsightInfoResp keyInsightInfoResp =
+        (KeyInsightInfoResp) openKeyInfoResp.getEntity();
+    Assertions.assertNotNull(keyInsightInfoResp);
+    Assertions.assertEquals("key_one",
+        keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath());
+  }
+
+  @Test
+  public void testGetDeletedKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 = 
reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
+    reconOMMetadataManager.getDeletedTable()
+        .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo);
+    RepeatedOmKeyInfo repeatedOmKeyInfo1 =
+        reconOMMetadataManager.getDeletedTable()
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one",
+        repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName());
+    Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, "");
+    KeyInsightInfoResp keyInsightInfoResp =
+        (KeyInsightInfoResp) deletedKeyInfo.getEntity();
+    Assertions.assertNotNull(keyInsightInfoResp);
+    Assertions.assertEquals("key_one",
+        keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList()
+            .get(0).getKeyName());
+  }
+
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName) {
+    return new OmKeyInfo.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setReplicationConfig(StandaloneReplicationConfig
+            .getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setDataSize(random.nextLong())
+        .build();
+  }
+
+  @Test
+  public void testGetDeletedContainerKeysInfo() throws Exception {
+    Map<Long, ContainerMetadata> omContainers =
+        reconContainerMetadataManager.getContainers(-1, 0);
+    putContainerInfos(2);
+    List<ContainerInfo> scmContainers = reconContainerManager.getContainers();
+    assertEquals(omContainers.size(), scmContainers.size());
+    // Update container state of Container Id 1 to CLOSING to CLOSED
+    // and then to DELETED
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.CLOSE);
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.DELETE);
+    Set<ContainerID> containerIDs = containerStateManager
+        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
+    Assert.assertEquals(1, containerIDs.size());
+
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.CLEANUP);
+    containerIDs = containerStateManager

Review Comment:
   Is it necessary to walk through the entire container lifecycle 
(`FINALIZE -> CLOSE -> DELETE -> DELETING -> CLEANUP -> DELETED`), or 
can we just mark the containers as deleted directly?



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java:
##########
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.rules.TemporaryFolder;
+
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for OmDBInsightEndPoint.
+ */
+public class TestOmDBInsightEndPoint {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  private OzoneStorageContainerManager ozoneStorageContainerManager;
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private OMMetadataManager omMetadataManager;
+  private ReconContainerManager reconContainerManager;
+  private ContainerStateManager containerStateManager;
+  private ReconPipelineManager reconPipelineManager;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private OMDBInsightEndpoint omdbInsightEndpoint;
+  private Pipeline pipeline;
+  private PipelineID pipelineID;
+  private Random random = new Random();
+  private long keyCount = 5L;
+
+  @Before
+  public void setUp() throws Exception {
+    omMetadataManager = initializeNewOmMetadataManager(
+        temporaryFolder.newFolder());
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        temporaryFolder.newFolder());
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(temporaryFolder)
+            .withReconSqlDb()
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
+            // No longer using mock reconSCM as we need nodeDB in Facade
+            //  to establish datanode UUID to hostname mapping
+            .addBinding(OzoneStorageContainerManager.class,
+                ReconStorageContainerManagerFacade.class)
+            .withContainerDB()
+            .addBinding(StorageContainerServiceProvider.class,
+                mock(StorageContainerServiceProviderImpl.class))
+            .addBinding(OMDBInsightEndpoint.class)
+            .addBinding(ContainerHealthSchemaManager.class)
+            .build();
+    reconContainerMetadataManager =
+        reconTestInjector.getInstance(ReconContainerMetadataManager.class);
+    omdbInsightEndpoint = reconTestInjector.getInstance(
+        OMDBInsightEndpoint.class);
+    ozoneStorageContainerManager =
+        reconTestInjector.getInstance(OzoneStorageContainerManager.class);
+    reconContainerManager = (ReconContainerManager)
+        ozoneStorageContainerManager.getContainerManager();
+    containerStateManager = reconContainerManager
+        .getContainerStateManager();
+    reconPipelineManager = (ReconPipelineManager)
+        ozoneStorageContainerManager.getPipelineManager();
+    pipeline = getRandomPipeline();
+    pipelineID = pipeline.getId();
+    reconPipelineManager.addPipeline(pipeline);
+    setUpOmData();
+  }
+
+  private void setUpOmData() throws Exception {
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    BlockID blockID1 = new BlockID(1, 101);
+    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo1);
+
+    BlockID blockID2 = new BlockID(2, 102);
+    OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo2);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
+
+    //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_one", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup));
+
+    List<OmKeyLocationInfoGroup> infoGroups = new ArrayList<>();
+    BlockID blockID3 = new BlockID(1, 103);
+    OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3,
+        pipeline);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo3);
+    infoGroups.add(new OmKeyLocationInfoGroup(0,
+        omKeyLocationInfoListNew));
+
+    BlockID blockID4 = new BlockID(1, 104);
+    OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4,
+        pipeline);
+
+    omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo4);
+    infoGroups.add(new OmKeyLocationInfoGroup(1,
+        omKeyLocationInfoListNew));
+
+    //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_two", "bucketOne", "sampleVol", infoGroups);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoList2 = new ArrayList<>();
+    BlockID blockID5 = new BlockID(2, 2);
+    OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo5);
+
+    BlockID blockID6 = new BlockID(2, 3);
+    OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo6);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList2);
+
+    //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_three", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup2));
+
+    //Generate Recon container DB data.
+    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
+    Table tableMock = mock(Table.class);
+    when(tableMock.getName()).thenReturn("KeyTable");
+    when(omMetadataManagerMock.getKeyTable(getBucketLayout()))
+        .thenReturn(tableMock);
+    ContainerKeyMapperTask containerKeyMapperTask  =
+        new ContainerKeyMapperTask(reconContainerMetadataManager);
+    containerKeyMapperTask.reprocess(reconOMMetadataManager);
+  }
+
+  @Test
+  public void testGetOpenKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 =
+        reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, "");
+    KeyInsightInfoResp keyInsightInfoResp =
+        (KeyInsightInfoResp) openKeyInfoResp.getEntity();
+    Assertions.assertNotNull(keyInsightInfoResp);
+    Assertions.assertEquals("key_one",
+        keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath());
+  }
+
+  @Test
+  public void testGetDeletedKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 = 
reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
+    reconOMMetadataManager.getDeletedTable()
+        .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo);
+    RepeatedOmKeyInfo repeatedOmKeyInfo1 =
+        reconOMMetadataManager.getDeletedTable()
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one",
+        repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName());
+    Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, "");
+    KeyInsightInfoResp keyInsightInfoResp =
+        (KeyInsightInfoResp) deletedKeyInfo.getEntity();
+    Assertions.assertNotNull(keyInsightInfoResp);
+    Assertions.assertEquals("key_one",
+        keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList()
+            .get(0).getKeyName());
+  }
+
+  private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+                                 String keyName) {
+    return new OmKeyInfo.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setReplicationConfig(StandaloneReplicationConfig
+            .getInstance(HddsProtos.ReplicationFactor.ONE))
+        .setDataSize(random.nextLong())
+        .build();
+  }
+
+  @Test
+  public void testGetDeletedContainerKeysInfo() throws Exception {
+    Map<Long, ContainerMetadata> omContainers =
+        reconContainerMetadataManager.getContainers(-1, 0);
+    putContainerInfos(2);
+    List<ContainerInfo> scmContainers = reconContainerManager.getContainers();
+    assertEquals(omContainers.size(), scmContainers.size());
+    // Update container state of Container Id 1 to CLOSING to CLOSED
+    // and then to DELETED
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.CLOSE);
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.DELETE);
+    Set<ContainerID> containerIDs = containerStateManager
+        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
+    Assert.assertEquals(1, containerIDs.size());
+
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.CLEANUP);
+    containerIDs = containerStateManager
+        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
+    Assert.assertEquals(1, containerIDs.size());
+
+    List<ContainerInfo> deletedSCMContainers =
+        reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED);
+    assertEquals(1, deletedSCMContainers.size());
+
+    Response deletedContainerKeysInfo =
+        omdbInsightEndpoint.getDeletedContainerKeysInfo(-1, "");

Review Comment:
   Currently, the `testGetDeletedContainerKeysInfo()` method tests only a 
single case. It would be good to cover additional cases, such as when there 
are multiple deleted containers with keys, or when the `limit` and 
`prevKeyPrefix` query parameters are used.



##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java:
##########
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * HTTP Response wrapped for keys insights.
+ */
+public class KeyInsightInfoResp {
+
+  /** Amount of data mapped to all keys and files in
+   * a cluster across all DNs. */
+  @JsonProperty("replicatedTotal")
+  private long replicatedTotal;
+
+  /** Amount of data mapped to all keys and files on a single DN. */
+  @JsonProperty("unreplicatedTotal")
+  private long unreplicatedTotal;
+
+  /** List of all non-fso keys. */
+  @JsonProperty("non-fso")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> nonFSOKeyInfoList;
+
+  /** List of all fso keys. */
+  @JsonProperty("fso")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> fsoKeyInfoList;
+
+  /** List of all deleted and repeatedly deleted keys.  */
+  @JsonProperty("deletedkeyinfo")

Review Comment:
   ```suggestion
     @JsonProperty("deletedKeyInfo")
   ```
   Use lowerCamelCase here, for consistency with the other JSON property names 
(e.g. `replicatedTotal`, `unreplicatedTotal`).



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java:
##########
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.rules.TemporaryFolder;
+
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for OmDBInsightEndPoint.
+ */
+public class TestOmDBInsightEndPoint {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  private OzoneStorageContainerManager ozoneStorageContainerManager;
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private OMMetadataManager omMetadataManager;
+  private ReconContainerManager reconContainerManager;
+  private ContainerStateManager containerStateManager;
+  private ReconPipelineManager reconPipelineManager;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private OMDBInsightEndpoint omdbInsightEndpoint;
+  private Pipeline pipeline;
+  private PipelineID pipelineID;
+  private Random random = new Random();
+  private long keyCount = 5L;
+
+  @Before
+  public void setUp() throws Exception {
+    omMetadataManager = initializeNewOmMetadataManager(
+        temporaryFolder.newFolder());
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        temporaryFolder.newFolder());
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(temporaryFolder)
+            .withReconSqlDb()
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
+            // No longer using mock reconSCM as we need nodeDB in Facade
+            //  to establish datanode UUID to hostname mapping
+            .addBinding(OzoneStorageContainerManager.class,
+                ReconStorageContainerManagerFacade.class)
+            .withContainerDB()
+            .addBinding(StorageContainerServiceProvider.class,
+                mock(StorageContainerServiceProviderImpl.class))
+            .addBinding(OMDBInsightEndpoint.class)
+            .addBinding(ContainerHealthSchemaManager.class)
+            .build();
+    reconContainerMetadataManager =
+        reconTestInjector.getInstance(ReconContainerMetadataManager.class);
+    omdbInsightEndpoint = reconTestInjector.getInstance(
+        OMDBInsightEndpoint.class);
+    ozoneStorageContainerManager =
+        reconTestInjector.getInstance(OzoneStorageContainerManager.class);
+    reconContainerManager = (ReconContainerManager)
+        ozoneStorageContainerManager.getContainerManager();
+    containerStateManager = reconContainerManager
+        .getContainerStateManager();
+    reconPipelineManager = (ReconPipelineManager)
+        ozoneStorageContainerManager.getPipelineManager();
+    pipeline = getRandomPipeline();
+    pipelineID = pipeline.getId();
+    reconPipelineManager.addPipeline(pipeline);
+    setUpOmData();
+  }
+
+  private void setUpOmData() throws Exception {
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    BlockID blockID1 = new BlockID(1, 101);
+    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo1);
+
+    BlockID blockID2 = new BlockID(2, 102);
+    OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo2);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
+
+    //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_one", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup));
+
+    List<OmKeyLocationInfoGroup> infoGroups = new ArrayList<>();
+    BlockID blockID3 = new BlockID(1, 103);
+    OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3,
+        pipeline);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo3);
+    infoGroups.add(new OmKeyLocationInfoGroup(0,
+        omKeyLocationInfoListNew));
+
+    BlockID blockID4 = new BlockID(1, 104);
+    OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4,
+        pipeline);
+
+    omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo4);
+    infoGroups.add(new OmKeyLocationInfoGroup(1,
+        omKeyLocationInfoListNew));
+
+    //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_two", "bucketOne", "sampleVol", infoGroups);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoList2 = new ArrayList<>();
+    BlockID blockID5 = new BlockID(2, 2);
+    OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo5);
+
+    BlockID blockID6 = new BlockID(2, 3);
+    OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo6);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList2);
+
+    //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_three", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup2));
+
+    //Generate Recon container DB data.
+    OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class);
+    Table tableMock = mock(Table.class);
+    when(tableMock.getName()).thenReturn("KeyTable");
+    when(omMetadataManagerMock.getKeyTable(getBucketLayout()))
+        .thenReturn(tableMock);
+    ContainerKeyMapperTask containerKeyMapperTask  =
+        new ContainerKeyMapperTask(reconContainerMetadataManager);
+    containerKeyMapperTask.reprocess(reconOMMetadataManager);
+  }
+
+  @Test
+  public void testGetOpenKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 =
+        reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, "");

Review Comment:
   Currently, the `testGetOpenKeyInfo()` method tests only a single case. 
It would be good to cover additional cases, such as when there are multiple 
open keys in the open key table, or when the `limit` and `prevKeyPrefix` 
query parameters are used.
   
   



##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java:
##########
@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp;
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import 
org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.rules.TemporaryFolder;
+
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static 
org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test for OmDBInsightEndPoint.
+ */
+public class TestOmDBInsightEndPoint {
+
+  // Per-test scratch directories backing the OM and Recon metadata stores.
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  // SCM facade plus the Recon managers resolved from it in setUp().
+  private OzoneStorageContainerManager ozoneStorageContainerManager;
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private OMMetadataManager omMetadataManager;
+  private ReconContainerManager reconContainerManager;
+  private ContainerStateManager containerStateManager;
+  private ReconPipelineManager reconPipelineManager;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  // Endpoint under test, built by the Recon test injector in setUp().
+  private OMDBInsightEndpoint omdbInsightEndpoint;
+  // Pipeline registered in setUp(); all test block locations refer to it.
+  private Pipeline pipeline;
+  private PipelineID pipelineID;
+  // NOTE(review): random and keyCount are not referenced by any visible
+  // test method — confirm whether they are needed or can be removed.
+  private Random random = new Random();
+  private long keyCount = 5L;
+
+  @Before
+  public void setUp() throws Exception {
+    // Fresh OM metadata store in a scratch directory, wrapped by the
+    // Recon-side OM metadata manager that the endpoint reads from.
+    omMetadataManager = initializeNewOmMetadataManager(
+        temporaryFolder.newFolder());
+    reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager,
+        temporaryFolder.newFolder());
+    // Assemble the Recon test injector: real SQL and container DBs,
+    // mocked OM/SCM service providers, and the endpoint under test.
+    ReconTestInjector reconTestInjector =
+        new ReconTestInjector.Builder(temporaryFolder)
+            .withReconSqlDb()
+            .withReconOm(reconOMMetadataManager)
+            .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
+            // No longer using mock reconSCM as we need nodeDB in Facade
+            //  to establish datanode UUID to hostname mapping
+            .addBinding(OzoneStorageContainerManager.class,
+                ReconStorageContainerManagerFacade.class)
+            .withContainerDB()
+            .addBinding(StorageContainerServiceProvider.class,
+                mock(StorageContainerServiceProviderImpl.class))
+            .addBinding(OMDBInsightEndpoint.class)
+            .addBinding(ContainerHealthSchemaManager.class)
+            .build();
+    // Resolve the collaborators used by the tests from the injector.
+    reconContainerMetadataManager =
+        reconTestInjector.getInstance(ReconContainerMetadataManager.class);
+    omdbInsightEndpoint = reconTestInjector.getInstance(
+        OMDBInsightEndpoint.class);
+    ozoneStorageContainerManager =
+        reconTestInjector.getInstance(OzoneStorageContainerManager.class);
+    reconContainerManager = (ReconContainerManager)
+        ozoneStorageContainerManager.getContainerManager();
+    containerStateManager = reconContainerManager
+        .getContainerStateManager();
+    reconPipelineManager = (ReconPipelineManager)
+        ozoneStorageContainerManager.getPipelineManager();
+    // Register one pipeline before seeding OM data so that the block
+    // locations written in setUpOmData() refer to a known pipeline.
+    pipeline = getRandomPipeline();
+    pipelineID = pipeline.getId();
+    reconPipelineManager.addPipeline(pipeline);
+    setUpOmData();
+  }
+
+  /**
+   * Seeds the Recon OM metadata manager with three keys in volume
+   * "sampleVol" / bucket "bucketOne", then rebuilds the Recon
+   * container-to-key mapping from that data via ContainerKeyMapperTask.
+   *
+   * <pre>
+   * key_one   -> blocks [ {CID=1, LID=101}, {CID=1, LID=102} style ] one version
+   * key_two   -> blocks [ {CID=1, LID=103}, {CID=1, LID=104} ] two versions
+   * key_three -> blocks [ {CID=2, LID=2},   {CID=2, LID=3}   ] one version
+   * </pre>
+   */
+  private void setUpOmData() throws Exception {
+    // key_one: one key version holding blocks in containers 1 and 2.
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    BlockID blockID1 = new BlockID(1, 101);
+    OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo1);
+
+    BlockID blockID2 = new BlockID(2, 102);
+    OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2,
+        pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo2);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList);
+
+    //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_one", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup));
+
+    // key_two: two key versions (0 and 1), each with one block in
+    // container 1.
+    List<OmKeyLocationInfoGroup> infoGroups = new ArrayList<>();
+    BlockID blockID3 = new BlockID(1, 103);
+    OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3,
+        pipeline);
+
+    List<OmKeyLocationInfo> omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo3);
+    infoGroups.add(new OmKeyLocationInfoGroup(0,
+        omKeyLocationInfoListNew));
+
+    BlockID blockID4 = new BlockID(1, 104);
+    OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4,
+        pipeline);
+
+    omKeyLocationInfoListNew = new ArrayList<>();
+    omKeyLocationInfoListNew.add(omKeyLocationInfo4);
+    infoGroups.add(new OmKeyLocationInfoGroup(1,
+        omKeyLocationInfoListNew));
+
+    //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_two", "bucketOne", "sampleVol", infoGroups);
+
+    // key_three: one key version holding two blocks in container 2.
+    List<OmKeyLocationInfo> omKeyLocationInfoList2 = new ArrayList<>();
+    BlockID blockID5 = new BlockID(2, 2);
+    OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo5);
+
+    BlockID blockID6 = new BlockID(2, 3);
+    OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6,
+        pipeline);
+    omKeyLocationInfoList2.add(omKeyLocationInfo6);
+
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new
+        OmKeyLocationInfoGroup(0, omKeyLocationInfoList2);
+
+    //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ]
+    writeDataToOm(reconOMMetadataManager,
+        "key_three", "bucketOne", "sampleVol",
+        Collections.singletonList(omKeyLocationInfoGroup2));
+
+    // Generate Recon container DB data directly from the Recon OM
+    // metadata manager. (The previous OMMetadataManager/Table mocks were
+    // created and stubbed but never passed to the task, so that dead
+    // code has been removed.)
+    ContainerKeyMapperTask containerKeyMapperTask  =
+        new ContainerKeyMapperTask(reconContainerMetadataManager);
+    containerKeyMapperTask.reprocess(reconOMMetadataManager);
+  }
+
+  @Test
+  public void testGetOpenKeyInfo() throws Exception {
+    // Seed the open key table with a single key and verify the endpoint
+    // surfaces it in the non-FSO key list.
+    OmKeyInfo writtenKeyInfo =
+        getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+    reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", writtenKeyInfo);
+
+    // Sanity check: the key round-trips through the open key table.
+    OmKeyInfo readBackKeyInfo =
+        reconOMMetadataManager.getOpenKeyTable(getBucketLayout())
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", readBackKeyInfo.getKeyName());
+
+    // Query with limit -1 and an empty prevKeyPrefix (presumably
+    // "unbounded, start from the beginning" — matches the original call).
+    Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, "");
+    KeyInsightInfoResp insightEntity =
+        (KeyInsightInfoResp) openKeyInfoResp.getEntity();
+    Assertions.assertNotNull(insightEntity);
+    Assertions.assertEquals("key_one",
+        insightEntity.getNonFSOKeyInfoList().get(0).getPath());
+  }
+
+  @Test
+  public void testGetDeletedKeyInfo() throws Exception {
+    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
+
+    reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .put("/sampleVol/bucketOne/key_one", omKeyInfo);
+    OmKeyInfo omKeyInfo1 = 
reconOMMetadataManager.getKeyTable(getBucketLayout())
+        .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one", omKeyInfo1.getKeyName());
+    RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
+    reconOMMetadataManager.getDeletedTable()
+        .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo);
+    RepeatedOmKeyInfo repeatedOmKeyInfo1 =
+        reconOMMetadataManager.getDeletedTable()
+            .get("/sampleVol/bucketOne/key_one");
+    Assertions.assertEquals("key_one",
+        repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName());
+    Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, "");

Review Comment:
   Currently, the `testGetDeletedKeyInfo()` method is only testing a single 
case. It would be good to test multiple cases such as when there are **multiple 
deleted keys** in the deleted key table or when the `limit` and `prevKeyPrefix` 
query parameters are used.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to