This is an automated email from the ASF dual-hosted git repository.

devesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 68e3842616 HDDS-12589. Fix Incorrect FSO Key Listing for Container-to-Key Mapping. (#8078)
68e3842616 is described below

commit 68e3842616ba98d6f1c9b76d8378ccb985141cbc
Author: Arafat2198 <[email protected]>
AuthorDate: Tue Mar 18 14:22:00 2025 +0530

    HDDS-12589. Fix Incorrect FSO Key Listing for Container-to-Key Mapping. (#8078)
---
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |  14 ++-
 .../ozone/recon/api/TestContainerEndpoint.java     | 118 +++++++++++++++++++++
 .../recon/tasks/TestContainerKeyMapperTask.java    | 116 ++++++++++++++++++++
 3 files changed, 240 insertions(+), 8 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 041bcc8e6b..f5936e34d5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -241,6 +241,11 @@ public Response getKeysForContainer(
       for (ContainerKeyPrefix containerKeyPrefix : containerKeyPrefixMap
           .keySet()) {
 
+        // break the for loop if limit has been reached
+        if (keyMetadataMap.size() == limit) {
+          break;
+        }
+
         // Directly calling getSkipCache() on the Key/FileTable table
         // instead of iterating since only full keys are supported now. We will
         // try to get the OmKeyInfo object by searching the KEY_TABLE table with
@@ -265,10 +270,7 @@ public Response getKeysForContainer(
           List<ContainerBlockMetadata> blockIds =
               getBlocks(matchedKeys, containerID);
 
-          String ozoneKey = omMetadataManager.getOzoneKey(
-              omKeyInfo.getVolumeName(),
-              omKeyInfo.getBucketName(),
-              omKeyInfo.getKeyName());
+          String ozoneKey = containerKeyPrefix.getKeyPrefix();
           lastKey = ozoneKey;
           if (keyMetadataMap.containsKey(ozoneKey)) {
             keyMetadataMap.get(ozoneKey).getVersions()
@@ -277,10 +279,6 @@ public Response getKeysForContainer(
             keyMetadataMap.get(ozoneKey).getBlockIds()
                 .put(containerKeyPrefix.getKeyVersion(), blockIds);
           } else {
-            // break the for loop if limit has been reached
-            if (keyMetadataMap.size() == limit) {
-              break;
-            }
             KeyMetadata keyMetadata = new KeyMetadata();
             keyMetadata.setBucket(omKeyInfo.getBucketName());
             keyMetadata.setVolume(omKeyInfo.getVolumeName());
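The gist of the fix above: the endpoint used to rebuild the response-map key from
volume/bucket/keyName, so two FSO files with the same name under different parent
directories collapsed into a single KeyMetadata entry. Keying the map by the raw
container key prefix, which for the FSO layout embeds the parent object ID, keeps
the duplicates distinct. A minimal standalone sketch of that idea follows; the
class name and the /volumeId/bucketId/parentId/fileName prefix layout are
illustrative assumptions, not the exact Recon on-disk format.

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Sketch only: two FSO files named "dupFile" under different parent
    // directories. The raw key prefix embeds the parent object ID, so it
    // stays unique; the reconstructed volume/bucket/keyName does not.
    public class KeyPrefixDedupSketch {
      public static void main(String[] args) {
        // Hypothetical FSO key prefixes: /volumeId/bucketId/parentId/fileName.
        String[] keyPrefixes = {
            "/-100/-200/5/dupFile",   // parent dirA (object id 5)
            "/-100/-200/6/dupFile"    // parent dirB (object id 6)
        };

        // Old behavior: map keyed by volume/bucket/keyName, identical for
        // both files, so the second entry overwrites the first.
        Map<String, String> oldMap = new LinkedHashMap<>();
        for (String prefix : keyPrefixes) {
          oldMap.put("/vol/bucket/dupFile", prefix);
        }
        System.out.println("old map size = " + oldMap.size()); // 1 -> duplicate lost

        // Fixed behavior: map keyed by the raw key prefix itself.
        Map<String, String> newMap = new LinkedHashMap<>();
        for (String prefix : keyPrefixes) {
          newMap.put(prefix, prefix);
        }
        System.out.println("new map size = " + newMap.size()); // 2 -> both kept
      }
    }

The relocated limit check follows from the same change: since distinct prefixes no
longer merge into one map entry, the cap is now enforced at the top of the loop
before each prefix is processed, rather than only when a brand-new key would be
added.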
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 65967c6e24..0dfdf047c4 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -1665,4 +1665,122 @@ public void testGetOmContainersDeletedInSCMPrevContainerParam()
     assertEquals(1, containerDiscrepancyInfoList.size());
     assertEquals(2, containerDiscrepancyInfoList.get(0).getContainerID());
   }
+
+  /**
+   * Helper method that creates duplicate FSO file keys – two keys having the same file
+   * name but under different directories. It creates the necessary volume, bucket, and
+   * directory entries, and then writes two keys using writeKeyToOm.
+   */
+  private void setUpDuplicateFSOFileKeys() throws IOException {
+    // Ensure the volume exists.
+    String volumeKey = reconOMMetadataManager.getVolumeKey(VOLUME_NAME);
+    OmVolumeArgs volArgs = OmVolumeArgs.newBuilder()
+        .setVolume(VOLUME_NAME)
+        .setAdminName("TestUser")
+        .setOwnerName("TestUser")
+        .setObjectID(VOL_OBJECT_ID)
+        .build();
+    reconOMMetadataManager.getVolumeTable().put(volumeKey, volArgs);
+
+    // Ensure the bucket exists.
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName(VOLUME_NAME)
+        .setBucketName(BUCKET_NAME)
+        .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED)
+        .setObjectID(BUCKET_OBJECT_ID)
+        .build();
+    String bucketKey = reconOMMetadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME);
+    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
+
+    // Create two directories: "dirA" and "dirB" with unique object IDs.
+    // For a top-level directory in a bucket, the parent's object id is the bucket's id.
+    OmDirectoryInfo dirA = OmDirectoryInfo.newBuilder()
+        .setName("dirA")
+        .setParentObjectID(BUCKET_OBJECT_ID)
+        .setUpdateID(1L)
+        .setObjectID(5L)   // Unique object id for dirA.
+        .build();
+    OmDirectoryInfo dirB = OmDirectoryInfo.newBuilder()
+        .setName("dirB")
+        .setParentObjectID(BUCKET_OBJECT_ID)
+        .setUpdateID(1L)
+        .setObjectID(6L)   // Unique object id for dirB.
+        .build();
+    // Build DB directory keys. (The third parameter is used to form a unique key.)
+    String dirKeyA = reconOMMetadataManager.getOzonePathKey(VOL_OBJECT_ID, BUCKET_OBJECT_ID, 5L, "dirA");
+    String dirKeyB = reconOMMetadataManager.getOzonePathKey(VOL_OBJECT_ID, BUCKET_OBJECT_ID, 6L, "dirB");
+    reconOMMetadataManager.getDirectoryTable().put(dirKeyA, dirA);
+    reconOMMetadataManager.getDirectoryTable().put(dirKeyB, dirB);
+
+    // Use a common OmKeyLocationInfoGroup.
+    OmKeyLocationInfoGroup locationInfoGroup = getLocationInfoGroup1();
+
+    // Write two FSO keys with the same file name ("dupFileKey1") but in different directories.
+    // The two entries are distinguished by their parent object IDs (dirA's 5L vs. dirB's 6L).
+    writeKeyToOm(reconOMMetadataManager,
+        "dupFileKey1",           // internal key name for the first key
+        BUCKET_NAME,
+        VOLUME_NAME,
+        "dupFileKey1",          // full file path for the first key
+        100L,                    // object id (example)
+        5L,                      // parent's object id for dirA (same as 
dirA's object id)
+        BUCKET_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(locationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    writeKeyToOm(reconOMMetadataManager,
+        "dupFileKey1",           // internal key name for the second key
+        BUCKET_NAME,
+        VOLUME_NAME,
+        "dupFileKey1",          // full file path for the second key
+        100L,
+        6L,                      // parent's object id for dirB
+        BUCKET_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(locationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+  }
+
+  /**
+   * Test method that sets up two duplicate FSO file keys (same file name but in different directories)
+   * and then verifies that the ContainerEndpoint returns two distinct key records.
+   */
+  @Test
+  public void testDuplicateFSOKeysForContainerEndpoint() throws IOException {
+    // Set up duplicate FSO file keys.
+    setUpDuplicateFSOFileKeys();
+    NSSummaryTaskWithFSO nSSummaryTaskWithFso =
+        new NSSummaryTaskWithFSO(reconNamespaceSummaryManager,
+            reconOMMetadataManager, 10);
+    nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager);
+    // Reprocess the container key mappings so that the new keys are loaded.
+    reprocessContainerKeyMapper();
+
+    // Assume that FSO keys are mapped to container ID 20L (as in previous tests for FSO).
+    Response response = containerEndpoint.getKeysForContainer(20L, -1, "");
+    KeysResponse keysResponse = (KeysResponse) response.getEntity();
+    Collection<KeyMetadata> keyMetadataList = keysResponse.getKeys();
+
+    // We expect two distinct keys.
+    assertEquals(2, keysResponse.getTotalCount());
+    assertEquals(2, keyMetadataList.size());
+
+    for (KeyMetadata km : keyMetadataList) {
+      String completePath = km.getCompletePath();
+      assertNotNull(completePath);
+      // Verify that the complete path reflects either directory "dirA" or "dirB".
+      if (completePath.contains("dirA")) {
+        assertEquals(VOLUME_NAME + "/" + BUCKET_NAME + "/dirA/dupFileKey1", completePath);
+      } else if (completePath.contains("dirB")) {
+        assertEquals(VOLUME_NAME + "/" + BUCKET_NAME + "/dirB/dupFileKey1", completePath);
+      } else {
+        throw new AssertionError("Unexpected complete path: " + completePath);
+      }
+    }
+  }
+
+
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
index fb31537ec7..0cacbf209e 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java
@@ -472,6 +472,122 @@ public void testFileTableProcess() throws Exception {
         firstKeyPrefix.getKeyPrefix());
   }
 
+  @Test
+  public void testDuplicateFSOKeysInDifferentDirectories() throws Exception {
+    // Ensure container 1 is initially empty.
+    Map<ContainerKeyPrefix, Integer> keyPrefixesForContainer =
+        reconContainerMetadataManager.getKeyPrefixesForContainer(1L);
+    assertThat(keyPrefixesForContainer).isEmpty();
+
+    Pipeline pipeline = getRandomPipeline();
+    // Create a common OmKeyLocationInfoGroup for all keys.
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    BlockID blockID = new BlockID(1L, 1L);
+    OmKeyLocationInfo omKeyLocationInfo = getOmKeyLocationInfo(blockID, pipeline);
+    omKeyLocationInfoList.add(omKeyLocationInfo);
+    OmKeyLocationInfoGroup omKeyLocationInfoGroup =
+        new OmKeyLocationInfoGroup(0L, omKeyLocationInfoList);
+
+    // Define file names.
+    String file1Key = "file1";
+    String file2Key = "file2";
+
+    // Define directory (parent) object IDs with shorter values.
+    long dir1Id = -101L;
+    long dir2Id = -102L;
+    long dir3Id = -103L;
+
+    // Write three FSO keys for "file1" with different parent object IDs.
+    writeKeyToOm(reconOMMetadataManager,
+        file1Key,                // keyName
+        BUCKET_NAME,             // bucketName
+        VOLUME_NAME,             // volName
+        file1Key,                // fileName
+        KEY_ONE_OBJECT_ID,       // objectId
+        dir1Id,                  // ObjectId for first directory
+        BUCKET_ONE_OBJECT_ID,    // bucketObjectId
+        VOL_OBJECT_ID,           // volumeObjectId
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    writeKeyToOm(reconOMMetadataManager,
+        file1Key,
+        BUCKET_NAME,
+        VOLUME_NAME,
+        file1Key,
+        KEY_ONE_OBJECT_ID,
+        dir2Id,            // ObjectId for second directory
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    writeKeyToOm(reconOMMetadataManager,
+        file1Key,
+        BUCKET_NAME,
+        VOLUME_NAME,
+        file1Key,
+        KEY_ONE_OBJECT_ID,
+        dir3Id,            // ObjectId for third directory
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    // Write three FSO keys for "file2" with different parent object IDs.
+    writeKeyToOm(reconOMMetadataManager,
+        "fso-file2",
+        BUCKET_NAME,
+        VOLUME_NAME,
+        file2Key,
+        KEY_ONE_OBJECT_ID,
+        dir1Id,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    writeKeyToOm(reconOMMetadataManager,
+        "fso-file2",
+        BUCKET_NAME,
+        VOLUME_NAME,
+        file2Key,
+        KEY_ONE_OBJECT_ID,
+        dir2Id,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    writeKeyToOm(reconOMMetadataManager,
+        "fso-file2",
+        BUCKET_NAME,
+        VOLUME_NAME,
+        file2Key,
+        KEY_ONE_OBJECT_ID,
+        dir3Id,
+        BUCKET_ONE_OBJECT_ID,
+        VOL_OBJECT_ID,
+        Collections.singletonList(omKeyLocationInfoGroup),
+        BucketLayout.FILE_SYSTEM_OPTIMIZED,
+        KEY_ONE_SIZE);
+
+    // Reprocess container key mappings.
+    ContainerKeyMapperTask containerKeyMapperTask =
+        new ContainerKeyMapperTask(reconContainerMetadataManager, omConfiguration);
+    containerKeyMapperTask.reprocess(reconOMMetadataManager);
+
+    // With our changes using the raw key prefix as the unique identifier,
+    // we expect six distinct entries in container 1.
+    keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1L);
+    assertEquals(6, keyPrefixesForContainer.size());
+  }
+
   private OmKeyInfo buildOmKeyInfo(String volume,
                                    String bucket,
                                    String key,

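Taken together, the mapper test pins down the new behavior: distinct parent object
IDs yield distinct container key prefixes, so two file names spread across three
directories produce six entries in container 1. A small self-contained sketch of
that counting argument follows; the prefix() helper and its format are assumptions
for illustration, loosely modeled on the FSO /volumeId/bucketId/parentId/fileName
layout rather than the exact Recon encoding.

    import java.util.HashSet;
    import java.util.Set;

    // Sketch only: mimics how distinct parent object IDs yield distinct
    // FSO key prefixes, so 2 file names x 3 directories = 6 entries.
    public class FsoPrefixCountSketch {
      // Hypothetical prefix builder, not the real Recon/OM key format.
      static String prefix(long volId, long bucketId, long parentId, String file) {
        return "/" + volId + "/" + bucketId + "/" + parentId + "/" + file;
      }

      public static void main(String[] args) {
        long volId = -10L, bucketId = -20L;
        long[] dirIds = {-101L, -102L, -103L};  // dir1Id..dir3Id from the test
        String[] files = {"file1", "file2"};

        Set<String> containerEntries = new HashSet<>();
        for (String file : files) {
          for (long dirId : dirIds) {
            containerEntries.add(prefix(volId, bucketId, dirId, file));
          }
        }
        // Matches the test's expectation of six distinct key prefixes.
        System.out.println("distinct entries = " + containerEntries.size()); // 6
      }
    }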
