This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new ca1289b7f25 HDDS-14124. Make WithParentObjectId immutable (#9478)
ca1289b7f25 is described below

commit ca1289b7f25e02515333d82775dcd07bbbdca8c1
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Dec 16 08:22:24 2025 +0100

    HDDS-14124. Make WithParentObjectId immutable (#9478)
---
 .../ozone/om/helpers/WithParentObjectId.java       | 14 +++---
 .../om/request/key/OMKeyRenameRequestWithFSO.java  | 13 +++---
 .../hadoop/ozone/om/TestOmMetadataManager.java     | 40 ++++++++++-------
 .../om/request/key/TestOMKeyCommitRequest.java     | 13 +++---
 .../request/key/TestOMKeyCommitRequestWithFSO.java | 11 ++---
 .../om/request/key/TestOMKeyRenameRequest.java     |  6 +--
 .../request/key/TestOMKeyRenameRequestWithFSO.java | 20 +++++----
 .../key/TestOMKeyRenameResponseWithFSO.java        |  4 +-
 .../response/key/TestOMOpenKeysDeleteResponse.java |  8 ++--
 ...TestS3ExpiredMultipartUploadsAbortResponse.java | 12 +++--
 .../ozone/recon/api/types/ReconBasicOmKeyInfo.java |  9 +---
 .../recon/tasks/NSSummaryTaskDbEventHandler.java   | 12 ++---
 .../ozone/recon/tasks/NSSummaryTaskWithFSO.java    | 10 ++---
 .../ozone/recon/tasks/NSSummaryTaskWithLegacy.java | 52 +++++++++++-----------
 .../ozone/recon/tasks/NSSummaryTaskWithOBS.java    | 20 ++++-----
 15 files changed, 126 insertions(+), 118 deletions(-)
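
For readers skimming the diff below: the change removes the mutable setParentObjectID setter and makes the field final, so callers now supply the parent object ID through the builder before build(). A minimal, self-contained sketch of that pattern (simplified names, not the actual Ozone classes):

    // Sketch only: mirrors the final-field + builder pattern applied in this commit.
    public abstract class ParentIdExample {
      private final long parentObjectId;   // immutable after construction; no setter

      protected ParentIdExample(Builder<?> builder) {
        this.parentObjectId = builder.parentObjectId;
      }

      public final long getParentObjectId() {
        return parentObjectId;
      }

      /** Callers set the value on the builder, then build() an immutable object. */
      public abstract static class Builder<T extends ParentIdExample> {
        private long parentObjectId;

        public Builder<T> setParentObjectId(long id) {
          this.parentObjectId = id;
          return this;
        }

        public abstract T build();
      }
    }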

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
index 7f02097d7f1..d08df684a97 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java
@@ -17,16 +17,16 @@
 
 package org.apache.hadoop.ozone.om.helpers;
 
+import net.jcip.annotations.Immutable;
+
 /**
  * Object ID with additional parent ID field.
  */
+@Immutable
 public abstract class WithParentObjectId extends WithObjectID {
-  private long parentObjectID;
-
-  public WithParentObjectId() {
-  }
+  private final long parentObjectID;
 
-  public WithParentObjectId(Builder builder) {
+  public WithParentObjectId(Builder<?> builder) {
     super(builder);
     parentObjectID = builder.getParentObjectID();
   }
@@ -58,10 +58,6 @@ public final long getParentObjectID() {
     return parentObjectID;
   }
 
-  public final void setParentObjectID(long parentObjectID) {
-    this.parentObjectID = parentObjectID;
-  }
-
   /** Builder for {@link WithParentObjectId}. */
   public abstract static class Builder<T extends WithParentObjectId> extends WithObjectID.Builder<T> {
     private long parentObjectID;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
index 3800886a849..4e93f8d1ba5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java
@@ -288,18 +288,19 @@ private OMClientResponse renameKey(OmKeyInfo toKeyParent, String toKeyName,
     String bucketKey = metadataMgr.getBucketKey(
         fromKeyValue.getVolumeName(), fromKeyValue.getBucketName());
 
-    fromKeyValue = fromKeyValue.toBuilder()
-        .setUpdateID(trxnLogIndex)
-        .build();
+    OmKeyInfo.Builder fromKeyBuilder = fromKeyValue.toBuilder()
+        .setUpdateID(trxnLogIndex);
     // Set toFileName
-    fromKeyValue.setKeyName(toKeyFileName);
+    fromKeyBuilder.setKeyName(toKeyFileName);
     // Set toKeyObjectId
     if (toKeyParent != null) {
-      fromKeyValue.setParentObjectID(toKeyParent.getObjectID());
+      fromKeyBuilder.setParentObjectID(toKeyParent.getObjectID());
     } else {
       omBucketInfo = metadataMgr.getBucketTable().get(bucketKey);
-      fromKeyValue.setParentObjectID(omBucketInfo.getObjectID());
+      fromKeyBuilder.setParentObjectID(omBucketInfo.getObjectID());
     }
+    fromKeyValue = fromKeyBuilder.build();
+
     // Set modification time
     omBucketInfo = setModificationTime(ommm, omBucketInfo, toKeyParent, volumeId, bucketId,
         modificationTime, dirTable, trxnLogIndex);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 43219d5bd01..d905fb23461 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -695,13 +695,15 @@ public void testListOpenFiles(BucketLayout bucketLayout) throws Exception {
 
     int numOpenKeys = 3;
     for (int i = 0; i < numOpenKeys; i++) {
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyPrefix + i,
-              RatisReplicationConfig.getInstance(ONE))
-          .build();
+      OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyPrefix + i,
+          RatisReplicationConfig.getInstance(ONE));
+      if (bucketLayout.isFileSystemOptimized()) {
+        keyInfoBuilder.setParentObjectID(i);
+      }
+      final OmKeyInfo keyInfo = keyInfoBuilder.build();
 
       final String dbOpenKeyName;
       if (bucketLayout.isFileSystemOptimized()) {
-        keyInfo.setParentObjectID(i);
         OMRequestTestUtils.addFileToKeyTable(true, false,
             keyInfo.getFileName(), keyInfo, clientID, 0L, omMetadataManager);
         dbOpenKeyName = omMetadataManager.getOpenFileName(volumeId, bucketId,
@@ -811,14 +813,16 @@ public void testGetExpiredOpenKeys(BucketLayout bucketLayout)
     for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) {
       final long creationTime = i < numExpiredOpenKeys ?
           expiredOpenKeyCreationTime : Time.now();
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
+      final OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(
               volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE))
-          .setCreationTime(creationTime)
-          .build();
+          .setCreationTime(creationTime);
+      if (bucketLayout.isFileSystemOptimized()) {
+        keyInfoBuilder.setParentObjectID(i);
+      }
+      final OmKeyInfo keyInfo = keyInfoBuilder.build();
 
       final String dbOpenKeyName;
       if (bucketLayout.isFileSystemOptimized()) {
-        keyInfo.setParentObjectID(i);
         OMRequestTestUtils.addFileToKeyTable(true, false,
             keyInfo.getFileName(), keyInfo, clientID, 0L, omMetadataManager);
         dbOpenKeyName = omMetadataManager.getOpenFileName(volumeId, bucketId,
@@ -883,10 +887,13 @@ public void testGetExpiredOpenKeysExcludeMPUKeys(
     // Ensure that "expired" MPU-related open keys are not fetched.
     // MPU-related open keys, identified by isMultipartKey = false
     for (int i = 0; i < numExpiredMPUOpenKeys; i++) {
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i,
+      final OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i,
               RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .setCreationTime(expiredOpenKeyCreationTime)
-          .build();
+          .setCreationTime(expiredOpenKeyCreationTime);
+      if (bucketLayout.isFileSystemOptimized()) {
+        keyInfoBuilder.setParentObjectID(i);
+      }
+      final OmKeyInfo keyInfo = keyInfoBuilder.build();
       assertThat(keyInfo.getModificationTime()).isPositive();
 
       final String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
@@ -896,7 +903,6 @@ public void testGetExpiredOpenKeysExcludeMPUKeys(
               HddsProtos.ReplicationFactor.ONE, 0L);
 
       if (bucketLayout.isFileSystemOptimized()) {
-        keyInfo.setParentObjectID(i);
         OMRequestTestUtils.addMultipartKeyToOpenFileTable(false,
             keyInfo.getFileName(), keyInfo, uploadId, 0L, omMetadataManager);
       } else {
@@ -916,10 +922,13 @@ public void testGetExpiredOpenKeysExcludeMPUKeys(
     // HDDS-9017. Although these open keys are MPU-related,
     // the isMultipartKey flags are set to false
     for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) {
-      final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
+      final OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(
               volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE))
-          .setCreationTime(expiredOpenKeyCreationTime)
-          .build();
+          .setCreationTime(expiredOpenKeyCreationTime);
+      if (bucketLayout.isFileSystemOptimized()) {
+        keyInfoBuilder.setParentObjectID(i);
+      }
+      final OmKeyInfo keyInfo = keyInfoBuilder.build();
 
       final String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
       final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils.
@@ -928,7 +937,6 @@ public void testGetExpiredOpenKeysExcludeMPUKeys(
               HddsProtos.ReplicationFactor.ONE, 0L);
 
       if (bucketLayout.isFileSystemOptimized()) {
-        keyInfo.setParentObjectID(i);
         OMRequestTestUtils.addMultipartKeyToOpenFileTable(false,
             keyInfo.getFileName(), keyInfo, uploadId, 0L, omMetadataManager);
       } else {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index 8ccac5cd335..533ecbeff3d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -240,12 +240,10 @@ public void testAtomicRewrite() throws Exception {
     List<OzoneAcl> acls = Collections.singletonList(OzoneAcl.parseAcl("user:foo:rw"));
     OmKeyInfo.Builder omKeyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(
         volumeName, bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>()));
-    OmKeyInfo omKeyInfo = omKeyInfoBuilder.setExpectedDataGeneration(1L)
-        .addAcl(acls.get(0))
-        .build();
-    omKeyInfo.appendNewBlocks(allocatedLocationList, false);
+    omKeyInfoBuilder.setExpectedDataGeneration(1L);
+    omKeyInfoBuilder.addAcl(acls.get(0));
 
-    String openKey = addKeyToOpenKeyTable(allocatedLocationList, omKeyInfo);
+    String openKey = addKeyToOpenKeyTable(allocatedLocationList, omKeyInfoBuilder);
     OmKeyInfo openKeyInfo = openKeyTable.get(openKey);
     assertNotNull(openKeyInfo);
     assertEquals(acls, openKeyInfo.getAcls());
@@ -935,7 +933,10 @@ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
   }
 
   @Nonnull
-  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList, OmKeyInfo keyInfo) throws Exception {
+  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList, OmKeyInfo.Builder keyInfoBuilder)
+      throws Exception {
+    OmKeyInfo keyInfo = keyInfoBuilder.build();
+    keyInfo.appendNewBlocks(locationList, false);
     OMRequestTestUtils.addKeyToTable(true, false, keyInfo, clientID, 0, omMetadataManager);
 
     return omMetadataManager.getOpenKey(volumeName, bucketName,
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
index 07ddf966a83..041090c0183 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java
@@ -67,7 +67,7 @@ protected String getOzonePathKey() throws IOException {
   }
 
   @Override
-  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList, OmKeyInfo keyInfo)
+  protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList, OmKeyInfo.Builder keyInfoBuilder)
       throws Exception {
     // need to initialize parentID
     if (getParentDir() == null) {
@@ -76,7 +76,9 @@ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList, OmKe
       parentID = OMRequestTestUtils.addParentsToDirTable(volumeName,
           bucketName, getParentDir(), omMetadataManager);
     }
-    keyInfo.setParentObjectID(parentID);
+    keyInfoBuilder.setParentObjectID(parentID);
+
+    OmKeyInfo keyInfo = keyInfoBuilder.build();
     keyInfo.appendNewBlocks(locationList, false);
 
     String fileName = OzoneFSUtils.getFileName(keyName);
@@ -90,12 +92,11 @@ protected String addKeyToOpenKeyTable(List<OmKeyLocationInfo> locationList)
       throws Exception {
     long objectId = 100;
 
-    OmKeyInfo omKeyInfoFSO =
+    OmKeyInfo.Builder omKeyInfoFSO =
         OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
                 RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false))
             .setObjectID(objectId)
-            .setUpdateID(100L)
-            .build();
+            .setUpdateID(100L);
     return addKeyToOpenKeyTable(locationList, omKeyInfoFSO);
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
index 10b31d4d861..145cb364d9d 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java
@@ -52,7 +52,7 @@ public void createParentKey() throws Exception {
         omMetadataManager, getBucketLayout());
     fromKeyName = new Path("fromKey").toString();
     toKeyName = new Path("toKey").toString();
-    fromKeyInfo = getOmKeyInfo(fromKeyName);
+    fromKeyInfo = getOmKeyInfo(fromKeyName).build();
     dbToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName);
   }
 
@@ -233,9 +233,9 @@ protected OMRequest createRenameKeyRequest(
         .setCmdType(OzoneManagerProtocolProtos.Type.RenameKey).build();
   }
 
-  protected OmKeyInfo getOmKeyInfo(String keyName) {
+  protected OmKeyInfo.Builder getOmKeyInfo(String keyName) {
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
-        replicationConfig).build();
+        replicationConfig);
   }
 
   protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception {
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
index ac691020f37..e79f55a53dd 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java
@@ -62,12 +62,15 @@ public void createParentKey() throws Exception {
     String toKeyParentName = UUID.randomUUID().toString();
     fromKeyName = new Path(fromKeyParentName, "fromKey").toString();
     toKeyName = new Path(toKeyParentName, "toKey").toString();
-    fromKeyParentInfo = getOmKeyInfo(fromKeyParentName);
-    fromKeyParentInfo.setParentObjectID(bucketId);
-    toKeyParentInfo = getOmKeyInfo(toKeyParentName);
-    toKeyParentInfo.setParentObjectID(bucketId);
-    fromKeyInfo = getOmKeyInfo(fromKeyName);
-    fromKeyInfo.setParentObjectID(fromKeyParentInfo.getObjectID());
+    fromKeyParentInfo = getOmKeyInfo(fromKeyParentName)
+        .setParentObjectID(bucketId)
+        .build();
+    toKeyParentInfo = getOmKeyInfo(toKeyParentName)
+        .setParentObjectID(bucketId)
+        .build();
+    fromKeyInfo = getOmKeyInfo(fromKeyName)
+        .setParentObjectID(fromKeyParentInfo.getObjectID())
+        .build();
     OMRequestTestUtils.addDirKeyToDirTable(false,
         OMFileRequest.getDirectoryInfo(fromKeyParentInfo), volumeName,
         bucketName, txnLogId, omMetadataManager);
@@ -177,12 +180,11 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
   }
 
   @Override
-  protected OmKeyInfo getOmKeyInfo(String keyName) {
+  protected OmKeyInfo.Builder getOmKeyInfo(String keyName) {
     long bucketId = random.nextLong();
     return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE))
         .setObjectID(bucketId + 100L)
-        .setParentObjectID(bucketId + 101L)
-        .build();
+        .setParentObjectID(bucketId + 101L);
   }
 
   @Override
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
index 073d9ccb489..a1d14fadad3 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java
@@ -82,12 +82,12 @@ protected void createParent() {
     String toKeyParentName = UUID.randomUUID().toString();
     fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig)
         .setObjectID(bucketId + 100L)
+        .setParentObjectID(bucketId)
         .build();
     toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig)
         .setObjectID(bucketId + 101L)
+        .setParentObjectID(bucketId)
         .build();
-    fromKeyParent.setParentObjectID(bucketId);
-    toKeyParent.setParentObjectID(bucketId);
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     bucketInfo = TestOMResponseUtils.createBucket(volumeName, bucketName);
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
index 653934698ea..9ee50336d19 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
@@ -207,8 +207,11 @@ private Map<String, Pair<Long, OmKeyInfo>> addOpenKeysToDB(String volume, int nu
       long clientID = random.nextLong();
       long parentID = random.nextLong();
 
-      OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume,
-          bucket, key, replicationConfig).build();
+      OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(volume, bucket, key, replicationConfig);
+      if (getBucketLayout().isFileSystemOptimized()) {
+        keyInfoBuilder.setParentObjectID(parentID);
+      }
+      OmKeyInfo omKeyInfo = keyInfoBuilder.build();
 
       if (keyLength > 0) {
         OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength);
@@ -223,7 +226,6 @@ private Map<String, Pair<Long, OmKeyInfo>> addOpenKeysToDB(String volume, int nu
         String file = OzoneFSUtils.getFileName(key);
         final long volumeId = omMetadataManager.getVolumeId(volume);
         final long bucketId = omMetadataManager.getBucketId(volume, bucket);
-        omKeyInfo.setParentObjectID(parentID);
         OMRequestTestUtils.addFileToKeyTable(true, false, file, omKeyInfo,
             clientID, 0L, omMetadataManager);
         openKey = omMetadataManager.getOpenFileName(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
index 9e8ad383655..96190117430 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java
@@ -277,12 +277,16 @@ private Map<OmBucketInfo, List<OmMultipartAbortInfo>> addMPUsToDB(
           bucket, omMetadataManager, getBucketLayout());
 
       ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE);
-      final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig,
-              new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
-          .build();
+      OmKeyInfo.Builder keyInfoBuilder = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig,
+              new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true));
+
+      if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
+        keyInfoBuilder.setParentObjectID(omBucketInfo.getObjectID());
+      }
+
+      final OmKeyInfo omKeyInfo = keyInfoBuilder.build();
 
       if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
-        omKeyInfo.setParentObjectID(omBucketInfo.getObjectID());
         OMRequestTestUtils.addMultipartKeyToOpenFileTable(false,
             omKeyInfo.getFileName(), omKeyInfo, uploadId, 0L,
             omMetadataManager);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ReconBasicOmKeyInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ReconBasicOmKeyInfo.java
index df62f72c845..71af52e0867 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ReconBasicOmKeyInfo.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ReconBasicOmKeyInfo.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.recon.api.types;
 
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import java.util.Objects;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -25,16 +24,12 @@
 import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
 import org.apache.hadoop.hdds.utils.db.Proto2Codec;
 import org.apache.hadoop.ozone.om.helpers.QuotaUtil;
-import org.apache.hadoop.ozone.om.helpers.WithParentObjectId;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 /**
  * Lightweight OmKeyInfo class.
  */
-@JsonIgnoreProperties({
-    "metadata", "objectID", "updateID", "parentObjectID", "objectInfo"
-})
-public final class ReconBasicOmKeyInfo extends WithParentObjectId {
+public final class ReconBasicOmKeyInfo {
 
   private final String volumeName;
   private final String bucketName;
@@ -59,7 +54,7 @@ public final class ReconBasicOmKeyInfo extends WithParentObjectId {
   private final ReplicationConfig replicationConfig;
 
   private final boolean isFile;
-  private long parentId;
+  private final long parentId;
 
   public static Codec<ReconBasicOmKeyInfo> getCodec() {
     return DelegatedCodec.decodeOnly(
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
index 6d5d42f0880..974c109bb42 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskDbEventHandler.java
@@ -84,8 +84,7 @@ private void updateNSSummariesToDB(Map<Long, NSSummary> nsSummaryMap, Collection
   }
 
   protected void handlePutKeyEvent(OmKeyInfo keyInfo, Map<Long,
-      NSSummary> nsSummaryMap) throws IOException {
-    long parentObjectId = keyInfo.getParentObjectID();
+      NSSummary> nsSummaryMap, long parentObjectId) throws IOException {
     // Try to get the NSSummary from our local map that maps NSSummaries to IDs
     NSSummary nsSummary = nsSummaryMap.get(parentObjectId);
     if (nsSummary == null) {
@@ -182,10 +181,11 @@ protected void handlePutDirEvent(OmDirectoryInfo directoryInfo,
     }
   }
 
-  protected void handleDeleteKeyEvent(OmKeyInfo keyInfo,
-                                      Map<Long, NSSummary> nsSummaryMap)
-      throws IOException {
-    long parentObjectId = keyInfo.getParentObjectID();
+  protected void handleDeleteKeyEvent(
+      OmKeyInfo keyInfo,
+      Map<Long, NSSummary> nsSummaryMap,
+      long parentObjectId
+  ) throws IOException {
     // Try to get the NSSummary from our local map that maps NSSummaries to IDs
     NSSummary nsSummary = nsSummaryMap.get(parentObjectId);
     if (nsSummary == null) {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
index 0e7f41c16b5..0c3287d9eab 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithFSO.java
@@ -198,21 +198,21 @@ private void handleUpdateOnFileTable(OMDBUpdateEvent<String, ? extends WithParen
 
     switch (action) {
     case PUT:
-      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyInfo.getParentObjectID());
       break;
 
     case DELETE:
-      handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyInfo.getParentObjectID());
       break;
 
     case UPDATE:
       if (oldKeyInfo != null) {
         // delete first, then put
-        handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+        handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap, oldKeyInfo.getParentObjectID());
       } else {
         LOG.warn("Update event does not have the old keyInfo for {}.", 
omdbUpdateEvent.getKey());
       }
-      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyInfo.getParentObjectID());
       break;
 
     default:
@@ -251,7 +251,7 @@ public boolean reprocessWithFSO(OMMetadataManager omMetadataManager) {
         while (keyTableIter.hasNext()) {
           Table.KeyValue<String, OmKeyInfo> kv = keyTableIter.next();
           OmKeyInfo keyInfo = kv.getValue();
-          handlePutKeyEvent(keyInfo, nsSummaryMap);
+          handlePutKeyEvent(keyInfo, nsSummaryMap, keyInfo.getParentObjectID());
           if (nsSummaryMap.size() >= nsSummaryFlushToDBMaxThreshold) {
             if (!flushAndCommitNSToDB(nsSummaryMap)) {
               return false;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
index 6ad94797603..6dec8087c0a 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java
@@ -153,27 +153,27 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo,
                                            OMDBUpdateEvent.OMDBUpdateAction action,
                                            Map<Long, NSSummary> nsSummaryMap)
       throws IOException {
-    setKeyParentID(updatedKeyInfo);
+    long updatedKeyParentObjectID = setKeyParentID(updatedKeyInfo);
 
     if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
       switch (action) {
       case PUT:
-        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
         break;
 
       case DELETE:
-        handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+        handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
         break;
 
       case UPDATE:
         if (oldKeyInfo != null) {
-          setKeyParentID(oldKeyInfo);
-          handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+          long oldKeyParentObjectID = setKeyParentID(oldKeyInfo);
+          handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap, oldKeyParentObjectID);
         } else {
           LOG.warn("Update event does not have the old keyInfo for {}.",
               updatedKeyInfo.getKeyName());
         }
-        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+        handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
         break;
 
       default:
@@ -183,7 +183,7 @@ private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo,
       OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder()
           .setName(updatedKeyInfo.getKeyName())
           .setObjectID(updatedKeyInfo.getObjectID())
-          .setParentObjectID(updatedKeyInfo.getParentObjectID())
+          .setParentObjectID(updatedKeyParentObjectID)
           .build();
 
       OmDirectoryInfo oldDirectoryInfo = null;
@@ -227,26 +227,26 @@ private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo,
                                             OMDBUpdateEvent.OMDBUpdateAction action,
                                             Map<Long, NSSummary> nsSummaryMap)
       throws IOException {
-    setParentBucketId(updatedKeyInfo);
+    long updatedKeyParentObjectID = setParentBucketId(updatedKeyInfo);
 
     switch (action) {
     case PUT:
-      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
       break;
 
     case DELETE:
-      handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
       break;
 
     case UPDATE:
       if (oldKeyInfo != null) {
-        setParentBucketId(oldKeyInfo);
-        handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+        long oldKeyParentObjectID = setParentBucketId(oldKeyInfo);
+        handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap, oldKeyParentObjectID);
       } else {
         LOG.warn("Update event does not have the old keyInfo for {}.",
             updatedKeyInfo.getKeyName());
       }
-      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+      handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, updatedKeyParentObjectID);
       break;
 
     default:
@@ -278,23 +278,23 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) {
 
           if (enableFileSystemPaths) {
             // The LEGACY bucket is a file system bucket.
-            setKeyParentID(keyInfo);
+            long parentObjectID = setKeyParentID(keyInfo);
 
             if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) {
               OmDirectoryInfo directoryInfo =
                   new OmDirectoryInfo.Builder()
                       .setName(keyInfo.getKeyName())
                       .setObjectID(keyInfo.getObjectID())
-                      .setParentObjectID(keyInfo.getParentObjectID())
+                      .setParentObjectID(parentObjectID)
                       .build();
               handlePutDirEvent(directoryInfo, nsSummaryMap);
             } else {
-              handlePutKeyEvent(keyInfo, nsSummaryMap);
+              handlePutKeyEvent(keyInfo, nsSummaryMap, parentObjectID);
             }
           } else {
             // The LEGACY bucket is an object store bucket.
-            setParentBucketId(keyInfo);
-            handlePutKeyEvent(keyInfo, nsSummaryMap);
+            long parentObjectID = setParentBucketId(keyInfo);
+            handlePutKeyEvent(keyInfo, nsSummaryMap, parentObjectID);
           }
           if (nsSummaryMap.size() >= nsSummaryFlushToDBMaxThreshold) {
             if (!flushAndCommitNSToDB(nsSummaryMap)) {
@@ -321,11 +321,11 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) {
   /**
    * KeyTable entries don't have the parentId set.
    * In order to reuse the existing FSO methods that rely on
-   * the parentId, we have to set it explicitly.
+   * the parentId, we have to look it up.
    * @param keyInfo
    * @throws IOException
    */
-  private void setKeyParentID(OmKeyInfo keyInfo) throws IOException {
+  private long setKeyParentID(OmKeyInfo keyInfo) throws IOException {
     String[] keyPath = keyInfo.getKeyName().split(OM_KEY_PREFIX);
 
     // If the path contains only one key then keyPath.length
@@ -344,7 +344,7 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException {
           .getSkipCache(fullParentKeyName);
 
       if (parentKeyInfo != null) {
-        keyInfo.setParentObjectID(parentKeyInfo.getObjectID());
+        return parentKeyInfo.getObjectID();
       } else {
         LOG.warn("ParentKeyInfo is null for key: {} in volume: {}, bucket: {}. 
Full Parent Key: {}",
             keyInfo.getKeyName(), keyInfo.getVolumeName(), 
keyInfo.getBucketName(), fullParentKeyName);
@@ -352,16 +352,14 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException {
                 keyInfo.getKeyName());
       }
     } else {
-      setParentBucketId(keyInfo);
+      return setParentBucketId(keyInfo);
     }
   }
 
   /**
-   * Set the parent object ID for a bucket.
-   * @param keyInfo
-   * @throws IOException
+   * Look up the parent object ID for a bucket.
    */
-  private void setParentBucketId(OmKeyInfo keyInfo)
+  private long setParentBucketId(OmKeyInfo keyInfo)
       throws IOException {
     String bucketKey = getReconOMMetadataManager()
         .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName());
@@ -369,7 +367,7 @@ private void setParentBucketId(OmKeyInfo keyInfo)
         getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey);
 
     if (parentBucketInfo != null) {
-      keyInfo.setParentObjectID(parentBucketInfo.getObjectID());
+      return parentBucketInfo.getObjectID();
     } else {
       LOG.warn("ParentBucketInfo is null for key: {} in volume: {}, bucket: 
{}",
           keyInfo.getKeyName(), keyInfo.getVolumeName(), 
keyInfo.getBucketName());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
index e1c6f4542c4..699e0612544 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java
@@ -88,9 +88,9 @@ public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) {
             continue;
           }
 
-          setKeyParentID(keyInfo);
+          long parentObjectID = getKeyParentID(keyInfo);
 
-          handlePutKeyEvent(keyInfo, nsSummaryMap);
+          handlePutKeyEvent(keyInfo, nsSummaryMap, parentObjectID);
           if (nsSummaryMap.size() >= nsSummaryFlushToDBMaxThreshold) {
             if (!flushAndCommitNSToDB(nsSummaryMap)) {
               return false;
@@ -172,25 +172,25 @@ public Pair<Integer, Boolean> processWithOBS(OMUpdateEventBatch events,
           continue;
         }
 
-        setKeyParentID(updatedKeyInfo);
+        long parentObjectID = getKeyParentID(updatedKeyInfo);
 
         switch (action) {
         case PUT:
-          handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+          handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, parentObjectID);
           break;
         case DELETE:
-          handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap);
+          handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap, parentObjectID);
           break;
         case UPDATE:
           if (oldKeyInfo != null) {
             // delete first, then put
-            setKeyParentID(oldKeyInfo);
-            handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap);
+            long oldKeyParentObjectID = getKeyParentID(oldKeyInfo);
+            handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap, oldKeyParentObjectID);
           } else {
             LOG.warn("Update event does not have the old keyInfo for {}.",
                 updatedKey);
           }
-          handlePutKeyEvent(updatedKeyInfo, nsSummaryMap);
+          handlePutKeyEvent(updatedKeyInfo, nsSummaryMap, parentObjectID);
           break;
         default:
           LOG.debug("Skipping DB update event: {}", action);
@@ -228,7 +228,7 @@ public Pair<Integer, Boolean> processWithOBS(OMUpdateEventBatch events,
    * @param keyInfo
    * @throws IOException
    */
-  private void setKeyParentID(OmKeyInfo keyInfo)
+  private long getKeyParentID(OmKeyInfo keyInfo)
       throws IOException {
     String bucketKey = getReconOMMetadataManager()
         .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName());
@@ -236,7 +236,7 @@ private void setKeyParentID(OmKeyInfo keyInfo)
         getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey);
 
     if (parentBucketInfo != null) {
-      keyInfo.setParentObjectID(parentBucketInfo.getObjectID());
+      return parentBucketInfo.getObjectID();
     } else {
       LOG.warn("ParentBucketInfo is null for key: %s in volume: %s, bucket: 
%s",
           keyInfo.getKeyName(), keyInfo.getVolumeName(), 
keyInfo.getBucketName());


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

