This is an automated email from the ASF dual-hosted git repository.

aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new c55ac6a  HDDS-2174. Delete GDPR Encryption Key from metadata when a Key is deleted
c55ac6a is described below

commit c55ac6a1c7d1dc65a0d2e735b315bbf6898f6ff1
Author: dchitlangia <dineshchitlan...@gmail.com>
AuthorDate: Tue Sep 24 23:39:34 2019 -0400

    HDDS-2174. Delete GDPR Encryption Key from metadata when a Key is deleted
    
    Signed-off-by: Anu Engineer <aengin...@apache.org>
---
 .../main/java/org/apache/hadoop/ozone/OmUtils.java | 40 ++++++++++--
 .../client/rpc/TestOzoneRpcClientAbstract.java     | 76 +++++++++++++++++++++-
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 49 ++++++--------
 .../ozone/om/response/key/OMKeyDeleteResponse.java |  8 +--
 .../multipart/S3MultipartUploadAbortResponse.java  |  9 ++-
 .../S3MultipartUploadCommitPartResponse.java       | 34 +++++-----
 .../ozone/om/request/TestOMRequestUtils.java       | 11 ++--
 7 files changed, 158 insertions(+), 69 deletions(-)

diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index b7a6c2f..1417d89 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -46,6 +46,9 @@ import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
@@ -498,13 +501,36 @@ public final class OmUtils {
   }
 
   /**
-   * Returns the DB key name of a deleted key in OM metadata store. The
-   * deleted key name is the <keyName>_<deletionTimestamp>.
-   * @param key Original key name
-   * @param timestamp timestamp of deletion
-   * @return Deleted key name
+   * Prepares key info to be moved to deletedTable.
+   * 1. It strips GDPR metadata from key info
+   * 2. For the given object key, if the repeatedOmKeyInfo instance is null,
+   * no entry for this key exists in deletedTable yet, so we create a new
+   * instance to hold this key; otherwise we update the existing
+   * repeatedOmKeyInfo instance.
+   * @param keyInfo args supplied by client
+   * @param repeatedOmKeyInfo key details from deletedTable
+   * @return {@link RepeatedOmKeyInfo}
+   * @throws IOException if an I/O error occurs while checking for the key
    */
-  public static String getDeletedKeyName(String key, long timestamp) {
-    return key + "_" + timestamp;
+  public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo,
+      RepeatedOmKeyInfo repeatedOmKeyInfo) throws IOException{
+    // If this key is in a GDPR enforced bucket, then before moving
+    // KeyInfo to deletedTable, remove the GDPR related metadata from
+    // KeyInfo.
+    if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) {
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG);
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM);
+      keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET);
+    }
+
+    if(repeatedOmKeyInfo == null) {
+      //The key doesn't exist in deletedTable, so create a new instance.
+      repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
+    } else {
+      //The key exists in deletedTable, so update existing instance.
+      repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
+    }
+
+    return repeatedOmKeyInfo;
   }
 }
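The new OmUtils#prepareKeyForDelete helper above centralizes two things: it strips the GDPR metadata (GDPR_FLAG, GDPR_ALGORITHM, GDPR_SECRET) from the OmKeyInfo, and it folds the key into the existing RepeatedOmKeyInfo entry for deletedTable, creating one if none exists. A minimal caller-side sketch, assuming an OMMetadataManager is at hand; the class and method names here are made up for illustration, and the real call sites are in KeyManagerImpl and the response classes further down in this patch:

import java.io.IOException;

import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;

public final class PrepareKeyForDeleteSketch {

  /** Moves one key from keyTable to deletedTable with GDPR metadata removed. */
  public static void moveKeyToDeletedTable(OMMetadataManager metadataManager,
      String volumeName, String bucketName, String keyName) throws IOException {
    String objectKey = metadataManager.getOzoneKey(volumeName, bucketName, keyName);
    OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);

    // Existing deletedTable entry for this key, or null if there is none yet.
    RepeatedOmKeyInfo repeatedOmKeyInfo =
        metadataManager.getDeletedTable().get(objectKey);

    // Strips GDPR_FLAG/GDPR_ALGORITHM/GDPR_SECRET, then creates or extends the
    // RepeatedOmKeyInfo that will be persisted in deletedTable.
    repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo, repeatedOmKeyInfo);

    metadataManager.getKeyTable().delete(objectKey);
    metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
  }
}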
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index d91f739..9189c2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.s3.util.OzoneS3Util;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
 import org.apache.hadoop.ozone.security.acl.OzoneAclConfig;
@@ -2667,7 +2668,7 @@ public abstract class TestOzoneRpcClientAbstract {
    * @throws Exception
    */
   @Test
-  public void testGDPR() throws Exception {
+  public void testKeyReadWriteForGDPR() throws Exception {
     //Step 1
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
@@ -2733,4 +2734,77 @@ public abstract class TestOzoneRpcClientAbstract {
     Assert.assertNotEquals(text, new String(fileContent));
 
   }
+
+  /**
+   * Tests deletedKey for GDPR.
+   * 1. Create GDPR Enabled bucket.
+   * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
+   * 3. Read key and validate the content/metadata is as expected because the
+   * readKey will decrypt using the GDPR Symmetric Key with details from KeyInfo
+   * Metadata.
+   * 4. Delete this key in GDPR enabled bucket
+   * 5. Confirm the deleted key metadata in deletedTable does not contain the
+   * GDPR encryption details (flag, secret, algorithm).
+   * @throws Exception
+   */
+  @Test
+  public void testDeletedKeyForGDPR() throws Exception {
+    //Step 1
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    store.createVolume(volumeName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    BucketArgs args = BucketArgs.newBuilder()
+        .addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
+    volume.createBucket(bucketName, args);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, bucket.getName());
+    Assert.assertNotNull(bucket.getMetadata());
+    Assert.assertEquals("true",
+        bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
+
+    //Step 2
+    String text = "hello world";
+    Map<String, String> keyMetadata = new HashMap<>();
+    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
+    OzoneOutputStream out = bucket.createKey(keyName,
+        text.getBytes().length, STAND_ALONE, ONE, keyMetadata);
+    out.write(text.getBytes());
+    out.close();
+
+    //Step 3
+    OzoneKeyDetails key = bucket.getKey(keyName);
+
+    Assert.assertEquals(keyName, key.getName());
+    Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
+    Assert.assertEquals("AES",
+        key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
+    Assert.assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null);
+
+    OzoneInputStream is = bucket.readKey(keyName);
+    byte[] fileContent = new byte[text.getBytes().length];
+    is.read(fileContent);
+    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName,
+        keyName, STAND_ALONE,
+        ONE));
+    Assert.assertEquals(text, new String(fileContent));
+
+    //Step 4
+    bucket.deleteKey(keyName);
+
+    //Step 5
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+        keyName);
+    RepeatedOmKeyInfo deletedKeys =
+        omMetadataManager.getDeletedTable().get(objectKey);
+    Map<String, String> deletedKeyMetadata =
+        deletedKeys.getOmKeyInfoList().get(0).getMetadata();
+    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
+    Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
+    Assert.assertFalse(
+        deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
+  }
 }
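The test above leans on the GDPR design this patch completes: the key content is encrypted with a per-key symmetric secret that lives only in the KeyInfo metadata (via GDPRSymmetricKey), so dropping that secret on delete is what makes the data unrecoverable. A plain-JCE sketch of that idea, independent of Ozone; this is assumed, illustrative code, not the GDPRSymmetricKey implementation itself:

import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;

import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;

public final class GdprSecretSketch {

  public static void main(String[] args) throws Exception {
    // Stand-in for the per-key secret kept under OzoneConsts.GDPR_SECRET.
    byte[] secret = new byte[16];
    new SecureRandom().nextBytes(secret);
    SecretKeySpec keySpec = new SecretKeySpec(secret, "AES");

    Cipher cipher = Cipher.getInstance("AES");
    cipher.init(Cipher.ENCRYPT_MODE, keySpec);
    byte[] cipherText = cipher.doFinal("hello world".getBytes(StandardCharsets.UTF_8));

    // While the secret is still in the key metadata, reads decrypt normally.
    cipher.init(Cipher.DECRYPT_MODE, keySpec);
    System.out.println(new String(cipher.doFinal(cipherText), StandardCharsets.UTF_8));

    // After prepareKeyForDelete strips the secret before the key info lands in
    // deletedTable, nothing remains from which this AES key could be rebuilt,
    // so the key's data blocks awaiting physical deletion stay unreadable.
  }
}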
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 526274c..bae71bf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
@@ -782,15 +783,10 @@ public class KeyManagerImpl implements KeyManager {
           return;
         }
       }
-      //Check if key with same keyName exists in deletedTable and then
-      // insert/update accordingly.
       RepeatedOmKeyInfo repeatedOmKeyInfo =
           metadataManager.getDeletedTable().get(objectKey);
-      if(repeatedOmKeyInfo == null) {
-        repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
-      } else {
-        repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
-      }
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo,
+          repeatedOmKeyInfo);
       metadataManager.getKeyTable().delete(objectKey);
       metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo);
     } catch (OMException ex) {
@@ -1014,11 +1010,8 @@ public class KeyManagerImpl implements KeyManager {
         // Move this part to delete table.
         RepeatedOmKeyInfo repeatedOmKeyInfo =
             metadataManager.getDeletedTable().get(partName);
-        if(repeatedOmKeyInfo == null) {
-          repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo);
-        } else {
-          repeatedOmKeyInfo.addOmKeyInfo(keyInfo);
-        }
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            keyInfo, repeatedOmKeyInfo);
         metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
         throw new OMException("No such Multipart upload is with specified " +
             "uploadId " + uploadID, 
ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
@@ -1047,15 +1040,16 @@ public class KeyManagerImpl implements KeyManager {
           // Add the new entry in to the list of part keys.
           DBStore store = metadataManager.getStore();
           try (BatchOperation batch = store.initBatchOperation()) {
-            RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
-                getDeletedTable().get(oldPartKeyInfo.getPartName());
-            if(repeatedOmKeyInfo == null) {
-              repeatedOmKeyInfo = new RepeatedOmKeyInfo(
-                  OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
-            } else {
-              repeatedOmKeyInfo.addOmKeyInfo(
-                  OmKeyInfo.getFromProtobuf(oldPartKeyInfo.getPartKeyInfo()));
-            }
+            OmKeyInfo partKey = OmKeyInfo.getFromProtobuf(
+                oldPartKeyInfo.getPartKeyInfo());
+
+            RepeatedOmKeyInfo repeatedOmKeyInfo =
+                metadataManager.getDeletedTable()
+                    .get(oldPartKeyInfo.getPartName());
+
+            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+                partKey, repeatedOmKeyInfo);
+
             metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo);
             metadataManager.getDeletedTable().putWithBatch(batch,
                 oldPartKeyInfo.getPartName(),
@@ -1279,13 +1273,12 @@ public class KeyManagerImpl implements KeyManager {
             OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf(
                 partKeyInfo.getPartKeyInfo());
 
-            RepeatedOmKeyInfo repeatedOmKeyInfo = metadataManager.
-                getDeletedTable().get(partKeyInfo.getPartName());
-            if(repeatedOmKeyInfo == null) {
-              repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
-            } else {
-              repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
-            }
+            RepeatedOmKeyInfo repeatedOmKeyInfo =
+                metadataManager.getDeletedTable()
+                    .get(partKeyInfo.getPartName());
+
+            repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+                currentKeyPartInfo, repeatedOmKeyInfo);
 
             metadataManager.getDeletedTable().putWithBatch(batch,
                 partKeyInfo.getPartName(), repeatedOmKeyInfo);
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index 1a8ce1b..96aedd1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -69,11 +70,8 @@ public class OMKeyDeleteResponse extends OMClientResponse {
         // instance in deletedTable.
         RepeatedOmKeyInfo repeatedOmKeyInfo =
             omMetadataManager.getDeletedTable().get(ozoneKey);
-        if (repeatedOmKeyInfo == null) {
-          repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
-        } else {
-          repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
-        }
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            omKeyInfo, repeatedOmKeyInfo);
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
             ozoneKey, repeatedOmKeyInfo);
       }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
index 4fd270a..a9a4024 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -75,11 +76,9 @@ public class S3MultipartUploadAbortResponse extends OMClientResponse {
 
         RepeatedOmKeyInfo repeatedOmKeyInfo =
             omMetadataManager.getDeletedTable().get(partKeyInfo.getPartName());
-        if(repeatedOmKeyInfo == null) {
-          repeatedOmKeyInfo = new RepeatedOmKeyInfo(currentKeyPartInfo);
-        } else {
-          repeatedOmKeyInfo.addOmKeyInfo(currentKeyPartInfo);
-        }
+
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+            currentKeyPartInfo, repeatedOmKeyInfo);
 
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
             partKeyInfo.getPartName(),
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
index dca1005..fef3698 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.om.response.s3.multipart;
 
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -66,17 +67,16 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
-
     if (getOMResponse().getStatus() == NO_SUCH_MULTIPART_UPLOAD_ERROR) {
       // Means by the time we try to commit part, some one has aborted this
       // multipart upload. So, delete this part information.
       RepeatedOmKeyInfo repeatedOmKeyInfo =
           omMetadataManager.getDeletedTable().get(openKey);
-      if(repeatedOmKeyInfo == null) {
-        repeatedOmKeyInfo = new RepeatedOmKeyInfo(deletePartKeyInfo);
-      } else {
-        repeatedOmKeyInfo.addOmKeyInfo(deletePartKeyInfo);
-      }
+
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          deletePartKeyInfo, repeatedOmKeyInfo);
+
+
       omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
           openKey,
           repeatedOmKeyInfo);
@@ -86,6 +86,7 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
 
       // If we have old part info:
       // Need to do 3 steps:
+      //   0. Strip GDPR related metadata from multipart info
       //   1. add old part to delete table
       //   2. Commit multipart info which has information about this new part.
       //   3. delete this new part entry from open key table.
@@ -93,22 +94,21 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
       // This means for this multipart upload part upload, we have an old
       // part information, so delete it.
       if (oldMultipartKeyInfo != null) {
-        RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.
-            getDeletedTable().get(oldMultipartKeyInfo.getPartName());
-        if(repeatedOmKeyInfo == null) {
-          repeatedOmKeyInfo = new RepeatedOmKeyInfo(
-              OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
-        } else {
-          repeatedOmKeyInfo.addOmKeyInfo(
-              OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo()));
-        }
+        OmKeyInfo partKey =
+            OmKeyInfo.getFromProtobuf(oldMultipartKeyInfo.getPartKeyInfo());
+
+        RepeatedOmKeyInfo repeatedOmKeyInfo =
+            omMetadataManager.getDeletedTable()
+                .get(oldMultipartKeyInfo.getPartName());
+
+        repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(partKey,
+            repeatedOmKeyInfo);
 
         omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
             oldMultipartKeyInfo.getPartName(),
             repeatedOmKeyInfo);
       }
 
-
       omMetadataManager.getMultipartInfoTable().putWithBatch(batchOperation,
           multipartKey, omMultipartKeyInfo);
 
@@ -116,8 +116,6 @@ public class S3MultipartUploadCommitPartResponse extends OMClientResponse {
       //  safely delete part key info from open key table.
       omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
           openKey);
-
-
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 20ff425..2a0e835 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -28,6 +28,7 @@ import java.util.UUID;
 
 import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -374,13 +375,13 @@ public final class TestOMRequestUtils {
 
     // Delete key from KeyTable and put in DeletedKeyTable
     omMetadataManager.getKeyTable().delete(ozoneKey);
+
     RepeatedOmKeyInfo repeatedOmKeyInfo =
         omMetadataManager.getDeletedTable().get(ozoneKey);
-    if(repeatedOmKeyInfo == null) {
-      repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo);
-    } else {
-      repeatedOmKeyInfo.addOmKeyInfo(omKeyInfo);
-    }
+
+    repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo,
+        repeatedOmKeyInfo);
+
     omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
 
     return ozoneKey;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
