This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch HDDS-3930
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 668e0f9fa9d5ceb365c886474a6d8b249bb8d508
Author: Bharat Viswanadham <[email protected]>
AuthorDate: Mon Jul 6 17:36:46 2020 -0700

    HDDS-3900. Fix OMKeyDeletesRequest.
---
 .../interface-client/src/main/proto/proto.lock     |  30 +++++
 .../org/apache/hadoop/ozone/audit/OMAction.java    |   1 +
 .../src/main/proto/OmClientProtocol.proto          |   4 +-
 .../interface-client/src/main/proto/proto.lock     |  26 ++--
 .../hadoop/ozone/om/request/OMClientRequest.java   |  50 +-------
 .../ozone/om/request/key/OMKeysDeleteRequest.java  | 137 +++++++++++----------
 .../om/response/key/OMKeysDeleteResponse.java      |  90 +++++---------
 7 files changed, 156 insertions(+), 182 deletions(-)

diff --git a/hadoop-hdds/interface-client/src/main/proto/proto.lock b/hadoop-hdds/interface-client/src/main/proto/proto.lock
index 1be06ae..afdaf96 100644
--- a/hadoop-hdds/interface-client/src/main/proto/proto.lock
+++ b/hadoop-hdds/interface-client/src/main/proto/proto.lock
@@ -1477,6 +1477,21 @@
       ],
       "messages": [
         {
+          "name": "UUID",
+          "fields": [
+            {
+              "id": 1,
+              "name": "mostSigBits",
+              "type": "int64"
+            },
+            {
+              "id": 2,
+              "name": "leastSigBits",
+              "type": "int64"
+            }
+          ]
+        },
+        {
           "name": "DatanodeDetailsProto",
           "fields": [
             {
@@ -1514,6 +1529,11 @@
               "id": 7,
               "name": "networkLocation",
               "type": "string"
+            },
+            {
+              "id": 100,
+              "name": "uuid128",
+              "type": "UUID"
             }
           ]
         },
@@ -1565,6 +1585,11 @@
               "id": 1,
               "name": "id",
               "type": "string"
+            },
+            {
+              "id": 100,
+              "name": "uuid128",
+              "type": "UUID"
             }
           ]
         },
@@ -1630,6 +1655,11 @@
               "id": 8,
               "name": "creationTimeStamp",
               "type": "uint64"
+            },
+            {
+              "id": 100,
+              "name": "leaderID128",
+              "type": "UUID"
             }
           ]
         },
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index cd8b126..31cccac 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -38,6 +38,7 @@ public enum OMAction implements AuditAction {
   UPDATE_BUCKET,
   UPDATE_KEY,
   PURGE_KEYS,
+  DELETE_KEYS,
 
   // S3 Bucket
   CREATE_S3_BUCKET,
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index ba193c7..96ee773 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -867,10 +867,10 @@ message DeletedKeys {
 }
 
 message DeleteKeysResponse {
-    repeated KeyInfo deletedKeys = 1;
-    repeated KeyInfo unDeletedKeys = 2;
+    optional bool status = 1;
 }
 
+
 message PurgeKeysRequest {
     repeated DeletedKeys deletedKeys = 1;
 }
diff --git a/hadoop-ozone/interface-client/src/main/proto/proto.lock b/hadoop-ozone/interface-client/src/main/proto/proto.lock
index 0331ff1..e6c434c 100644
--- a/hadoop-ozone/interface-client/src/main/proto/proto.lock
+++ b/hadoop-ozone/interface-client/src/main/proto/proto.lock
@@ -1229,6 +1229,11 @@
               "id": 9,
               "name": "updateID",
               "type": "uint64"
+            },
+            {
+              "id": 10,
+              "name": "modificationTime",
+              "type": "uint64"
             }
           ]
         },
@@ -1333,6 +1338,11 @@
               "id": 3,
               "name": "quotaInBytes",
               "type": "uint64"
+            },
+            {
+              "id": 4,
+              "name": "modificationTime",
+              "type": "uint64"
             }
           ]
         },
@@ -1504,6 +1514,11 @@
               "id": 10,
               "name": "updateID",
               "type": "uint64"
+            },
+            {
+              "id": 11,
+              "name": "modificationTime",
+              "type": "uint64"
             }
           ]
         },
@@ -2471,15 +2486,8 @@
           "fields": [
             {
               "id": 1,
-              "name": "deletedKeys",
-              "type": "KeyInfo",
-              "is_repeated": true
-            },
-            {
-              "id": 2,
-              "name": "unDeletedKeys",
-              "type": "KeyInfo",
-              "is_repeated": true
+              "name": "status",
+              "type": "bool"
             }
           ]
         },
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
index 0353144..cfda8b3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java
@@ -18,15 +18,8 @@
 
 package org.apache.hadoop.ozone.om.request;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -36,23 +29,22 @@
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.WithObjectID;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.security.UserGroupInformation;
 
 import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.util.LinkedHashMap;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.REPLAY;
 
@@ -225,36 +217,6 @@ public abstract class OMClientRequest implements RequestAuditor {
   }
 
   /**
-   * Set parameters needed for return error response to client.
-   *
-   * @param omResponse
-   * @param ex - IOException
-   * @param unDeletedKeys - Set<OmKeyInfo>
-   * @return error response need to be returned to client - OMResponse.
-   */
-  protected OMResponse createOperationKeysErrorOMResponse(
-      @Nonnull OMResponse.Builder omResponse,
-      @Nonnull IOException ex, @Nonnull Set<OmKeyInfo> unDeletedKeys) {
-    omResponse.setSuccess(false);
-    StringBuffer errorMsg = new StringBuffer();
-    DeleteKeysResponse.Builder resp = DeleteKeysResponse.newBuilder();
-    for (OmKeyInfo key : unDeletedKeys) {
-      if(key != null) {
-        resp.addUnDeletedKeys(key.getProtobuf());
-      }
-    }
-    if (errorMsg != null) {
-      omResponse.setMessage(errorMsg.toString());
-    }
-    // TODO: Currently all delete operations in OzoneBucket.java are void. Here
-    // we put the List of unDeletedKeys into Response. These KeyInfo can be
-    // used to continue deletion if client support delete retry.
-    omResponse.setDeleteKeysResponse(resp.build());
-    omResponse.setStatus(OzoneManagerRatisUtils.exceptionToResponseStatus(ex));
-    return omResponse.build();
-  }
-
-  /**
    * Add the client response to double buffer and set the flush future.
    * For responses which has status set to REPLAY it is a no-op.
    * @param trxIndex
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
index b5e8dc8..bbedb2f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java
@@ -18,31 +18,27 @@
 
 package org.apache.hadoop.ozone.om.request.key;
 
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.OMAction;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OMMetrics;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
 import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponse;
 import org.apache.hadoop.ozone.om.response.key.OMKeysDeleteResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .DeleteKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
-    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.OzoneObj;
 import org.apache.hadoop.util.Time;
@@ -51,12 +47,11 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
  * Handles DeleteKey request.
@@ -96,7 +91,7 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
         getOmRequest().getDeleteKeysRequest();
     List<KeyArgs> deleteKeyArgsList = deleteKeyRequest.getKeyArgsList();
 
-    Set<OmKeyInfo> unDeletedKeys = new HashSet<>();
+
     IOException exception = null;
     OMClientResponse omClientResponse = null;
     Result result = null;
@@ -116,89 +111,101 @@ public class OMKeysDeleteRequest extends OMKeyRequest {
     OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
         getOmRequest());
     OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+    boolean acquiredLock =
+        omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+
+    int indexFailed = 0;
     try {
-      for (KeyArgs deleteKeyArgs : deleteKeyArgsList) {
+
+      // Validate bucket and volume exists or not.
+      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+
+      // Check if any of the key in the batch cannot be deleted. If exists the
+      // batch delete will be failed.
+
+      for (indexFailed = 0; indexFailed < deleteKeyArgsList.size();
+          indexFailed++) {
+        KeyArgs deleteKeyArgs = deleteKeyArgsList.get(0);
+
+        auditMap = buildKeyArgsAuditMap(deleteKeyArgs);
         volumeName = deleteKeyArgs.getVolumeName();
         bucketName = deleteKeyArgs.getBucketName();
         keyName = deleteKeyArgs.getKeyName();
         String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
             keyName);
         OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey);
-        omKeyInfoList.add(omKeyInfo);
-        unDeletedKeys.add(omKeyInfo);
-      }
-      // Check if any of the key in the batch cannot be deleted. If exists the
-      // batch will delete failed.
-      for (KeyArgs deleteKeyArgs : deleteKeyArgsList) {
-        volumeName = deleteKeyArgs.getVolumeName();
-        bucketName = deleteKeyArgs.getBucketName();
-        keyName = deleteKeyArgs.getKeyName();
-        auditMap = buildKeyArgsAuditMap(deleteKeyArgs);
+
+        // Do we need to fail the batch if one of the key does not exist?
+        // For now following the previous code behavior. If this code changes
+        // behavior, this will be incompatible change across upgrades, and we
+        // need to version the Requests and do logic accordingly.
+
+        if (omKeyInfo == null) {
+          LOG.error("Key does not exist {}", objectKey);
+          throw new OMException("Key Not Found " + objectKey, KEY_NOT_FOUND);
+        }
+
         // check Acl
         checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
             IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
-        String objectKey = omMetadataManager.getOzoneKey(
-            volumeName, bucketName, keyName);
-
-        // Validate bucket and volume exists or not.
-        validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
-
-        OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(objectKey);
+        omKeyInfoList.add(omKeyInfo);
+      }
 
-        if (omKeyInfo == null) {
-          throw new OMException("Key not found: " + keyName, KEY_NOT_FOUND);
-        }
-        // Check if this transaction is a replay of ratis logs.
-        if (isReplay(ozoneManager, omKeyInfo, trxnLogIndex)) {
-          // Replay implies the response has already been returned to
-          // the client. So take no further action and return a dummy
-          // OMClientResponse.
-          throw new OMReplayException();
-        }
+      // Mark all keys in cache as deleted.
+      for (KeyArgs deleteKeyArgs : deleteKeyArgsList) {
+        volumeName = deleteKeyArgs.getVolumeName();
+        bucketName = deleteKeyArgs.getBucketName();
+        keyName = deleteKeyArgs.getKeyName();
+        omMetadataManager.getKeyTable().addCacheEntry(
+            new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
+                keyName)),
+            new CacheValue<>(Optional.absent(), trxnLogIndex));
       }
+
       omClientResponse = new OMKeysDeleteResponse(omResponse
-          .setDeleteKeysResponse(DeleteKeysResponse.newBuilder()).build(),
-          omKeyInfoList, trxnLogIndex, ozoneManager.isRatisEnabled());
+          .setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
+              .setStatus(true)).build(), omKeyInfoList, trxnLogIndex,
+          ozoneManager.isRatisEnabled());
       result = Result.SUCCESS;
+
     } catch (IOException ex) {
-      if (ex instanceof OMReplayException) {
-        result = Result.REPLAY;
-        omClientResponse = new OMKeyDeleteResponse(createReplayOMResponse(
-            omResponse));
-      } else {
-        result = Result.FAILURE;
-        exception = ex;
-
-        omClientResponse = new OMKeyDeleteResponse(
-            createOperationKeysErrorOMResponse(omResponse, exception,
-                unDeletedKeys));
-      }
+      result = Result.FAILURE;
+      exception = ex;
+      omClientResponse = new OMKeysDeleteResponse(
+          omResponse.setDeleteKeysResponse(DeleteKeysResponse.newBuilder()
+              .setStatus(false).build()).build());
     } finally {
+      if (acquiredLock) {
+        omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+            bucketName);
+      }
       addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
           omDoubleBufferHelper);
     }
 
-    // Performing audit logging outside of the lock.
-    if (result != Result.REPLAY) {
-      auditLog(auditLogger, buildAuditMessage(
-          OMAction.DELETE_KEY, auditMap, exception, userInfo));
+    // When we get any error during iteration build the remaining audit map
+    // from deleteKeyArgsList.
+    for (int i = indexFailed; i < deleteKeyArgsList.size(); i++) {
+      buildKeyArgsAuditMap(deleteKeyArgsList.get(i));
     }
+    auditLog(auditLogger, buildAuditMessage(
+        OMAction.DELETE_KEYS, auditMap, exception, userInfo));
+
+
     switch (result) {
     case SUCCESS:
       omMetrics.decNumKeys();
       LOG.debug("Key deleted. Volume:{}, Bucket:{}, Key:{}", volumeName,
           bucketName, keyName);
       break;
-    case REPLAY:
-      LOG.debug("Replayed Transaction {} ignored. Request: {}",
-          trxnLogIndex, deleteKeyRequest);
-      break;
     case FAILURE:
       omMetrics.incNumKeyDeleteFails();
       LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key{}."
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
index 597841c..af3a8d1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java
@@ -18,17 +18,13 @@
 
 package org.apache.hadoop.ozone.om.response.key;
 
-import com.google.common.base.Optional;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
 
 import javax.annotation.Nonnull;
@@ -36,7 +32,6 @@ import java.io.IOException;
 import java.util.List;
 
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
 
 /**
  * Response for DeleteKey request.
@@ -48,10 +43,10 @@ public class OMKeysDeleteResponse extends OMClientResponse {
   private long trxnLogIndex;
 
   public OMKeysDeleteResponse(@Nonnull OMResponse omResponse,
-      @Nonnull List<OmKeyInfo> omKeyInfoList,
+      @Nonnull List<OmKeyInfo> keyDeleteList,
       long trxnLogIndex, boolean isRatisEnabled) {
     super(omResponse);
-    this.omKeyInfoList = omKeyInfoList;
+    this.omKeyInfoList = keyDeleteList;
     this.isRatisEnabled = isRatisEnabled;
     this.trxnLogIndex = trxnLogIndex;
   }
@@ -69,65 +64,36 @@ public class OMKeysDeleteResponse extends OMClientResponse {
   public void addToDBBatch(OMMetadataManager omMetadataManager,
       BatchOperation batchOperation) throws IOException {
 
+    String volumeName = "";
+    String bucketName = "";
+    String keyName = "";
     for (OmKeyInfo omKeyInfo : omKeyInfoList) {
-      // Set the UpdateID to current transactionLogIndex
-      omKeyInfo.setUpdateID(trxnLogIndex, isRatisEnabled);
+      volumeName = omKeyInfo.getVolumeName();
+      bucketName = omKeyInfo.getBucketName();
+      keyName = omKeyInfo.getKeyName();
 
-      // For OmResponse with failure, this should do nothing. This method is
-      // not called in failure scenario in OM code.
-      if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
-        boolean acquiredLock = false;
-        String volumeName = "";
-        String bucketName = "";
+      String deleteKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+          keyName);
 
-        try {
-          volumeName = omKeyInfo.getVolumeName();
-          bucketName = omKeyInfo.getBucketName();
-          String keyName = omKeyInfo.getKeyName();
-          acquiredLock =
-              omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
-                  volumeName, bucketName);
-          // Update table cache.
-          omMetadataManager.getKeyTable().addCacheEntry(
-              new CacheKey<>(omMetadataManager.getOzoneKey(
-                  volumeName, bucketName, keyName)),
-              new CacheValue<>(Optional.absent(), trxnLogIndex));
+      omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+          deleteKey);
 
-          String ozoneKey = omMetadataManager.getOzoneKey(
-              omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
-              omKeyInfo.getKeyName());
-          omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
-              ozoneKey);
-          // If a deleted key is put in the table where a key with the same
-          // name already exists, then the old deleted key information would
-          // be lost. To avoid this, first check if a key with same name
-          // exists. deletedTable in OM Metadata stores <KeyName,
-          // RepeatedOMKeyInfo>. The RepeatedOmKeyInfo is the structure that
-          // allows us to store a list of OmKeyInfo that can be tied to same
-          // key name. For a keyName if RepeatedOMKeyInfo structure is null,
-          // we create a new instance, if it is not null, then we simply add
-          // to the list and store this instance in deletedTable.
-          RepeatedOmKeyInfo repeatedOmKeyInfo =
-              omMetadataManager.getDeletedTable().get(ozoneKey);
-          repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
-              omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(),
-              isRatisEnabled);
-          omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
-              ozoneKey, repeatedOmKeyInfo);
-          if (acquiredLock) {
-            omMetadataManager.getLock().releaseWriteLock(
-                BUCKET_LOCK, volumeName, bucketName);
-            acquiredLock = false;
-          }
-        } finally {
-          if (acquiredLock) {
-            omMetadataManager.getLock()
-                .releaseWriteLock(BUCKET_LOCK, volumeName,
-                    bucketName);
-          }
-        }
-      }
+      // If a deleted key is put in the table where a key with the same
+      // name already exists, then the old deleted key information would
+      // be lost. To avoid this, first check if a key with same name
+      // exists. deletedTable in OM Metadata stores <KeyName,
+      // RepeatedOMKeyInfo>. The RepeatedOmKeyInfo is the structure that
+      // allows us to store a list of OmKeyInfo that can be tied to same
+      // key name. For a keyName if RepeatedOMKeyInfo structure is null,
+      // we create a new instance, if it is not null, then we simply add
+      // to the list and store this instance in deletedTable.
+      RepeatedOmKeyInfo repeatedOmKeyInfo =
+          omMetadataManager.getDeletedTable().get(deleteKey);
+      repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(
+          omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(),
+          isRatisEnabled);
+      omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
+          deleteKey, repeatedOmKeyInfo);
     }
   }
-}
\ No newline at end of file

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
