smengcl commented on code in PR #8587:
URL: https://github.com/apache/ozone/pull/8587#discussion_r2377934679
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java:
##########
@@ -194,35 +196,47 @@ private Pair<Pair<Integer, Long>, Boolean>
submitPurgeKeysRequest(
String deletedKey = result.getObjectKey();
if (result.isSuccess()) {
// Add key to PurgeKeys list.
- if (keysToModify != null && !keysToModify.containsKey(deletedKey)) {
- // Parse Volume and BucketName
- purgeKeys.add(deletedKey);
+ if (keysToModify == null || !keysToModify.containsKey(deletedKey)) {
+ completePurgedKeys.add(deletedKey);
if (LOG.isDebugEnabled()) {
- LOG.debug("Key {} set to be updated in OM DB, Other versions " +
- "of the key that are reclaimable are reclaimed.", deletedKey);
+ LOG.debug("Key {} set to be purged from OM DB", deletedKey);
}
- } else if (keysToModify == null) {
- purgeKeys.add(deletedKey);
+ } else {
if (LOG.isDebugEnabled()) {
- LOG.debug("Key {} set to be purged from OM DB", deletedKey);
+ LOG.debug("Key {} set to be updated in OM DB, Other versions " +
+ "of the key that are reclaimable are reclaimed.", deletedKey);
}
}
- if (keyBlockReplicatedSize != null) {
- deletedReplSize += keyBlockReplicatedSize.getOrDefault(deletedKey,
0L);
+ if (purgedKeys.containsKey(deletedKey)) {
+ deletedReplSize += purgedKeys.get(deletedKey).getPurgedBytes();
}
deletedCount++;
} else {
- // If the block deletion failed, then the deleted keys should also not
be modified.
+ // If the block deletion failed, then the deleted keys should also not
be modified and
+ // any other version of the key should also not be purged.
failedDeletedKeys.add(deletedKey);
purgeSuccess = false;
+ if (LOG.isDebugEnabled()) {
+ LOG.error("Failed Block Delete corresponding to Key {} with block
result : {}.", deletedKey,
+ result.getBlockResultList());
+ } else {
+ LOG.error("Failed Block Delete corresponding to Key {}.",
deletedKey);
+ }
}
}
+ // Filter out the key even if one version of the key purge has failed.
This is to prevent orphan blocks, and
+ // this needs to be retried.
+ completePurgedKeys = completePurgedKeys.stream()
+ .filter(i ->
!failedDeletedKeys.contains(i)).collect(Collectors.toSet());
+ // Filter out any keys that have failed and sort the purge keys based on
volume and bucket.
+ List<PurgedKey> purgedKeyList = purgedKeys.values().stream()
+ .filter(purgedKey ->
!failedDeletedKeys.contains(purgedKey.getBlockGroup().getGroupID()))
+
.sorted(Comparator.comparing(PurgedKey::getVolume).thenComparing(PurgedKey::getBucket))
+ .collect(Collectors.toList());
Review Comment:
I hope all these cases and the new logic are covered by unit/integration tests.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]