This is an automated email from the ASF dual-hosted git repository.
sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 6e0c753d78 HDDS-11509. logging improvements for directory deletion (#7314)
6e0c753d78 is described below
commit 6e0c753d785f283203c2be5bce0f80bd0099c8be
Author: Tejaskriya <[email protected]>
AuthorDate: Wed Dec 18 15:54:42 2024 +0530
HDDS-11509. logging improvements for directory deletion (#7314)
---
.../common/impl/BlockDeletingService.java | 9 ++----
.../hdds/scm/block/SCMBlockDeletingService.java | 6 ++--
.../om/service/AbstractKeyDeletingService.java | 15 ++++++----
.../ozone/om/service/DirectoryDeletingService.java | 6 ++--
.../ozone/om/service/OpenKeyCleanupService.java | 33 ++++++++++++++++++----
5 files changed, 48 insertions(+), 21 deletions(-)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
index 8c090713de..0a6495ced0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
@@ -137,8 +137,7 @@ public class BlockDeletingService extends BackgroundService {
chooseContainerForBlockDeletion(getBlockLimitPerInterval(),
containerDeletionPolicy);
- BackgroundTask
- containerBlockInfos = null;
+ BackgroundTask containerBlockInfos = null;
long totalBlocks = 0;
for (ContainerBlockInfo containerBlockInfo : containers) {
BlockDeletingTaskBuilder builder =
@@ -149,13 +148,11 @@ public class BlockDeletingService extends BackgroundService {
containerBlockInfos = builder.build();
queue.add(containerBlockInfos);
totalBlocks += containerBlockInfo.getNumBlocksToDelete();
+ LOG.debug("Queued- Container: {}, deleted blocks: {}",
+ containerBlockInfo.getContainerData().getContainerID(),
containerBlockInfo.getNumBlocksToDelete());
}
metrics.incrTotalBlockChosenCount(totalBlocks);
metrics.incrTotalContainerChosenCount(containers.size());
- if (containers.size() > 0) {
- LOG.debug("Queued {} blocks from {} containers for deletion",
- totalBlocks, containers.size());
- }
} catch (StorageContainerException e) {
LOG.warn("Failed to initiate block deleting tasks, "
+ "caused by unable to get containers info. "
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 782ef05c4c..e3f80f8fd4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -167,8 +167,9 @@ public class SCMBlockDeletingService extends BackgroundService
// These nodes will be considered for this iteration.
final Set<DatanodeDetails> included =
getDatanodesWithinCommandLimit(datanodes);
+ int blockDeletionLimit = getBlockDeleteTXNum();
DatanodeDeletedBlockTransactions transactions =
- deletedBlockLog.getTransactions(getBlockDeleteTXNum(), included);
+ deletedBlockLog.getTransactions(blockDeletionLimit, included);
if (transactions.isEmpty()) {
return EmptyTaskResult.newResult();
@@ -204,10 +205,11 @@ public class SCMBlockDeletingService extends BackgroundService
}
}
LOG.info("Totally added {} blocks to be deleted for"
- + " {} datanodes / {} totalnodes, task elapsed time: {}ms",
+ " {} datanodes / {} totalnodes, limit per iteration : {}blocks, task elapsed time: {}ms",
transactions.getBlocksDeleted(),
transactions.getDatanodeTransactionMap().size(),
included.size(),
+ blockDeletionLimit,
Time.monotonicNow() - startTime);
deletedBlockLog.incrementCount(new ArrayList<>(processedTxIDs));
} catch (NotLeaderException nle) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index 0ac6c98660..e60180938f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
import org.apache.hadoop.ozone.om.KeyManager;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -126,8 +127,10 @@ public abstract class AbstractKeyDeletingService extends BackgroundService
// OMRequest model.
delCount = deleteAllKeys(blockDeletionResults, manager);
}
- LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms",
- delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime);
+ int limit = ozoneManager.getConfiguration().getInt(OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK,
+ OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
+ LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms. Limit per task is {}.",
+ delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime, limit);
}
return delCount;
}
@@ -402,6 +405,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService
int remainingBufLimit, KeyManager keyManager,
UUID expectedPreviousSnapshotId, long rnCnt) {
+ long limit = remainNum;
// Optimization to handle delete sub-dir and keys to remove quickly
// This case will be useful to handle when depth of directory is high
int subdirDelNum = 0;
@@ -449,13 +453,14 @@ public abstract class AbstractKeyDeletingService extends BackgroundService
deletedDirsCount.addAndGet(dirNum + subdirDelNum);
movedDirsCount.addAndGet(subDirNum - subdirDelNum);
movedFilesCount.addAndGet(subFileNum);
+ long timeTakenInIteration = Time.monotonicNow() - startTime;
LOG.info("Number of dirs deleted: {}, Number of sub-dir " +
"deleted: {}, Number of sub-files moved:" +
" {} to DeletedTable, Number of sub-dirs moved {} to " +
- "DeletedDirectoryTable, iteration elapsed: {}ms," +
+ "DeletedDirectoryTable, limit per iteration: {}, iteration elapsed: {}ms, " +
" totalRunCount: {}",
- dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum),
- Time.monotonicNow() - startTime, rnCnt);
+ dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), limit,
+ timeTakenInIteration, rnCnt);
}
return remainNum;
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
index a8270f92f2..05555439ac 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
@@ -213,11 +213,11 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService {
public BackgroundTaskResult call() {
try {
if (shouldRun()) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Running DirectoryDeletingService");
- }
isRunningOnAOS.set(true);
long rnCnt = getRunCount().incrementAndGet();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Running DirectoryDeletingService. {}", rnCnt);
+ }
long dirNum = 0L;
long subDirNum = 0L;
long subFileNum = 0L;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
index 6d53e48a0f..d185d87290 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/OpenKeyCleanupService.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteOpenKeysRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
@@ -51,8 +52,11 @@ import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
+
/**
* This is the background service to delete hanging open keys.
* Scan the metadata of om periodically to get
@@ -180,6 +184,7 @@ public class OpenKeyCleanupService extends BackgroundService {
if (!shouldRun()) {
return BackgroundTaskResult.EmptyTaskResult.newResult();
}
+ LOG.debug("Running OpenKeyCleanupService");
long startTime = Time.monotonicNow();
final ExpiredOpenKeys expiredOpenKeys;
try {
@@ -202,6 +207,16 @@ public class OpenKeyCleanupService extends BackgroundService {
final OMResponse response = submitRequest(omRequest);
if (response != null && response.getSuccess()) {
ozoneManager.getMetrics().incNumOpenKeysCleaned(numOpenKeys);
+ if (LOG.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ for (OpenKeyBucket.Builder openKey : openKeyBuckets) {
+ sb.append(openKey.getVolumeName() + OZONE_URI_DELIMITER + openKey.getBucketName() + ": ")
+ .append(openKey.getKeysList().stream().map(OzoneManagerProtocolProtos.OpenKey::getName)
+ .collect(Collectors.toList()))
+ .append("\n");
+ }
+ LOG.debug("Non-hsync'ed openKeys being deleted in current iteration: \n" + sb);
+ }
}
}
@@ -214,15 +229,23 @@ public class OpenKeyCleanupService extends BackgroundService {
final OMResponse response = submitRequest(createCommitKeyRequest(b));
if (response != null && response.getSuccess()) {
ozoneManager.getMetrics().incNumOpenKeysHSyncCleaned();
+ if (LOG.isDebugEnabled()) {
+ StringBuilder sb = new StringBuilder();
+ for (CommitKeyRequest.Builder openKey : hsyncKeys) {
+ sb.append(openKey.getKeyArgs().getVolumeName() + OZONE_URI_DELIMITER +
+ openKey.getKeyArgs().getBucketName() + ": ")
+ .append(openKey.getKeyArgs().getKeyName())
+ .append(", ");
+ }
+ LOG.debug("hsync'ed openKeys committed in current iteration: \n" + sb);
+ }
}
});
}
- if (LOG.isDebugEnabled()) {
- LOG.debug("Number of expired open keys submitted for deletion: {},"
- + " for commit: {}, elapsed time: {}ms",
- numOpenKeys, numHsyncKeys, Time.monotonicNow() - startTime);
- }
+ LOG.info("Number of expired open keys submitted for deletion: {},"
+ + " for commit: {}, cleanupLimit: {}, elapsed time: {}ms",
numOpenKeys, numHsyncKeys, cleanupLimitPerTask, Time.monotonicNow() - startTime);
final int numKeys = numOpenKeys + numHsyncKeys;
submittedOpenKeyCount.addAndGet(numKeys);
return () -> numKeys;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]