This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 61dba03f5a HDDS-9696. Add configuration for wait time after checking
queue in DeleteCmdWorker (#5611)
61dba03f5a is described below
commit 61dba03f5a6de47cadc84b537f772a7dae89d42e
Author: jianghuazhu <[email protected]>
AuthorDate: Sat Nov 18 13:56:12 2023 -0600
HDDS-9696. Add configuration for wait time after checking queue in
DeleteCmdWorker (#5611)
---
.../common/statemachine/DatanodeConfiguration.java | 33 ++++++++++++++++++++++
.../commandhandler/DeleteBlocksCommandHandler.java | 16 +++++++++--
.../statemachine/TestDatanodeConfiguration.java | 14 +++++++++
.../TestDeleteBlocksCommandHandler.java | 23 +++++++++++++++
4 files changed, 84 insertions(+), 2 deletions(-)
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index be1723a3de..3272aedb66 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -127,6 +127,11 @@ public class DatanodeConfiguration extends
ReconfigurableConfig {
static final int CONTAINER_CLOSE_THREADS_DEFAULT = 3;
static final int BLOCK_DELETE_THREADS_DEFAULT = 5;
+ public static final String BLOCK_DELETE_COMMAND_WORKER_INTERVAL =
+ "hdds.datanode.block.delete.command.worker.interval";
+ public static final Duration BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT =
+ Duration.ofSeconds(2);
+
/**
* The maximum number of threads used to delete containers on a datanode
* simultaneously.
@@ -184,6 +189,16 @@ public class DatanodeConfiguration extends
ReconfigurableConfig {
)
private int blockDeleteQueueLimit = 5;
+ @Config(key = "block.delete.command.worker.interval",
+ type = ConfigType.TIME,
+ defaultValue = "2s",
+ tags = {DATANODE},
+ description = "The interval between DeleteCmdWorker execution of " +
+ "delete commands."
+ )
+ private Duration blockDeleteCommandWorkerInterval =
+ BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT;
+
/**
* The maximum number of commands in the queued list.
* If the number of commands crosses the limit, the command will be ignored.
@@ -634,6 +649,15 @@ public class DatanodeConfiguration extends
ReconfigurableConfig {
diskCheckTimeout = DISK_CHECK_TIMEOUT_DEFAULT;
}
+ if (blockDeleteCommandWorkerInterval.isNegative()) {
+ LOG.warn(BLOCK_DELETE_COMMAND_WORKER_INTERVAL +
+ " must be greater than zero and was set to {}. Defaulting to {}",
+ blockDeleteCommandWorkerInterval,
+ BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT);
+ blockDeleteCommandWorkerInterval =
+ BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT;
+ }
+
if (rocksdbLogMaxFileSize < 0) {
LOG.warn(ROCKSDB_LOG_MAX_FILE_SIZE_BYTES_KEY +
" must be no less than zero and was set to {}. Defaulting to {}",
@@ -771,6 +795,15 @@ public class DatanodeConfiguration extends
ReconfigurableConfig {
this.blockDeleteQueueLimit = queueLimit;
}
+ public Duration getBlockDeleteCommandWorkerInterval() {
+ return blockDeleteCommandWorkerInterval;
+ }
+
+ public void setBlockDeleteCommandWorkerInterval(
+ Duration blockDeleteCommandWorkerInterval) {
+ this.blockDeleteCommandWorkerInterval = blockDeleteCommandWorkerInterval;
+ }
+
public int getCommandQueueLimit() {
return cmdQueueLimit;
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 0b5b5a68b7..bec6734768 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -121,7 +121,8 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
dnConf.getBlockDeleteThreads(), threadFactory);
this.deleteCommandQueues =
new LinkedBlockingQueue<>(dnConf.getBlockDeleteQueueLimit());
- handlerThread = new Daemon(new DeleteCmdWorker());
+ long interval = dnConf.getBlockDeleteCommandWorkerInterval().toMillis();
+ handlerThread = new Daemon(new DeleteCmdWorker(interval));
handlerThread.start();
}
@@ -221,6 +222,17 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
*/
public final class DeleteCmdWorker implements Runnable {
+ private long intervalInMs;
+
+ public DeleteCmdWorker(long interval) {
+ this.intervalInMs = interval;
+ }
+
+ @VisibleForTesting
+ public long getInterval() {
+ return this.intervalInMs;
+ }
+
@Override
public void run() {
while (true) {
@@ -234,7 +246,7 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
}
try {
- Thread.sleep(2000);
+ Thread.sleep(this.intervalInMs);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
break;
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 359023e8b8..1d83c5c7a5 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -43,6 +43,8 @@ import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConf
import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_DATA_VOLUMES_TOLERATED_KEY;
import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_METADATA_VOLUMES_TOLERATED_KEY;
import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_VOLUMES_TOLERATED_DEFAULT;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -59,6 +61,7 @@ public class TestDatanodeConfiguration {
int validFailedVolumesTolerated = 10;
long validDiskCheckMinGap = 2;
long validDiskCheckTimeout = 1;
+ long validBlockDeleteCommandWorkerInterval = 1;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, validDeleteThreads);
conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
@@ -73,6 +76,8 @@ public class TestDatanodeConfiguration {
validDiskCheckMinGap, TimeUnit.MINUTES);
conf.setTimeDuration(DISK_CHECK_TIMEOUT_KEY,
validDiskCheckTimeout, TimeUnit.MINUTES);
+ conf.setTimeDuration(BLOCK_DELETE_COMMAND_WORKER_INTERVAL,
+ validBlockDeleteCommandWorkerInterval, TimeUnit.SECONDS);
// WHEN
DatanodeConfiguration subject =
conf.getObject(DatanodeConfiguration.class);
@@ -91,6 +96,8 @@ public class TestDatanodeConfiguration {
subject.getDiskCheckMinGap().toMinutes());
assertEquals(validDiskCheckTimeout,
subject.getDiskCheckTimeout().toMinutes());
+ assertEquals(validBlockDeleteCommandWorkerInterval,
+ subject.getBlockDeleteCommandWorkerInterval().getSeconds());
}
@Test
@@ -101,6 +108,7 @@ public class TestDatanodeConfiguration {
int invalidFailedVolumesTolerated = -2;
long invalidDiskCheckMinGap = -1;
long invalidDiskCheckTimeout = -1;
+ long invalidBlockDeleteCommandWorkerInterval = -1;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, invalidDeleteThreads);
conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
@@ -115,6 +123,8 @@ public class TestDatanodeConfiguration {
invalidDiskCheckMinGap, TimeUnit.MINUTES);
conf.setTimeDuration(DISK_CHECK_TIMEOUT_KEY,
invalidDiskCheckTimeout, TimeUnit.MINUTES);
+ conf.setTimeDuration(BLOCK_DELETE_COMMAND_WORKER_INTERVAL,
+ invalidBlockDeleteCommandWorkerInterval, TimeUnit.SECONDS);
// WHEN
DatanodeConfiguration subject =
conf.getObject(DatanodeConfiguration.class);
@@ -134,6 +144,8 @@ public class TestDatanodeConfiguration {
subject.getDiskCheckMinGap());
assertEquals(DISK_CHECK_TIMEOUT_DEFAULT,
subject.getDiskCheckTimeout());
+ assertEquals(BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT,
+ subject.getBlockDeleteCommandWorkerInterval());
}
@Test
@@ -159,6 +171,8 @@ public class TestDatanodeConfiguration {
subject.getDiskCheckMinGap());
assertEquals(DISK_CHECK_TIMEOUT_DEFAULT,
subject.getDiskCheckTimeout());
+ assertEquals(BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT,
+ subject.getBlockDeleteCommandWorkerInterval());
}
@Test
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index 6cadaae192..083a8eb016 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -56,7 +56,10 @@ import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT;
import static
org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler.DeleteBlockTransactionExecutionResult;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
@@ -262,6 +265,26 @@ public class TestDeleteBlocksCommandHandler {
blockDeleteMetrics.getTotalLockTimeoutTransactionCount());
}
+ @Test
+ public void testDeleteCmdWorkerInterval() {
+ OzoneConfiguration tmpConf = new OzoneConfiguration();
+ tmpConf.setTimeDuration(BLOCK_DELETE_COMMAND_WORKER_INTERVAL, 3,
+ TimeUnit.SECONDS);
+ OzoneContainer container = Mockito.mock(OzoneContainer.class);
+ DatanodeConfiguration dnConf =
+ tmpConf.getObject(DatanodeConfiguration.class);
+ DeleteBlocksCommandHandler commandHandler =
+ spy(new DeleteBlocksCommandHandler(
+ container, tmpConf, dnConf, "test"));
+
+ Assert.assertEquals(tmpConf.getTimeDuration(
+ BLOCK_DELETE_COMMAND_WORKER_INTERVAL,
+ BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT.getSeconds(),
+ TimeUnit.SECONDS), 3);
+ DeleteBlocksCommandHandler.DeleteCmdWorker deleteCmdWorker =
+ commandHandler.new DeleteCmdWorker(4000);
+ Assert.assertEquals(deleteCmdWorker.getInterval(), 4000);
+ }
private DeletedBlocksTransaction createDeletedBlocksTransaction(long txID,
long containerID) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]