This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f6359044c4 HDDS-8690. Ozone Support deletion related parameter dynamic
configuration (#4798)
f6359044c4 is described below
commit f6359044c453cfb996b2ba7331bbf62cf1c4068a
Author: XiChen <[email protected]>
AuthorDate: Sun Oct 8 23:42:28 2023 +0800
HDDS-8690. Ozone Support deletion related parameter dynamic configuration
(#4798)
---
.../java/org/apache/hadoop/hdds/scm/ScmConfig.java | 4 +-
.../hadoop/hdds/utils/BackgroundService.java | 21 +++++--
.../apache/hadoop/ozone/HddsDatanodeService.java | 34 +++++++++++-
.../common/impl/BlockDeletingService.java | 28 ++++++++--
.../common/statemachine/DatanodeConfiguration.java | 16 +++++-
.../common/statemachine/DatanodeStateMachine.java | 15 ++++-
.../commandhandler/DeleteBlocksCommandHandler.java | 29 +++++++++-
.../ozone/container/ozoneimpl/OzoneContainer.java | 8 ++-
.../ozone/container/common/ContainerTestUtils.java | 3 +
.../container/ozoneimpl/TestOzoneContainer.java | 7 +++
.../docs/content/feature/Reconfigurability.md | 26 ++++-----
.../docs/content/feature/Reconfigurability.zh.md | 30 +++-------
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 3 +-
.../hdds/scm/block/SCMBlockDeletingService.java | 23 +++++---
.../scm/server/OzoneStorageContainerManager.java | 3 +
.../hdds/scm/server/StorageContainerManager.java | 1 +
.../ozone/container/common/TestEndPoint.java | 3 +
.../container/ozoneimpl/TestOzoneContainer.java | 5 ++
.../ozoneimpl/TestOzoneContainerWithTLS.java | 3 +
.../ozoneimpl/TestSecureOzoneContainer.java | 3 +
.../reconfig/TestDatanodeReconfiguration.java | 64 +++++++++++++++++++++-
.../ozone/reconfig/TestOmReconfiguration.java | 16 +++++-
.../ozone/reconfig/TestScmReconfiguration.java | 18 ++++++
.../org/apache/hadoop/ozone/om/OzoneManager.java | 15 ++++-
.../ozone/om/service/KeyDeletingService.java | 15 ++++-
.../scm/ReconStorageContainerManagerFacade.java | 6 ++
26 files changed, 325 insertions(+), 74 deletions(-)
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
index c05b0ff664..521ffef919 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.conf.Config;
import org.apache.hadoop.hdds.conf.ConfigGroup;
import org.apache.hadoop.hdds.conf.ConfigTag;
import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ReconfigurableConfig;
import java.time.Duration;
@@ -29,7 +30,7 @@ import java.time.Duration;
* The configuration class for the SCM service.
*/
@ConfigGroup(prefix = "hdds.scm")
-public class ScmConfig {
+public class ScmConfig extends ReconfigurableConfig {
@Config(key = "kerberos.principal",
type = ConfigType.STRING,
@@ -95,6 +96,7 @@ public class ScmConfig {
@Config(key = "block.deletion.per-interval.max",
type = ConfigType.INT,
defaultValue = "100000",
+ reconfigurable = true,
tags = { ConfigTag.SCM, ConfigTag.DELETION},
description =
"Maximum number of blocks which SCM processes during an interval. "
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
index aa2377075c..07be341cbb 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundService.java
@@ -26,7 +26,7 @@ import org.slf4j.LoggerFactory;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
@@ -43,7 +43,7 @@ public abstract class BackgroundService {
LoggerFactory.getLogger(BackgroundService.class);
// Executor to launch child tasks
- private final ScheduledExecutorService exec;
+ private final ScheduledThreadPoolExecutor exec;
private final ThreadGroup threadGroup;
private final String serviceName;
private final long interval;
@@ -64,14 +64,27 @@ public abstract class BackgroundService {
.setDaemon(true)
.setNameFormat(serviceName + "#%d")
.build();
- exec = Executors.newScheduledThreadPool(threadPoolSize, threadFactory);
+ exec = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(
+ threadPoolSize, threadFactory);
service = new PeriodicalTask();
}
- protected ExecutorService getExecutorService() {
+ @VisibleForTesting
+ public ExecutorService getExecutorService() {
return this.exec;
}
+ public void setPoolSize(int size) {
+ if (size <= 0) {
+ throw new IllegalArgumentException("Pool size must be positive.");
+ }
+
+  // In ScheduledThreadPoolExecutor, maximumPoolSize is Integer.MAX_VALUE,
+  // so the corePoolSize will always be less than maximumPoolSize.
+  // Therefore we can directly set the corePoolSize.
+ exec.setCorePoolSize(size);
+ }
+
@VisibleForTesting
public int getThreadCount() {
return threadGroup.activeCount();
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 417cc83bd5..fca42820f0 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import
org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
@@ -79,9 +80,11 @@ import static
org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.HTTPS;
import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser;
import static
org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClientWithMaxRetry;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
import static
org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY;
import static org.apache.hadoop.ozone.common.Storage.StorageState.INITIALIZED;
import static org.apache.hadoop.security.UserGroupInformation.getCurrentUser;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX;
import static org.apache.hadoop.util.ExitUtil.terminate;
import org.slf4j.Logger;
@@ -283,8 +286,17 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
secretKeyClient.start(conf);
}
}
+
+ reconfigurationHandler =
+ new ReconfigurationHandler("DN", conf, this::checkAdminPrivilege)
+ .register(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX,
+ this::reconfigBlockDeleteThreadMax)
+ .register(OZONE_BLOCK_DELETING_SERVICE_WORKERS,
+ this::reconfigDeletingServiceWorkers);
+
datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf,
- dnCertClient, secretKeyClient, this::terminateDatanode, dnCRLStore);
+ dnCertClient, secretKeyClient, this::terminateDatanode, dnCRLStore,
+ reconfigurationHandler);
try {
httpServer = new HddsDatanodeHttpServer(conf);
httpServer.start();
@@ -301,8 +313,6 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
LOG.error("HttpServer failed to start.", ex);
}
- reconfigurationHandler =
- new ReconfigurationHandler("DN", conf, this::checkAdminPrivilege);
clientProtocolServer = new HddsDatanodeClientProtocolServer(
datanodeDetails, conf, HddsVersionInfo.HDDS_VERSION_INFO,
@@ -668,4 +678,22 @@ public class HddsDatanodeService extends GenericCli
implements ServicePlugin {
public ReconfigurationHandler getReconfigurationHandler() {
return reconfigurationHandler;
}
+
+ private String reconfigBlockDeleteThreadMax(String value) {
+ getConf().set(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX, value);
+
+ DeleteBlocksCommandHandler handler =
+ (DeleteBlocksCommandHandler) getDatanodeStateMachine()
+ .getCommandDispatcher().getDeleteBlocksCommandHandler();
+ handler.setPoolSize(Integer.parseInt(value));
+ return value;
+ }
+
+ private String reconfigDeletingServiceWorkers(String value) {
+ getConf().set(OZONE_BLOCK_DELETING_SERVICE_WORKERS, value);
+
+ getDatanodeStateMachine().getContainer().getBlockDeletingService()
+ .setPoolSize(Integer.parseInt(value));
+ return value;
+ }
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
index 5d10e474de..d67840cd5a 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/BlockDeletingService.java
@@ -16,7 +16,10 @@
*/
package org.apache.hadoop.ozone.container.common.impl;
+import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -55,9 +58,7 @@ public class BlockDeletingService extends BackgroundService {
private final OzoneContainer ozoneContainer;
private final ContainerDeletionChoosingPolicy containerDeletionPolicy;
private final ConfigurationSource conf;
-
- private final int blockLimitPerInterval;
-
+ private final DatanodeConfiguration dnConf;
private final BlockDeletingServiceMetrics metrics;
// Task priority is useful when a to-delete block has weight.
@@ -65,10 +66,21 @@ public class BlockDeletingService extends BackgroundService
{
private final Duration blockDeletingMaxLockHoldingTime;
+ @VisibleForTesting
public BlockDeletingService(OzoneContainer ozoneContainer,
long serviceInterval, long serviceTimeout,
TimeUnit timeUnit, int workerSize,
ConfigurationSource conf) {
+ this(ozoneContainer, serviceInterval, serviceTimeout, timeUnit,
+ workerSize, conf, new ReconfigurationHandler(
+ "DN", (OzoneConfiguration) conf, op -> { }));
+ }
+
+ public BlockDeletingService(OzoneContainer ozoneContainer,
+ long serviceInterval, long serviceTimeout,
+ TimeUnit timeUnit, int workerSize,
+ ConfigurationSource conf,
+ ReconfigurationHandler reconfigurationHandler) {
super("BlockDeletingService", serviceInterval, timeUnit,
workerSize, serviceTimeout);
this.ozoneContainer = ozoneContainer;
@@ -81,8 +93,8 @@ public class BlockDeletingService extends BackgroundService {
throw new RuntimeException(e);
}
this.conf = conf;
- DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
- this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
+ dnConf = conf.getObject(DatanodeConfiguration.class);
+ reconfigurationHandler.register(dnConf);
this.blockDeletingMaxLockHoldingTime =
dnConf.getBlockDeletingMaxLockHoldingTime();
metrics = BlockDeletingServiceMetrics.create();
@@ -121,7 +133,7 @@ public class BlockDeletingService extends BackgroundService
{
// The chosen result depends on what container deletion policy is
// configured.
List<ContainerBlockInfo> containers =
- chooseContainerForBlockDeletion(blockLimitPerInterval,
+ chooseContainerForBlockDeletion(getBlockLimitPerInterval(),
containerDeletionPolicy);
BackgroundTask
@@ -258,6 +270,10 @@ public class BlockDeletingService extends
BackgroundService {
return blockDeletingMaxLockHoldingTime;
}
+ public int getBlockLimitPerInterval() {
+ return dnConf.getBlockDeletionLimit();
+ }
+
private static class BlockDeletingTaskBuilder {
private BlockDeletingService blockDeletingService;
private BlockDeletingService.ContainerBlockInfo containerBlockInfo;
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
index 7abad805d2..715faafa51 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java
@@ -24,6 +24,9 @@ import org.apache.hadoop.hdds.conf.ConfigTag;
import static java.util.concurrent.TimeUnit.MICROSECONDS;
import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONFIG_PREFIX;
+
+import org.apache.hadoop.hdds.conf.ReconfigurableConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -32,8 +35,14 @@ import java.time.Duration;
/**
* Configuration class used for high level datanode configuration parameters.
*/
-@ConfigGroup(prefix = "hdds.datanode")
-public class DatanodeConfiguration {
+@ConfigGroup(prefix = CONFIG_PREFIX)
+public class DatanodeConfiguration extends ReconfigurableConfig {
+ public static final String CONFIG_PREFIX = "hdds.datanode";
+
+ private static final String BLOCK_DELETE_THREAD_MAX
+ = "block.delete.threads.max";
+ public static final String HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX =
+ CONFIG_PREFIX + "." + BLOCK_DELETE_THREAD_MAX;
private static final Logger LOG =
LoggerFactory.getLogger(DatanodeConfiguration.class);
@@ -153,7 +162,7 @@ public class DatanodeConfiguration {
* missed. With max threads 5, optimistically DN can handle 1500 individual
* container delete tx in 60s with RocksDB cache miss.
*/
- @Config(key = "block.delete.threads.max",
+ @Config(key = BLOCK_DELETE_THREAD_MAX,
type = ConfigType.INT,
defaultValue = "5",
tags = {DATANODE},
@@ -253,6 +262,7 @@ public class DatanodeConfiguration {
@Config(key = "block.deleting.limit.per.interval",
defaultValue = "5000",
+ reconfigurable = true,
type = ConfigType.INT,
tags = { ConfigTag.SCM, ConfigTag.DELETION },
description =
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 2184a67d46..a1ee46d754 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -30,6 +30,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.datanode.metadata.DatanodeCRLStore;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -130,6 +132,7 @@ public class DatanodeStateMachine implements Closeable {
reconstructECContainersCommandHandler;
private final DatanodeQueueMetrics queueMetrics;
+ private final ReconfigurationHandler reconfigurationHandler;
/**
* Constructs a datanode state machine.
* @param datanodeDetails - DatanodeDetails used to identify a datanode
@@ -142,10 +145,13 @@ public class DatanodeStateMachine implements Closeable {
CertificateClient certClient,
SecretKeyClient secretKeyClient,
HddsDatanodeStopService hddsDatanodeStopService,
- DatanodeCRLStore crlStore) throws IOException {
+ DatanodeCRLStore crlStore,
+ ReconfigurationHandler reconfigurationHandler)
+ throws IOException {
DatanodeConfiguration dnConf =
conf.getObject(DatanodeConfiguration.class);
+ this.reconfigurationHandler = reconfigurationHandler;
this.hddsDatanodeStopService = hddsDatanodeStopService;
this.conf = conf;
this.datanodeDetails = datanodeDetails;
@@ -265,7 +271,8 @@ public class DatanodeStateMachine implements Closeable {
@VisibleForTesting
public DatanodeStateMachine(DatanodeDetails datanodeDetails,
ConfigurationSource conf) throws IOException {
- this(datanodeDetails, conf, null, null, null, null);
+ this(datanodeDetails, conf, null, null, null, null,
+ new ReconfigurationHandler("DN", (OzoneConfiguration) conf, op -> {
}));
}
private int getEndPointTaskThreadPoolSize() {
@@ -747,4 +754,8 @@ public class DatanodeStateMachine implements Closeable {
public DatanodeQueueMetrics getQueueMetrics() {
return queueMetrics;
}
+
+ public ReconfigurationHandler getReconfigurationHandler() {
+ return reconfigurationHandler;
+ }
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 87d35c2da0..f8943b62e4 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -66,7 +66,6 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
@@ -92,7 +91,7 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
private final ConfigurationSource conf;
private int invocationCount;
private long totalTime;
- private final ExecutorService executor;
+ private final ThreadPoolExecutor executor;
private final LinkedBlockingQueue<DeleteCmdInfo> deleteCommandQueues;
private final Daemon handlerThread;
private final OzoneContainer ozoneContainer;
@@ -112,7 +111,7 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
schemaHandlers.put(SCHEMA_V2, this::markBlocksForDeletionSchemaV2);
schemaHandlers.put(SCHEMA_V3, this::markBlocksForDeletionSchemaV3);
- this.executor = Executors.newFixedThreadPool(
+ this.executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(
dnConf.getBlockDeleteThreads(), new ThreadFactoryBuilder()
.setNameFormat("DeleteBlocksCommandHandlerThread-%d").build());
this.deleteCommandQueues =
@@ -682,4 +681,28 @@ public class DeleteBlocksCommandHandler implements
CommandHandler {
return blockDeleteMetrics;
}
+ @VisibleForTesting
+ public ThreadPoolExecutor getExecutor() {
+ return executor;
+ }
+
+ public void setPoolSize(int size) {
+ if (size <= 0) {
+ throw new IllegalArgumentException("Pool size must be positive.");
+ }
+
+ int currentCorePoolSize = executor.getCorePoolSize();
+
+ // In ThreadPoolExecutor, maximumPoolSize must always be greater than or
+ // equal to the corePoolSize. We must make sure this invariant holds when
+ // changing the pool size. Therefore, we take into account whether the
+ // new size is greater or smaller than the current core pool size.
+ if (size > currentCorePoolSize) {
+ executor.setMaximumPoolSize(size);
+ executor.setCorePoolSize(size);
+ } else {
+ executor.setCorePoolSize(size);
+ executor.setMaximumPoolSize(size);
+ }
+ }
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 50c406d35e..a1559d6885 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -229,7 +229,8 @@ public class OzoneContainer {
blockDeletingService =
new BlockDeletingService(this, blockDeletingSvcInterval.toMillis(),
blockDeletingServiceTimeout, TimeUnit.MILLISECONDS,
- blockDeletingServiceWorkerSize, config);
+ blockDeletingServiceWorkerSize, config, context.getParent()
+ .getReconfigurationHandler());
Duration recoveringContainerScrubbingSvcInterval = conf.getObject(
DatanodeConfiguration.class).getRecoveringContainerScrubInterval();
@@ -551,4 +552,9 @@ public class OzoneContainer {
public ContainerMetrics getMetrics() {
return metrics;
}
+
+ public BlockDeletingService getBlockDeletingService() {
+ return blockDeletingService;
+ }
+
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 263ab54599..061e20b9b9 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
import
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -117,6 +118,8 @@ public final class ContainerTestUtils {
throws IOException {
DatanodeStateMachine stateMachine =
Mockito.mock(DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index d856f2c214..8d9b30d2ff 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -171,6 +172,8 @@ public class TestOzoneContainer {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
@@ -206,6 +209,8 @@ public class TestOzoneContainer {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
@@ -227,6 +232,8 @@ public class TestOzoneContainer {
public void testBuildNodeReportWithDefaultRatisLogDir() throws Exception {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.md
b/hadoop-hdds/docs/content/feature/Reconfigurability.md
index cbad0e8911..a237e478b1 100644
--- a/hadoop-hdds/docs/content/feature/Reconfigurability.md
+++ b/hadoop-hdds/docs/content/feature/Reconfigurability.md
@@ -38,13 +38,18 @@ The meaning of command options:
- **status**: Check reconfig status
- **properties**: List reconfigurable properties
-## OM Reconfigurability
+## Retrieve the reconfigurable properties list
+To retrieve the list of all reconfigurable properties for a specific component
in Ozone,
+you can use the command: `ozone admin reconfig --address=<ip:port> properties`.
+This command will list all the properties that can be dynamically reconfigured
at runtime for the specified component.<br>
-**Reconfigurable properties**
-key | description
------------------------------------|-----------------------------------------
-ozone.administrators | OM startup user will be added to admin by default
+> For example, get the Ozone OM reconfigurable properties list.
+>
+>$ `ozone admin reconfig --address=hadoop1:9862 properties`<br>
+OM: Node [hadoop1:9862] Reconfigurable properties:<br>
+ozone.administrators
+## OM Reconfigurability
>For example, modify `ozone.administrators` in ozone-site.xml and execute:
>
> $ `ozone admin reconfig --address=hadoop1:9862 start`<br>
@@ -61,12 +66,6 @@ OM: Node [hadoop1:9862] Reconfigurable properties:<br>
ozone.administrators
## SCM Reconfigurability
-
-**Reconfigurable properties**
-key | description
------------------------------------|-----------------------------------------
-ozone.administrators | OM startup user will be added to admin by default
-
>For example, modify `ozone.administrators` in ozone-site.xml and execute:
>
> $ `ozone admin reconfig --address=hadoop1:9860 start`<br>
@@ -83,11 +82,6 @@ SCM: Node [hadoop1:9860] Reconfigurable properties:<br>
ozone.administrators
## Datanode Reconfigurability
-
-**Reconfigurable properties**
-key | description
------------------------------------|-----------------------------------------
-
>For example, modify `ozone.example.config` in ozone-site.xml and execute:
>
> $ `ozone admin reconfig --address=hadoop1:9864 start`<br>
diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md
b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md
index 483a4808c6..321b6f137e 100644
--- a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md
+++ b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md
@@ -37,14 +37,17 @@ ozone admin reconfig --address=<ip:port>
start|status|properties
- **status**: 查看最近一次动态加载的状态
- **properties**: 列出支持动态加载的配置项
-## OM动态配置
-
-**支持动态加载的属性**
-配置项 | 描述
------------------------------------|-----------------------------------------
-ozone.administrators | OM启动用户将默认成为一个管理员
+## 获取可动态加载的属性列表
+要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --address=<ip:port>
properties`。
+这个命令将会列出所有可以在运行时动态加载的属性。
+> 例如, 获取 Ozone OM 可动态加载属性列表
+>
+>$ `ozone admin reconfig --address=hadoop1:9862 properties`<br>
+OM: Node [hadoop1:9862] Reconfigurable properties:<br>
+ozone.administrators
+## OM动态配置
>例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行:
>
> $ `ozone admin reconfig --address=hadoop1:9862 start`<br>
@@ -61,14 +64,6 @@ OM: Node [hadoop1:9862] Reconfigurable properties:<br>
ozone.administrators
## SCM动态配置
-
-
-**支持动态加载的属性**
-配置项 | 描述
------------------------------------|-----------------------------------------
-ozone.administrators | SCM启动用户将默认成为一个管理员
-
-
>例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行:
>
> $ `ozone admin reconfig --address=hadoop1:9860 start`<br>
@@ -86,13 +81,6 @@ ozone.administrators
## Datanode 动态配置
-
-
-**支持动态加载的属性**
-配置项 | 描述
------------------------------------|-----------------------------------------
-
-
>例如, 在`ozone-site.xml`文件中修改`ozone.example.config`的值并执行:
>
> $ `ozone admin reconfig --address=hadoop1:9864 start`<br>
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index c56ee9bf51..48830cafbd 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -104,11 +104,12 @@ public class BlockManagerImpl implements BlockManager,
BlockmanagerMXBean {
scm.getSequenceIdGen(),
metrics);
+
blockDeletingService =
new SCMBlockDeletingService(deletedBlockLog,
scm.getScmNodeManager(), scm.getEventQueue(), scm.getScmContext(),
scm.getSCMServiceManager(), conf,
- metrics, scm.getSystemClock());
+ metrics, scm.getSystemClock(), scm.getReconfigurationHandler());
}
/**
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 0a1222e380..8480c63966 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -31,6 +31,7 @@ import java.util.stream.Collectors;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
@@ -78,6 +79,7 @@ public class SCMBlockDeletingService extends BackgroundService
private final NodeManager nodeManager;
private final EventPublisher eventPublisher;
private final SCMContext scmContext;
+ private final ScmConfig scmConf;
private int blockDeleteLimitSize;
private ScmBlockDeletingServiceMetrics metrics;
@@ -98,9 +100,10 @@ public class SCMBlockDeletingService extends
BackgroundService
SCMContext scmContext, SCMServiceManager serviceManager,
ConfigurationSource conf,
ScmBlockDeletingServiceMetrics metrics,
- Clock clock) {
+ Clock clock, ReconfigurationHandler reconfigurationHandler) {
super("SCMBlockDeletingService",
- conf.getObject(ScmConfig.class).getBlockDeletionInterval().toMillis(),
+ conf.getObject(ScmConfig.class)
+ .getBlockDeletionInterval().toMillis(),
TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE,
conf.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
@@ -116,9 +119,9 @@ public class SCMBlockDeletingService extends
BackgroundService
this.eventPublisher = eventPublisher;
this.scmContext = scmContext;
this.metrics = metrics;
-
- blockDeleteLimitSize =
- conf.getObject(ScmConfig.class).getBlockDeletionLimit();
+ scmConf = conf.getObject(ScmConfig.class);
+ reconfigurationHandler.register(scmConf);
+ blockDeleteLimitSize = scmConf.getBlockDeletionLimit();
Preconditions.checkArgument(blockDeleteLimitSize > 0,
"Block deletion limit should be positive.");
@@ -164,7 +167,7 @@ public class SCMBlockDeletingService extends
BackgroundService
Type.deleteBlocksCommand) == 0).collect(Collectors.toSet());
try {
DatanodeDeletedBlockTransactions transactions =
- deletedBlockLog.getTransactions(blockDeleteLimitSize, included);
+ deletedBlockLog.getTransactions(getBlockDeleteTXNum(), included);
if (transactions.isEmpty()) {
return EmptyTaskResult.newResult();
@@ -222,7 +225,13 @@ public class SCMBlockDeletingService extends
BackgroundService
@VisibleForTesting
public void setBlockDeleteTXNum(int numTXs) {
- blockDeleteLimitSize = numTXs;
+ Preconditions.checkArgument(numTXs > 0,
+ "Block deletion limit should be positive.");
+ scmConf.setBlockDeletionLimit(numTXs);
+ }
+
+ public int getBlockDeleteTXNum() {
+ return scmConf.getBlockDeletionLimit();
}
@Override
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
index 68adc76a45..009b14f812 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/OzoneStorageContainerManager.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdds.scm.server;
import java.io.IOException;
import java.net.InetSocketAddress;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
@@ -58,4 +59,6 @@ public interface OzoneStorageContainerManager {
InetSocketAddress getDatanodeRpcAddress();
SCMNodeDetails getScmNodeDetails();
+
+ ReconfigurationHandler getReconfigurationHandler();
}
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9f12993853..6e038cd55a 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -2175,6 +2175,7 @@ public final class StorageContainerManager extends
ServiceRuntimeInfoImpl
scmHAMetrics = SCMHAMetrics.create(getScmId(), leaderId);
}
+ @Override
public ReconfigurationHandler getReconfigurationHandler() {
return reconfigurationHandler;
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 24e94b54ea..b5dca06fc9 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -26,6 +26,7 @@ import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
@@ -618,6 +619,8 @@ public class TestEndPoint {
private StateContext getContext(DatanodeDetails datanodeDetails) {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", ozoneConf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 6bf927d629..db7416dcfa 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
@@ -78,6 +79,8 @@ public class TestOzoneContainer {
DatanodeDetails datanodeDetails = randomDatanodeDetails();
StateContext context = Mockito.mock(StateContext.class);
DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
+ Mockito.when(dsm.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(dsm);
container = new OzoneContainer(datanodeDetails, conf, context);
@@ -111,6 +114,8 @@ public class TestOzoneContainer {
DatanodeDetails datanodeDetails = randomDatanodeDetails();
StateContext context = Mockito.mock(StateContext.class);
DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class);
+ Mockito.when(dsm.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(dsm);
container = new OzoneContainer(datanodeDetails, conf, context);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index 6ea0dc4d77..5c6fe2a37e 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -378,6 +379,8 @@ public class TestOzoneContainerWithTLS {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
return context;
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index 4731352c5c..2bf0dd5e10 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -205,6 +206,8 @@ public class TestSecureOzoneContainer {
private StateContext getContext(DatanodeDetails datanodeDetails) {
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
+ Mockito.when(stateMachine.getReconfigurationHandler())
+ .thenReturn(new ReconfigurationHandler("DN", conf, op -> { }));
StateContext context = Mockito.mock(StateContext.class);
Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
Mockito.when(context.getParent()).thenReturn(stateMachine);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
index 7a26832aaf..c0b3d7d541 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestDatanodeReconfiguration.java
@@ -18,8 +18,22 @@
package org.apache.hadoop.ozone.reconfig;
import com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import
org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.Set;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
+import static
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* Tests for Datanode reconfiguration.
@@ -27,12 +41,58 @@ import org.junit.jupiter.api.Test;
class TestDatanodeReconfiguration extends ReconfigurationTestBase {
@Override
ReconfigurationHandler getSubject() {
- return getCluster().getHddsDatanodes().get(0).getReconfigurationHandler();
+ return getFirstDatanode().getReconfigurationHandler();
}
@Test
void reconfigurableProperties() {
- assertProperties(getSubject(), ImmutableSet.of());
+ Set<String> expected = ImmutableSet.<String>builder()
+ .add(HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX)
+ .add(OZONE_BLOCK_DELETING_SERVICE_WORKERS)
+ .addAll(new DatanodeConfiguration().reconfigurableProperties())
+ .build();
+
+ assertProperties(getSubject(), expected);
+ }
+
+ @Test
+ void blockDeletingLimitPerInterval() throws ReconfigurationException {
+ getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+ "hdds.datanode.block.deleting.limit.per.interval", "1");
+
+ assertEquals(1, getFirstDatanode().getDatanodeStateMachine().getContainer()
+ .getBlockDeletingService().getBlockLimitPerInterval());
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = { -1, +1 })
+ void blockDeleteThreadMax(int delta) throws ReconfigurationException {
+ ThreadPoolExecutor executor = ((DeleteBlocksCommandHandler)
+ getFirstDatanode().getDatanodeStateMachine().getCommandDispatcher()
+ .getDeleteBlocksCommandHandler()).getExecutor();
+ int newValue = executor.getMaximumPoolSize() + delta;
+
+ getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+ HDDS_DATANODE_BLOCK_DELETE_THREAD_MAX, String.valueOf(newValue));
+ assertEquals(newValue, executor.getMaximumPoolSize());
+ assertEquals(newValue, executor.getCorePoolSize());
+ }
+
+ @ParameterizedTest
+ @ValueSource(ints = { -1, +1 })
+ void blockDeletingServiceWorkers(int delta) throws ReconfigurationException {
+ ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor)
+ getFirstDatanode().getDatanodeStateMachine().getContainer()
+ .getBlockDeletingService().getExecutorService();
+ int newValue = executor.getCorePoolSize() + delta;
+
+ getFirstDatanode().getReconfigurationHandler().reconfigurePropertyImpl(
+ OZONE_BLOCK_DELETING_SERVICE_WORKERS, String.valueOf(newValue));
+ assertEquals(newValue, executor.getCorePoolSize());
+ }
+
+ private HddsDatanodeService getFirstDatanode() {
+ return getCluster().getHddsDatanodes().get(0);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
index a42763f0ce..a7c471c81b 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestOmReconfiguration.java
@@ -26,6 +26,7 @@ import org.junit.jupiter.api.Test;
import static org.apache.commons.lang3.RandomStringUtils.randomAlphabetic;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_READONLY_ADMINISTRATORS;
+import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
@@ -41,7 +42,8 @@ class TestOmReconfiguration extends ReconfigurationTestBase {
@Test
void reconfigurableProperties() {
assertProperties(getSubject(),
- ImmutableSet.of(OZONE_ADMINISTRATORS, OZONE_READONLY_ADMINISTRATORS));
+ ImmutableSet.of(OZONE_ADMINISTRATORS, OZONE_READONLY_ADMINISTRATORS,
+ OZONE_KEY_DELETING_LIMIT_PER_TASK));
}
@Test
@@ -67,4 +69,16 @@ class TestOmReconfiguration extends ReconfigurationTestBase {
getCluster().getOzoneManager().getOmReadOnlyAdminUsernames());
}
+ @Test
+ public void keyDeletingLimitPerTask() throws ReconfigurationException {
+ int originLimit = getCluster().getOzoneManager()
+ .getKeyManager().getDeletingService().getKeyLimitPerTask();
+
+ getSubject().reconfigurePropertyImpl(OZONE_KEY_DELETING_LIMIT_PER_TASK,
+ String.valueOf(originLimit + 1));
+
+ assertEquals(originLimit + 1, getCluster().getOzoneManager()
+ .getKeyManager().getDeletingService().getKeyLimitPerTask());
+ }
+
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
index 27d9a9736d..6329f13a1a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/reconfig/TestScmReconfiguration.java
@@ -21,7 +21,9 @@ package org.apache.hadoop.ozone.reconfig;
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.apache.hadoop.hdds.scm.ScmConfig;
import
org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
+import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
import
org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig;
import org.junit.jupiter.api.Test;
@@ -53,6 +55,7 @@ class TestScmReconfiguration extends ReconfigurationTestBase {
.reconfigurableProperties())
.addAll(new WritableECContainerProviderConfig()
.reconfigurableProperties())
+ .addAll(new ScmConfig().reconfigurableProperties())
.build();
assertProperties(getSubject(), expected);
@@ -98,4 +101,19 @@ class TestScmReconfiguration extends
ReconfigurationTestBase {
.getConfig();
}
+ @Test
+ void blockDeletionPerInterval() throws ReconfigurationException {
+ SCMBlockDeletingService blockDeletingService =
+ getCluster().getStorageContainerManager().getScmBlockManager()
+ .getSCMBlockDeletingService();
+ int blockDeleteTXNum = blockDeletingService.getBlockDeleteTXNum();
+ int newValue = blockDeleteTXNum + 1;
+
+ getSubject().reconfigurePropertyImpl(
+ "hdds.scm.block.deletion.per-interval.max",
+ String.valueOf(newValue));
+
+ assertEquals(newValue, blockDeletingService.getBlockDeleteTXNum());
+ }
+
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index a9a13a49a2..e5e304eea7 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -266,6 +266,7 @@ import static
org.apache.hadoop.ozone.OzoneConsts.PREPARE_MARKER_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.OM_RATIS_SNAPSHOT_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.RPC_PORT;
import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY;
+import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT;
@@ -507,7 +508,9 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
new ReconfigurationHandler("OM", conf, this::checkAdminUserPrivilege)
.register(OZONE_ADMINISTRATORS, this::reconfOzoneAdmins)
.register(OZONE_READONLY_ADMINISTRATORS,
- this::reconfOzoneReadOnlyAdmins);
+ this::reconfOzoneReadOnlyAdmins)
+ .register(OZONE_KEY_DELETING_LIMIT_PER_TASK,
+ this::reconfOzoneKeyDeletingLimitPerTask);
versionManager = new OMLayoutVersionManager(omStorage.getLayoutVersion());
upgradeFinalizer = new OMUpgradeFinalizer(versionManager);
@@ -4781,6 +4784,16 @@ public final class OzoneManager extends
ServiceRuntimeInfoImpl
return String.valueOf(newVal);
}
+ private String reconfOzoneKeyDeletingLimitPerTask(String newVal) {
+ Preconditions.checkArgument(Integer.parseInt(newVal) >= 0,
+ OZONE_KEY_DELETING_LIMIT_PER_TASK + " cannot be negative.");
+ getConfiguration().set(OZONE_KEY_DELETING_LIMIT_PER_TASK, newVal);
+
+ getKeyManager().getDeletingService()
+ .setKeyLimitPerTask(Integer.parseInt(newVal));
+ return newVal;
+ }
+
public void validateReplicationConfig(ReplicationConfig replicationConfig)
throws OMException {
try {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
index f03484347e..a0ca1ae547 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java
@@ -28,6 +28,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import com.google.common.base.Preconditions;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
@@ -91,7 +92,7 @@ public class KeyDeletingService extends
AbstractKeyDeletingService {
private static final int KEY_DELETING_CORE_POOL_SIZE = 1;
private final KeyManager manager;
- private final int keyLimitPerTask;
+ private int keyLimitPerTask;
private final AtomicLong deletedKeyCount;
private final AtomicBoolean suspended;
private final Map<String, Long> exclusiveSizeMap;
@@ -108,6 +109,8 @@ public class KeyDeletingService extends
AbstractKeyDeletingService {
this.manager = manager;
this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
+ Preconditions.checkArgument(keyLimitPerTask >= 0,
+ OZONE_KEY_DELETING_LIMIT_PER_TASK + " cannot be negative.");
this.deletedKeyCount = new AtomicLong(0);
this.suspended = new AtomicBoolean(false);
this.exclusiveSizeMap = new HashMap<>();
@@ -156,6 +159,14 @@ public class KeyDeletingService extends
AbstractKeyDeletingService {
suspended.set(false);
}
+ public int getKeyLimitPerTask() {
+ return keyLimitPerTask;
+ }
+
+ public void setKeyLimitPerTask(int keyLimitPerTask) {
+ this.keyLimitPerTask = keyLimitPerTask;
+ }
+
/**
* A key deleting task scans OM DB and looking for a certain number of
* pending-deletion keys, sends these keys along with their associated blocks
@@ -192,7 +203,7 @@ public class KeyDeletingService extends
AbstractKeyDeletingService {
// from if the above would be done inside getPendingDeletionKeys().
PendingKeysDeletion pendingKeysDeletion = manager
- .getPendingDeletionKeys(keyLimitPerTask);
+ .getPendingDeletionKeys(getKeyLimitPerTask());
List<BlockGroup> keyBlocksList = pendingKeysDeletion
.getKeyBlocksList();
if (keyBlocksList != null && !keyBlocksList.isEmpty()) {
diff --git
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index be40e60880..64dd8cee82 100644
---
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -41,6 +41,7 @@ import com.google.inject.Singleton;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
@@ -686,6 +687,11 @@ public class ReconStorageContainerManagerFacade
return reconNodeDetails;
}
+ @Override
+ public ReconfigurationHandler getReconfigurationHandler() {
+ return null;
+ }
+
public DBStore getScmDBStore() {
return dbStore;
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]