This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 01f8d62fbe HDDS-10331. Rename Java constants of ex-DFS config keys
(#6290)
01f8d62fbe is described below
commit 01f8d62fbee2be4c79c4b4d4bf759a81f5ea30bb
Author: Sarveksha Yeshavantha Raju
<[email protected]>
AuthorDate: Wed Feb 28 22:10:30 2024 +0530
HDDS-10331. Rename Java constants of ex-DFS config keys (#6290)
---
.../apache/hadoop/hdds/scm/XceiverClientGrpc.java | 4 +-
.../apache/hadoop/hdds/scm/XceiverClientRatis.java | 4 +-
.../hadoop/hdds/conf/OzoneConfiguration.java | 60 +++----
.../org/apache/hadoop/hdds/ratis/RatisHelper.java | 4 +-
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 74 ++++----
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 186 ++++++++++-----------
.../common/transport/server/XceiverServerGrpc.java | 8 +-
.../server/ratis/ContainerStateMachine.java | 12 +-
.../transport/server/ratis/XceiverServerRatis.java | 108 ++++++------
.../ozone/container/keyvalue/KeyValueHandler.java | 4 +-
.../keyvalue/impl/ChunkManagerFactory.java | 4 +-
.../ozone/container/common/ContainerTestUtils.java | 2 +-
.../ozone/container/common/SCMTestUtils.java | 4 +-
.../container/common/TestDatanodeStateMachine.java | 12 +-
.../statemachine/TestDatanodeConfiguration.java | 2 +-
.../common/volume/TestPeriodicVolumeChecker.java | 2 +-
.../container/common/volume/TestVolumeSet.java | 2 +-
.../common/volume/TestVolumeSetDiskChecks.java | 2 +-
.../container/ozoneimpl/TestContainerReader.java | 2 +-
.../container/ozoneimpl/TestOzoneContainer.java | 2 +-
.../upgrade/TestDatanodeUpgradeToSchemaV3.java | 4 +-
.../apache/hadoop/hdds/utils/HddsServerUtil.java | 6 +-
.../hdds/scm/node/TestContainerPlacement.java | 2 +-
.../ozone/container/common/TestEndPoint.java | 18 +-
.../hdds/scm/cli/ContainerOperationClient.java | 4 +-
.../apache/hadoop/ozone/MiniOzoneChaosCluster.java | 8 +-
.../fs/ozone/TestOzoneFileSystemWithStreaming.java | 4 +-
.../hadoop/hdds/scm/TestRatisPipelineLeader.java | 4 +-
.../org/apache/hadoop/ozone/RatisTestHelper.java | 6 +-
.../apache/hadoop/ozone/TestMiniOzoneCluster.java | 18 +-
.../hadoop/ozone/UniformDatanodesFactory.java | 24 +--
.../client/rpc/TestContainerStateMachine.java | 2 +-
.../rpc/TestContainerStateMachineFailures.java | 10 +-
.../rpc/TestContainerStateMachineFlushDelay.java | 2 +-
.../rpc/TestContainerStateMachineStream.java | 2 +-
.../client/rpc/TestFailureHandlingByClient.java | 2 +-
.../rpc/TestFailureHandlingByClientFlushDelay.java | 2 +-
.../rpc/TestMultiBlockWritesWithDnFailures.java | 2 +-
.../transport/server/ratis/TestCSMMetrics.java | 4 +-
.../container/metrics/TestContainerMetrics.java | 2 +-
.../container/ozoneimpl/TestOzoneContainer.java | 4 +-
.../ozoneimpl/TestSecureOzoneContainer.java | 4 +-
.../container/server/TestContainerServer.java | 10 +-
.../server/TestSecureContainerServer.java | 10 +-
.../ozone/dn/ratis/TestDnRatisLogParser.java | 2 +-
.../apache/hadoop/ozone/shell/TestScmAdminHA.java | 2 +-
.../fs/ozone/BasicOzoneClientAdapterImpl.java | 4 +-
.../ozone/BasicRootedOzoneClientAdapterImpl.java | 4 +-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 8 +-
.../s3/endpoint/TestPartUploadWithStream.java | 2 +-
.../ozone/s3/endpoint/TestUploadWithStream.java | 2 +-
51 files changed, 338 insertions(+), 338 deletions(-)
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 0a38e66048..52f435dc82 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -166,8 +166,8 @@ public class XceiverClientGrpc extends XceiverClientSpi {
// port.
int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
if (port == 0) {
- port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
}
// Add credential context to the client call
diff --git
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index aff0aa966a..58a2153352 100644
---
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -83,8 +83,8 @@ public final class XceiverClientRatis extends
XceiverClientSpi {
org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
ConfigurationSource ozoneConf, ClientTrustManager trustManager) {
final String rpcType = ozoneConf
- .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+ .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
SecurityConfig(ozoneConf), trustManager);
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index ed897f898c..e324a63d3b 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -326,65 +326,65 @@ public class OzoneConfiguration extends Configuration
new DeprecationDelta("hdds.datanode.replication.work.dir",
OZONE_CONTAINER_COPY_WORKDIR),
new DeprecationDelta("dfs.container.chunk.write.sync",
- OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY),
+ OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY),
new DeprecationDelta("dfs.container.ipc",
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT),
new DeprecationDelta("dfs.container.ipc.random.port",
- OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT),
new DeprecationDelta("dfs.container.ratis.admin.port",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT),
new DeprecationDelta("dfs.container.ratis.datanode.storage.dir",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR),
new DeprecationDelta("dfs.container.ratis.datastream.enabled",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED),
new DeprecationDelta("dfs.container.ratis.datastream.port",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT),
new DeprecationDelta("dfs.container.ratis.datastream.random.port",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT),
new DeprecationDelta("dfs.container.ratis.enabled",
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY),
new DeprecationDelta("dfs.container.ratis.ipc",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT),
new DeprecationDelta("dfs.container.ratis.ipc.random.port",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT),
new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT),
new
DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT),
new
DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS),
+
ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS),
new DeprecationDelta("dfs.container.ratis.log.purge.gap",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP),
new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT),
new DeprecationDelta("dfs.container.ratis.log.queue.num-elements",
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS),
new DeprecationDelta("dfs.container.ratis.num.container.op.executors",
- ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
new
DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume",
-
ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME),
+
ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME),
new DeprecationDelta("dfs.container.ratis.replication.level",
- ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
new DeprecationDelta("dfs.container.ratis.rpc.type",
- ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY),
new DeprecationDelta("dfs.container.ratis.segment.preallocated.size",
- ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
new DeprecationDelta("dfs.container.ratis.segment.size",
- ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
new DeprecationDelta("dfs.container.ratis.server.port",
- OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT),
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT),
new
DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries",
- ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES),
new
DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout",
- ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
new
DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions",
-
ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS),
+
ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS),
new
DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration",
-
ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
+
ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration",
- ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY),
+ ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY),
new DeprecationDelta("dfs.ratis.snapshot.threshold",
- ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY)
+ ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY)
});
}
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index cb7f6f8a3b..bcea4d0193 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -234,8 +234,8 @@ public final class RatisHelper {
private static RpcType getRpcType(ConfigurationSource conf) {
return SupportedRpcType.valueOfIgnoreCase(conf.get(
- ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT));
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT));
}
public static BiFunction<RaftPeer, GrpcTlsConfig, RaftClient> newRaftClient(
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index e093a45af0..d8fdbc1063 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -41,95 +41,95 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_DB_DIRS_PERMISSIONS =
"ozone.scm.db.dirs.permissions";
- public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
+ public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY
= "hdds.container.ratis.enabled";
- public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
+ public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT
= false;
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
+ public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY
= "hdds.container.ratis.rpc.type";
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+ public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
= "GRPC";
public static final String
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME
+ HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME
= "hdds.container.ratis.num.write.chunk.threads.per.volume";
public static final int
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
+ HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
= 10;
- public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
+ public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
= "hdds.container.ratis.replication.level";
public static final ReplicationLevel
- DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT =
ReplicationLevel.MAJORITY;
- public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
+ HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT =
ReplicationLevel.MAJORITY;
+ public static final String
HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
= "hdds.container.ratis.num.container.op.executors";
- public static final int
DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
+ public static final int
HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
= 10;
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
+ public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
"hdds.container.ratis.segment.size";
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
+ public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
"64MB";
- public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
=
+ public static final String
HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY =
"hdds.container.ratis.segment.preallocated.size";
public static final String
- DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
+ HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
"hdds.container.ratis.statemachinedata.sync.timeout";
public static final TimeDuration
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
TimeDuration.valueOf(10, TimeUnit.SECONDS);
public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
"hdds.container.ratis.statemachinedata.sync.retries";
public static final String
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
+ HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
"hdds.container.ratis.statemachine.max.pending.apply-transactions";
// The default value of maximum number of pending state machine apply
// transactions is kept same as default snapshot threshold.
public static final int
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
+ HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
100000;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
"hdds.container.ratis.log.queue.num-elements";
- public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
+ public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
1024;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
"hdds.container.ratis.log.queue.byte-limit";
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT
=
"4GB";
public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
"hdds.container.ratis.log.appender.queue.num-elements";
public static final int
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
- public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT
=
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
+ public static final String
HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
"hdds.container.ratis.log.appender.queue.byte-limit";
public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
- public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
+ public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP =
"hdds.container.ratis.log.purge.gap";
// TODO: Set to 1024 once RATIS issue around purge is fixed.
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
+ public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
1000000;
- public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
+ public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
"hdds.container.ratis.leader.pending.bytes.limit";
public static final String
- DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB";
+ HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB";
- public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY
=
+ public static final String
HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
"hdds.ratis.server.retry-cache.timeout.duration";
public static final TimeDuration
- DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
+ HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
public static final String
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
"hdds.ratis.leader.election.minimum.timeout.duration";
public static final TimeDuration
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
TimeDuration.valueOf(5, TimeUnit.SECONDS);
- public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
+ public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY =
"hdds.ratis.snapshot.threshold";
- public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
+ public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
// TODO : this is copied from OzoneConsts, may need to move to a better place
public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
diff --git
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 3007110009..0080686575 100644
---
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -36,9 +36,9 @@ import java.util.concurrent.TimeUnit;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public final class OzoneConfigKeys {
- public static final String DFS_CONTAINER_IPC_PORT =
+ public static final String HDDS_CONTAINER_IPC_PORT =
"hdds.container.ipc.port";
- public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
+ public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859;
public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
@@ -56,52 +56,52 @@ public final class OzoneConfigKeys {
* so that a mini cluster is able to launch multiple containers on a node.
*
* When set to false (default), the container port will be specified as
- * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified
- * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}.
+ * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified
+ * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}.
*/
- public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
+ public static final String HDDS_CONTAINER_IPC_RANDOM_PORT =
"hdds.container.ipc.random.port";
- public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
+ public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
false;
- public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT =
+ public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT =
"hdds.container.ratis.datastream.random.port";
public static final boolean
- DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT =
+ HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT =
false;
- public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
+ public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
"hdds.container.chunk.write.sync";
- public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
+ public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
/**
* Ratis Port where containers listen to.
*/
- public static final String DFS_CONTAINER_RATIS_IPC_PORT =
+ public static final String HDDS_CONTAINER_RATIS_IPC_PORT =
"hdds.container.ratis.ipc.port";
- public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
+ public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
/**
* Ratis Port where containers listen to admin requests.
*/
- public static final String DFS_CONTAINER_RATIS_ADMIN_PORT =
+ public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT =
"hdds.container.ratis.admin.port";
- public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857;
+ public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857;
/**
* Ratis Port where containers listen to server-to-server requests.
*/
- public static final String DFS_CONTAINER_RATIS_SERVER_PORT =
+ public static final String HDDS_CONTAINER_RATIS_SERVER_PORT =
"hdds.container.ratis.server.port";
- public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856;
+ public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856;
/**
* Ratis Port where containers listen to datastream requests.
*/
- public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED
+ public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED
= "hdds.container.ratis.datastream.enabled";
- public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT
+ public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT
= false;
- public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT
+ public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT
= "hdds.container.ratis.datastream.port";
- public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
+ public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
= 9855;
/**
@@ -133,9 +133,9 @@ public final class OzoneConfigKeys {
* When set to true, allocate a random free port for ozone container, so that
* a mini cluster is able to launch multiple containers on a node.
*/
- public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
+ public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT =
"hdds.container.ratis.ipc.random.port";
- public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
+ public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
false;
public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
@@ -325,97 +325,97 @@ public final class OzoneConfigKeys {
public static final int
OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
- public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
- public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
- = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
- public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
- = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY;
+ public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY;
+ public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
public static final String
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME;
+ HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME;
public static final int
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
+ HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
= ScmConfigKeys.
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
+ HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
public static final ReplicationLevel
- DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
- = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY;
- public static final int
DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
- = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
- public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
- = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
- = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
+ HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
+ public static final String
HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY;
+ public static final int
HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+ public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
+ = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
public static final String
- DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
+ HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
// config settings to enable stateMachineData write timeout
public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
public static final TimeDuration
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
+ public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
"hdds.container.ratis.datanode.storage.dir";
- public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY
=
- ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
+ public static final String
HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
+ ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
public static final TimeDuration
- DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
- ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
+ HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
+ ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
public static final String
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
- ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS;
- public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT;
- public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT;
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES;
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS;
+ public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT;
+ public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT
=
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT;
public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
public static final int
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT =
-
ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT
=
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT;
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT =
+
ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
+ public static final String
HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT;
public static final String
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP;
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT;
- public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT;
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP;
+ public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT;
+ public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT;
public static final String
- DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT =
- ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT;
+ HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT =
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT;
public static final String
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
- ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
+ ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
public static final TimeDuration
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
- ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
- public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
- ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY;
- public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT =
- ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT;
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
+
ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
+ public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY =
+ ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY;
+ public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT =
+ ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT;
public static final String HDDS_DATANODE_PLUGINS_KEY =
"hdds.datanode.plugins";
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 009e6396e0..346b05ebb4 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -99,11 +99,11 @@ public final class XceiverServerGrpc implements
XceiverServerSpi {
this.id = datanodeDetails.getUuid();
this.datanodeDetails = datanodeDetails;
- this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
- if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
+ if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) {
this.port = 0;
}
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index e3c2913ec5..fdbe8c981c 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -219,8 +219,8 @@ public class ContainerStateMachine extends BaseStateMachine
{
this.writeChunkFutureMap = new ConcurrentHashMap<>();
applyTransactionCompletionMap = new ConcurrentHashMap<>();
long pendingRequestsBytesLimit = (long)conf.getStorageSize(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
+
OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT,
StorageUnit.BYTES);
// cache with FIFO eviction, and if element not found, this needs
// to be obtained from disk for slow follower
@@ -238,13 +238,13 @@ public class ContainerStateMachine extends
BaseStateMachine {
this.container2BCSIDMap = new ConcurrentHashMap<>();
final int numContainerOpExecutors = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
-
OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
+
OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT);
int maxPendingApplyTransactions = conf.getInt(
ScmConfigKeys.
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS,
+ HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS,
ScmConfigKeys.
- DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
+ HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT);
applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions);
stateMachineHealthy = new AtomicBoolean(true);
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index fcc611ea3f..53ae98f50c 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -110,12 +110,12 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static
org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-import static
org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
+import static
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
import static org.apache.ratis.util.Preconditions.assertTrue;
/**
@@ -189,8 +189,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class);
assignPorts();
this.streamEnable = conf.getBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT);
RaftProperties serverProperties = newRaftProperties();
this.context = context;
this.dispatcher = dispatcher;
@@ -217,17 +217,17 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private void assignPorts() {
clientPort = determinePort(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion())
.compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) {
adminPort = determinePort(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT);
serverPort = determinePort(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT);
} else {
adminPort = clientPort;
serverPort = clientPort;
@@ -236,8 +236,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private int determinePort(String key, int defaultValue) {
boolean randomPort = conf.getBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT);
return randomPort ? 0 : conf.getInt(key, defaultValue);
}
@@ -249,14 +249,14 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private void setUpRatisStream(RaftProperties properties) {
// set the datastream config
if (conf.getBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT,
OzoneConfigKeys.
- DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) {
+ HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) {
dataStreamPort = 0;
} else {
dataStreamPort = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT);
}
RatisHelper.enableNettyStreaming(properties);
NettyConfigKeys.DataStream.setPort(properties, dataStreamPort);
@@ -327,8 +327,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
}
long snapshotThreshold =
- conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY,
- OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT);
+ conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY,
+ OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT);
RaftServerConfigKeys.Snapshot.
setAutoTriggerEnabled(properties, true);
RaftServerConfigKeys.Snapshot.
@@ -338,11 +338,11 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
setPendingRequestsLimits(properties);
int logQueueNumElements =
- conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS,
-
OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT);
+
conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS,
+
OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT);
final long logQueueByteLimit = (long) conf.getStorageSize(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT,
StorageUnit.BYTES);
RaftServerConfigKeys.Log.setQueueElementLimit(
properties, logQueueNumElements);
@@ -353,8 +353,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
false);
int purgeGap = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT);
RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap);
//Set the number of Snapshots Retained.
@@ -375,12 +375,12 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
long duration;
TimeUnit leaderElectionMinTimeoutUnit =
OzoneConfigKeys.
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
.getUnit();
duration = conf.getTimeDuration(
- OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+
OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
OzoneConfigKeys.
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT
.getDuration(), leaderElectionMinTimeoutUnit);
final TimeDuration leaderElectionMinTimeout =
TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit);
@@ -396,11 +396,11 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
TimeUnit timeUnit;
long duration;
timeUnit =
- OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
+ OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
.getUnit();
duration = conf.getTimeDuration(
- OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
- OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
+ OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT
.getDuration(), timeUnit);
final TimeDuration retryCacheTimeout =
TimeDuration.valueOf(duration, timeUnit);
@@ -410,8 +410,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private long setRaftSegmentPreallocatedSize(RaftProperties properties) {
final long raftSegmentPreallocatedSize = (long) conf.getStorageSize(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT,
StorageUnit.BYTES);
RaftServerConfigKeys.Log.setPreallocatedSize(properties,
SizeInBytes.valueOf(raftSegmentPreallocatedSize));
@@ -420,23 +420,23 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) {
final int logAppenderQueueNumElements = conf.getInt(
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS,
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT);
final int logAppenderQueueByteLimit = (int) conf.getStorageSize(
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
- DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
+ HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
StorageUnit.BYTES);
final long raftSegmentSize = (long) conf.getStorageSize(
- DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
- DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
+ HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY,
+ HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT,
StorageUnit.BYTES);
final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8;
assertTrue(raftSegmentBufferSize <= raftSegmentSize,
- () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = "
+ () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = "
+ logAppenderQueueByteLimit
- + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8"
+ + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8"
+ " = " + (raftSegmentSize - 8) + ")");
RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties,
@@ -454,11 +454,11 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true);
TimeUnit timeUnit = OzoneConfigKeys.
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit();
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit();
long duration = conf.getTimeDuration(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
OzoneConfigKeys.
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT
+ HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT
.getDuration(), timeUnit);
final TimeDuration dataSyncTimeout =
TimeDuration.valueOf(duration, timeUnit);
@@ -479,7 +479,7 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs /
dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS);
int numSyncRetries = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES,
syncTimeoutRetryDefault);
RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties,
numSyncRetries);
@@ -507,8 +507,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private RpcType setRpcType(RaftProperties properties) {
final String rpcType = conf.get(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType);
RatisHelper.setRpcType(properties, rpc);
return rpc;
@@ -517,8 +517,8 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
private void setPendingRequestsLimits(RaftProperties properties) {
long pendingRequestsBytesLimit = (long) conf.getStorageSize(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
- OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
+
OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT,
StorageUnit.BYTES);
final int pendingRequestsMegaBytesLimit =
HddsUtils.roundupMb(pendingRequestsBytesLimit);
@@ -990,9 +990,9 @@ public final class XceiverServerRatis implements
XceiverServerSpi {
// TODO create single pool with N threads if using non-incremental chunks
final int threadCountPerDisk = conf.getInt(
OzoneConfigKeys
- .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
+ .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
OzoneConfigKeys
- .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT);
+ .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT);
final int numberOfDisks =
HddsServerUtil.getDatanodeStorageDirs(conf).size();
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index c9d6672ee8..e35c634568 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -164,9 +164,9 @@ public class KeyValueHandler extends Handler {
// Requests.
final int threadCountPerDisk = conf.getInt(
OzoneConfigKeys
- .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
+ .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
OzoneConfigKeys
- .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT);
+ .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT);
final int numberOfDisks =
HddsServerUtil.getDatanodeStorageDirs(conf).size();
containerCreationLocks = Striped.lazyWeakLock(
diff --git
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index 1267ed7868..288a2d3e33 100644
---
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -51,8 +51,8 @@ public final class ChunkManagerFactory {
public static ChunkManager createChunkManager(ConfigurationSource conf,
BlockManager manager, VolumeSet volSet) {
boolean sync =
- conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
- OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
+ conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
+ OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA,
HDDS_CONTAINER_PERSISTDATA_DEFAULT);
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 33bc4a8516..c63f82025e 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -344,7 +344,7 @@ public final class ContainerTestUtils {
public static XceiverServerRatis newXceiverServerRatis(
DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
return XceiverServerRatis.newXceiverServerRatis(dn, conf,
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index 7917a4ce55..21775245ef 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -155,8 +155,8 @@ public final class SCMTestUtils {
private static boolean isUseRatis(ConfigurationSource c) {
return c.getBoolean(
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY,
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT);
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 5738f5c110..e1e1ee9172 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -82,12 +82,12 @@ public class TestDatanodeStateMachine {
conf = SCMTestUtils.getConf(testRoot);
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
TimeUnit.MILLISECONDS);
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true);
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
conf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
serverAddresses = new ArrayList<>();
scmServers = new ArrayList<>();
mockServers = new ArrayList<>();
@@ -200,7 +200,7 @@ public class TestDatanodeStateMachine {
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newPort(
DatanodeDetails.Port.Name.STANDALONE,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
datanodeDetails.setPort(port);
ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf);
try (DatanodeStateMachine stateMachine =
@@ -327,7 +327,7 @@ public class TestDatanodeStateMachine {
DatanodeDetails datanodeDetails = getNewDatanodeDetails();
DatanodeDetails.Port port = DatanodeDetails.newPort(
DatanodeDetails.Port.Name.STANDALONE,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
datanodeDetails.setPort(port);
try (DatanodeStateMachine stateMachine =
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
index 565853c22d..657afc3887 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java
@@ -178,7 +178,7 @@ public class TestDatanodeConfiguration {
public void testConf() throws Exception {
final OzoneConfiguration conf = new OzoneConfiguration();
final String dir = "dummy/dir";
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
final DatanodeRatisServerConfig ratisConf = conf.getObject(
DatanodeRatisServerConfig.class);
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
index 3859cd47c9..46b8cc6772 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
@@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker {
public void setup() throws IOException {
conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString());
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 1159d4277c..68e687fefa 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -82,7 +82,7 @@ public class TestVolumeSet {
volumes.add(volume1);
volumes.add(volume2);
conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
dataDirKey);
initializeVolumeSet();
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index e3c610bfe4..eb1f7979f8 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -228,7 +228,7 @@ public class TestVolumeSetDiskChecks {
for (int i = 0; i < numDirs; ++i) {
metaDirs.add(new File(dir, randomAlphanumeric(10)).toString());
}
- ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+ ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
String.join(",", metaDirs));
final List<String> dbDirs = new ArrayList<>();
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index 7f38eab785..8fd7b6280b 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -378,7 +378,7 @@ public class TestContainerReader {
BlockUtils.shutdownCache(conf);
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
datanodeDirs.toString());
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
datanodeDirs.toString());
MutableVolumeSet volumeSets =
new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null,
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 497418dcdc..07804c2a20 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -165,7 +165,7 @@ public class TestOzoneContainer {
throws Exception {
initTest(versionInfo);
String path = folder.toString();
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
String.join(",",
path + "/ratis1", path + "/ratis2", path + "ratis3"));
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
index 383e76dcc7..23b7da2634 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
@@ -99,9 +99,9 @@ public class TestDatanodeUpgradeToSchemaV3 {
conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED,
schemaV3Enabled);
conf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
conf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
setup();
}
diff --git
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 70d394e73b..4fae3686c9 100644
---
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -382,14 +382,14 @@ public final class HddsServerUtil {
* @return port number.
*/
public static int getContainerPort(ConfigurationSource conf) {
- return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
}
public static Collection<String> getOzoneDatanodeRatisDirectory(
ConfigurationSource conf) {
Collection<String> rawLocations = conf.getTrimmedStringCollection(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
if (rawLocations.isEmpty()) {
rawLocations = new ArrayList<>(1);
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index b241ac0f2d..f3a303cad7 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -186,7 +186,7 @@ public class TestContainerPlacement {
testDir.getAbsolutePath());
conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementCapacity.class, PlacementPolicy.class);
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
+ conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true);
SCMNodeManager scmNodeManager = createNodeManager(conf);
containerManager = createContainerManager();
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 58f65df8fd..c74e274d3d 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -151,10 +151,10 @@ public class TestEndPoint {
try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
serverAddress, 1000)) {
ozoneConf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
ozoneConf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
OzoneContainer ozoneContainer = new OzoneContainer(dnDetails,
ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf));
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
@@ -179,9 +179,9 @@ public class TestEndPoint {
*/
@Test
public void testDeletedContainersClearedOnStartup() throws Exception {
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT,
true);
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
ozoneConf.setFromObject(new ReplicationConfig().setPort(0));
try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
@@ -217,12 +217,12 @@ public class TestEndPoint {
@Test
public void testCheckVersionResponse() throws Exception {
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT,
true);
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
ozoneConf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
ozoneConf.setFromObject(new ReplicationConfig().setPort(0));
try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
serverAddress, 1000)) {
@@ -267,7 +267,7 @@ public class TestEndPoint {
*/
@Test
public void testDnLayoutVersionFile() throws Exception {
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf,
serverAddress, 1000)) {
@@ -579,7 +579,7 @@ public class TestEndPoint {
// Mini Ozone cluster will not come up if the port is not true, since
// Ratis will exit if the server port cannot be bound. We can remove this
// hard coding once we fix the Ratis default behaviour.
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
// Create a datanode state machine for stateConext used by endpoint task
try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
diff --git
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 6a5550e9fb..499d58b1ff 100644
---
a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -98,8 +98,8 @@ public class ContainerOperationClient implements ScmClient {
containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
boolean useRatis = conf.getBoolean(
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
- ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY,
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT);
if (useRatis) {
replicationFactor = HddsProtos.ReplicationFactor.THREE;
replicationType = HddsProtos.ReplicationType.RATIS;
diff --git
a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 6469a63176..f7f49fec3d 100644
---
a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++
b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -261,10 +261,10 @@ public class MiniOzoneChaosCluster extends
MiniOzoneHAClusterImpl {
TimeUnit.SECONDS);
conf.setInt(
OzoneConfigKeys
- .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
+ .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY,
4);
conf.setInt(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY,
2);
conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2);
ReplicationManagerConfiguration replicationConf =
@@ -273,8 +273,8 @@ public class MiniOzoneChaosCluster extends
MiniOzoneHAClusterImpl {
replicationConf.setEventTimeout(Duration.ofSeconds(20));
replicationConf.setDatanodeTimeoutOffset(0);
conf.setFromObject(replicationConf);
- conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100);
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100);
+ conf.setInt(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100);
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, 100);
conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100);
conf.setInt(OMConfigKeys.
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
index 6ec6a32d4f..059f7b3e03 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java
@@ -45,7 +45,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
@@ -83,7 +83,7 @@ public class TestOzoneFileSystemWithStreaming {
final int blockSize = 2 * maxFlushSize;
final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED;
- CONF.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
+ CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true);
CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B");
CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
index 6f0bd40dde..2829ba234c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
import
org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
import org.apache.ozone.test.GenericTestUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -132,7 +132,7 @@ public class TestRatisPipelineLeader {
dnToStop.get().stop();
// wait long enough based on leader election min timeout
Thread.sleep(4000 * conf.getTimeDuration(
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+ HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
5, TimeUnit.SECONDS));
GenericTestUtils.waitFor(() -> {
try {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 5338cb8a0c..c084a72a3c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -44,11 +44,11 @@ public interface RatisTestHelper {
Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class);
static void initRatisConf(RpcType rpc, OzoneConfiguration conf) {
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name());
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true);
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name());
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
- LOG.info("{} = {}", OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
+ LOG.info("{} = {}", OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,
rpc.name());
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 020f8623c4..275061ef78 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -48,7 +48,7 @@ import java.util.List;
import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
import static
org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -68,7 +68,7 @@ public class TestMiniOzoneCluster {
conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
- conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+ conf.setBoolean(HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s");
}
@@ -114,13 +114,13 @@ public class TestMiniOzoneCluster {
// Each instance of SM will create an ozone container
// that bounds to a random port.
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true);
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT,
true);
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
ozoneConf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
List<DatanodeStateMachine> stateMachines = new ArrayList<>();
try {
@@ -168,7 +168,7 @@ public class TestMiniOzoneCluster {
}
// Turn off the random port flag and test again
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
+ ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false);
try (
DatanodeStateMachine sm1 = new DatanodeStateMachine(
randomDatanodeDetails(), ozoneConf);
@@ -182,8 +182,8 @@ public class TestMiniOzoneCluster {
assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort()));
assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort()));
assertEquals(ports.iterator().next().intValue(),
- conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT));
+ conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT));
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
index 6cc6bcb8e9..8f79605ab0 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
@@ -39,12 +39,12 @@ import static
org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT;
import static
org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort;
import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort;
@@ -96,7 +96,7 @@ public class UniformDatanodesFactory implements
MiniOzoneCluster.DatanodeFactory
Path ratisDir = baseDir.resolve("ratis");
Files.createDirectories(ratisDir);
- dnConf.set(DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString());
+ dnConf.set(HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString());
if (layoutVersion != null) {
DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage(
@@ -111,11 +111,11 @@ public class UniformDatanodesFactory implements
MiniOzoneCluster.DatanodeFactory
conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort());
conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort());
conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort());
- conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort());
- conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort());
- conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort());
- conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort());
- conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort());
+ conf.setInt(HDDS_CONTAINER_IPC_PORT, getFreePort());
+ conf.setInt(HDDS_CONTAINER_RATIS_IPC_PORT, getFreePort());
+ conf.setInt(HDDS_CONTAINER_RATIS_ADMIN_PORT, getFreePort());
+ conf.setInt(HDDS_CONTAINER_RATIS_SERVER_PORT, getFreePort());
+ conf.setInt(HDDS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort());
conf.setFromObject(new
ReplicationServer.ReplicationConfig().setPort(getFreePort()));
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
index 563904922e..3f1c31edfe 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java
@@ -103,7 +103,7 @@ public class TestContainerStateMachine {
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
conf.setQuietMode(false);
OzoneManager.setTestSecureOmFlag(true);
- conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
+ conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s");
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 4588a86a48..b6eaca8e80 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -168,7 +168,7 @@ public class TestContainerStateMachineFailures {
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20));
conf.setFromObject(raftClientConfig);
- conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
+ conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
conf.setQuietMode(false);
cluster =
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10)
@@ -309,9 +309,9 @@ public class TestContainerStateMachineFailures {
// restart the hdds datanode, container should not in the regular set
OzoneConfiguration config = dn.getConf();
final String dir = config.get(OzoneConfigKeys.
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ UUID.randomUUID();
- config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
cluster.restartHddsDatanode(dn.getDatanodeDetails(), false);
ozoneContainer = cluster.getHddsDatanodes().get(index)
@@ -373,9 +373,9 @@ public class TestContainerStateMachineFailures {
OzoneConfiguration config = dn.getConf();
final String dir = config.get(OzoneConfigKeys.
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR)
+ UUID.randomUUID();
- config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails());
// restart the hdds datanode and see if the container is listed in the
// in the missing container set and not in the regular set
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index bf41df6c78..229059d84a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -106,7 +106,7 @@ public class TestContainerStateMachineFlushDelay {
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
conf.setQuietMode(false);
OzoneManager.setTestSecureOmFlag(true);
- conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
+ conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
// conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s");
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index be27dab58e..d48df574a9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -123,7 +123,7 @@ public class TestContainerStateMachineStream {
.setStreamBufferMaxSize(MAX_FLUSH_SIZE)
.applyTo(conf);
- conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
+ conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
conf.setQuietMode(false);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 4e05087929..5c0910ecdc 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -111,7 +111,7 @@ public class TestFailureHandlingByClient {
conf.setFromObject(ratisClientConfig);
conf.setTimeDuration(
- OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
1, TimeUnit.SECONDS);
conf.setBoolean(
OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index a385edd027..b4ad49a3ed 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -107,7 +107,7 @@ public class TestFailureHandlingByClientFlushDelay {
conf.setFromObject(ratisClientConfig);
conf.setTimeDuration(
- OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
1, TimeUnit.SECONDS);
conf.setBoolean(
OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index ed00686bd8..34f85d8e99 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -105,7 +105,7 @@ public class TestMultiBlockWritesWithDnFailures {
conf.setFromObject(ratisClientConfig);
conf.setTimeDuration(
- OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
+ OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
1, TimeUnit.SECONDS);
conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index 8c35d5011a..0fd31bb4b7 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -183,10 +183,10 @@ public class TestCSMMetrics {
static XceiverServerRatis newXceiverServerRatis(
DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
final String dir = TEST_DIR + dn.getUuid();
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
final ContainerDispatcher dispatcher = new TestContainerDispatcher();
return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index d4900bb487..51943a2e8d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -78,7 +78,7 @@ public class TestContainerMetrics {
Pipeline pipeline = MockPipeline
.createSingleNodePipeline();
OzoneConfiguration conf = new OzoneConfiguration();
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 7a64ddc5d5..1b8bae0d03 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -68,7 +68,7 @@ public class TestOzoneContainer {
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
@@ -99,7 +99,7 @@ public class TestOzoneContainer {
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath());
conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath());
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index 715b0678a1..4f24f8e6c3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -129,10 +129,10 @@ class TestSecureOzoneContainer {
try {
Pipeline pipeline = MockPipeline.createSingleNodePipeline();
conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString());
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline
.getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE)
.getValue());
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false);
DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
container = new OzoneContainer(dn, conf, ContainerTestUtils
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 3c89bb12ee..c05f55bd4a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -89,7 +89,7 @@ public class TestContainerServer {
public static void setup() {
DefaultMetricsSystem.setMiniClusterMode(true);
CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR);
-    CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false);
+    CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false);
DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
caClient = new DNCertificateClient(new SecurityConfig(CONF), null,
dn, null, null, null);
@@ -104,7 +104,7 @@ public class TestContainerServer {
public void testClientServer() throws Exception {
DatanodeDetails datanodeDetails = randomDatanodeDetails();
runTestClientServer(1, (pipeline, conf) -> conf
- .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
XceiverClientGrpc::new,
@@ -121,10 +121,10 @@ public class TestContainerServer {
static XceiverServerRatis newXceiverServerRatis(
DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
final String dir = TEST_DIR + dn.getUuid();
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
final ContainerDispatcher dispatcher = new TestContainerDispatcher();
return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
@@ -216,7 +216,7 @@ public class TestContainerServer {
HddsDispatcher hddsDispatcher = createDispatcher(dd,
UUID.randomUUID(), CONF);
runTestClientServer(1, (pipeline, conf) -> conf
- .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
XceiverClientGrpc::new,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 53420c0e22..e0522ac6e9 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -158,7 +158,7 @@ public class TestSecureContainerServer {
HddsDispatcher hddsDispatcher = createDispatcher(dd,
UUID.randomUUID(), CONF);
runTestClientServer(1, (pipeline, conf) -> conf
- .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+ .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
pipeline.getFirstNode()
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
XceiverClientGrpc::new,
@@ -201,14 +201,14 @@ public class TestSecureContainerServer {
static XceiverServerRatis newXceiverServerRatis(
DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
- conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
+ conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT,
dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
conf.setBoolean(
- OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
+ OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
final String dir = TEST_DIR + dn.getUuid();
- conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
+ conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
final ContainerDispatcher dispatcher = createDispatcher(dn,
UUID.randomUUID(), conf);
return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
index 8e95e6cb18..7c82633f11 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java
@@ -73,7 +73,7 @@ public class TestDnRatisLogParser {
public void testRatisLogParsing() throws Exception {
OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf();
String path =
- conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+ conf.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
UUID pid = cluster.getStorageContainerManager().getPipelineManager()
.getPipelines().get(0).getId().getId();
File pipelineDir = new File(path, pid.toString());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
index 0324d030af..c1d55accfd 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
@@ -51,7 +51,7 @@ public class TestScmAdminHA {
.build();
conf.setQuietMode(false);
// enable ratis for Scm.
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
+ conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true);
cluster.waitForClusterToBeReady();
}
diff --git
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 1614f81087..3d426ed034 100644
---
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -190,8 +190,8 @@ public class BasicOzoneClientAdapterImpl implements
OzoneClientAdapter {
OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout);
this.configuredDnPort = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
this.config = conf;
}
diff --git
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 7a80878549..ce27fce8c1 100644
---
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -198,8 +198,8 @@ public class BasicRootedOzoneClientAdapterImpl
proxy = objectStore.getClientProxy();
this.configuredDnPort = conf.getInt(
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
- OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
+ OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
// Fetches the bucket layout to be used by OFS.
initDefaultFsBucketLayout(conf);
diff --git
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 1d3850b12a..47be6aeb6d 100644
---
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -106,8 +106,8 @@ import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD;
import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
@@ -196,8 +196,8 @@ public class ObjectEndpoint extends EndpointBase {
OZONE_SCM_CHUNK_SIZE_DEFAULT,
StorageUnit.BYTES);
datastreamEnabled = ozoneConfiguration.getBoolean(
- DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
- DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT);
+ HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT);
datastreamMinLength = (long) ozoneConfiguration.getStorageSize(
OZONE_FS_DATASTREAM_AUTO_THRESHOLD,
OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES);
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
index 775d5a1976..28ce32e747 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
@@ -68,7 +68,7 @@ public class TestPartUploadWithStream {
REST.setClient(client);
OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
REST.setOzoneConfiguration(conf);
REST.init();
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
index f92496249e..d988b43023 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
@@ -81,7 +81,7 @@ public class TestUploadWithStream {
REST.setClient(client);
OzoneConfiguration conf = new OzoneConfiguration();
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED,
+ conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED,
true);
conf.setStorageSize(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, 1,
StorageUnit.BYTES);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]