This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 7939faf7d6 HDDS-815. Rename HDDS config keys prefixed with dfs. (#6274)
7939faf7d6 is described below

commit 7939faf7d6c904bf1e4ad32baa5d6d0c1de19003
Author: Sarveksha Yeshavantha Raju 
<[email protected]>
AuthorDate: Tue Feb 27 17:33:44 2024 +0530

    HDDS-815. Rename HDDS config keys prefixed with dfs. (#6274)
---
 .../hadoop/hdds/conf/OzoneConfiguration.java       | 63 ++++++++++++++++++-
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  | 38 ++++++------
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   | 22 +++----
 .../common/src/main/resources/ozone-default.xml    | 71 ++++++++++++----------
 .../content/feature/Streaming-Write-Pipeline.md    |  4 +-
 .../intellij/runConfigurations/Datanode2-ha.xml    |  2 +-
 .../intellij/runConfigurations/Datanode2.xml       |  2 +-
 .../intellij/runConfigurations/Datanode3-ha.xml    |  2 +-
 .../intellij/runConfigurations/Datanode3.xml       |  2 +-
 .../dist/src/main/compose/ozone-ha/docker-config   |  2 +-
 .../dist/src/main/compose/ozone/docker-config      |  2 +-
 .../src/main/compose/ozonesecure-ha/docker-config  |  2 +-
 .../src/main/compose/ozonesecure/docker-config     |  2 +-
 .../src/test/resources/ozone-site.xml              |  7 ++-
 14 files changed, 144 insertions(+), 77 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 69cce8db6d..ed897f898c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.server.RaftServerConfigKeys;
 
 import static java.util.Collections.unmodifiableSortedSet;
@@ -323,7 +324,67 @@ public class OzoneConfiguration extends Configuration
         new DeprecationDelta("ozone.scm.chunk.layout",
             ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY),
         new DeprecationDelta("hdds.datanode.replication.work.dir",
-            OZONE_CONTAINER_COPY_WORKDIR)
+            OZONE_CONTAINER_COPY_WORKDIR),
+        new DeprecationDelta("dfs.container.chunk.write.sync",
+            OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY),
+        new DeprecationDelta("dfs.container.ipc",
+            OzoneConfigKeys.DFS_CONTAINER_IPC_PORT),
+        new DeprecationDelta("dfs.container.ipc.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.admin.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT),
+        new DeprecationDelta("dfs.container.ratis.datanode.storage.dir",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR),
+        new DeprecationDelta("dfs.container.ratis.datastream.enabled",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED),
+        new DeprecationDelta("dfs.container.ratis.datastream.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT),
+        new DeprecationDelta("dfs.container.ratis.datastream.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.enabled",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY),
+        new DeprecationDelta("dfs.container.ratis.ipc",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT),
+        new DeprecationDelta("dfs.container.ratis.ipc.random.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT),
+        new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT),
+        new 
DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT),
+        new 
DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS),
+        new DeprecationDelta("dfs.container.ratis.log.purge.gap",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP),
+        new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT),
+        new DeprecationDelta("dfs.container.ratis.log.queue.num-elements",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS),
+        new DeprecationDelta("dfs.container.ratis.num.container.op.executors",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY),
+        new 
DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume",
+            
ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME),
+        new DeprecationDelta("dfs.container.ratis.replication.level",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY),
+        new DeprecationDelta("dfs.container.ratis.rpc.type",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.preallocated.size",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.segment.size",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY),
+        new DeprecationDelta("dfs.container.ratis.server.port",
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT),
+        new 
DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES),
+        new 
DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout",
+            ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT),
+        new 
DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions",
+            
ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS),
+        new 
DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration",
+            
ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY),
+        new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration",
+            ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY),
+        new DeprecationDelta("dfs.ratis.snapshot.threshold",
+            ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY)
     });
   }
 
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index c6760451c6..e093a45af0 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -42,93 +42,93 @@ public final class ScmConfigKeys {
       "ozone.scm.db.dirs.permissions";
 
   public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = "dfs.container.ratis.enabled";
+      = "hdds.container.ratis.enabled";
   public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
       = false;
   public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = "dfs.container.ratis.rpc.type";
+      = "hdds.container.ratis.rpc.type";
   public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
       = "GRPC";
   public static final String
       DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME
-      = "dfs.container.ratis.num.write.chunk.threads.per.volume";
+      = "hdds.container.ratis.num.write.chunk.threads.per.volume";
   public static final int
       DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT
       = 10;
   public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = "dfs.container.ratis.replication.level";
+      = "hdds.container.ratis.replication.level";
   public static final ReplicationLevel
       DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = 
ReplicationLevel.MAJORITY;
   public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY
-      = "dfs.container.ratis.num.container.op.executors";
+      = "hdds.container.ratis.num.container.op.executors";
   public static final int 
DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT
       = 10;
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY =
-      "dfs.container.ratis.segment.size";
+      "hdds.container.ratis.segment.size";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT =
       "64MB";
   public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY 
=
-      "dfs.container.ratis.segment.preallocated.size";
+      "hdds.container.ratis.segment.preallocated.size";
   public static final String
       DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB";
   public static final String
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
-      "dfs.container.ratis.statemachinedata.sync.timeout";
+      "hdds.container.ratis.statemachinedata.sync.timeout";
   public static final TimeDuration
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
       TimeDuration.valueOf(10, TimeUnit.SECONDS);
   public static final String
       DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES =
-      "dfs.container.ratis.statemachinedata.sync.retries";
+      "hdds.container.ratis.statemachinedata.sync.retries";
   public static final String
       DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS =
-      "dfs.container.ratis.statemachine.max.pending.apply-transactions";
+      "hdds.container.ratis.statemachine.max.pending.apply-transactions";
   // The default value of maximum number of pending state machine apply
   // transactions is kept same as default snapshot threshold.
   public static final int
       DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT =
       100000;
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS =
-      "dfs.container.ratis.log.queue.num-elements";
+      "hdds.container.ratis.log.queue.num-elements";
   public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT =
       1024;
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT =
-      "dfs.container.ratis.log.queue.byte-limit";
+      "hdds.container.ratis.log.queue.byte-limit";
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT =
       "4GB";
   public static final String
       DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS =
-      "dfs.container.ratis.log.appender.queue.num-elements";
+      "hdds.container.ratis.log.appender.queue.num-elements";
   public static final int
       DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1;
   public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT 
=
-      "dfs.container.ratis.log.appender.queue.byte-limit";
+      "hdds.container.ratis.log.appender.queue.byte-limit";
   public static final String
       DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
   public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP =
-      "dfs.container.ratis.log.purge.gap";
+      "hdds.container.ratis.log.purge.gap";
   // TODO: Set to 1024 once RATIS issue around purge is fixed.
   public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
       1000000;
   public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
-      "dfs.container.ratis.leader.pending.bytes.limit";
+      "hdds.container.ratis.leader.pending.bytes.limit";
   public static final String
       DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB";
 
   public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY 
=
-      "dfs.ratis.server.retry-cache.timeout.duration";
+      "hdds.ratis.server.retry-cache.timeout.duration";
   public static final TimeDuration
       DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
   public static final String
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.leader.election.minimum.timeout.duration";
+      "hdds.ratis.leader.election.minimum.timeout.duration";
   public static final TimeDuration
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(5, TimeUnit.SECONDS);
 
   public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY =
-      "dfs.ratis.snapshot.threshold";
+      "hdds.ratis.snapshot.threshold";
   public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
 
   // TODO : this is copied from OzoneConsts, may need to move to a better place
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 7bfda01840..3007110009 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -37,7 +37,7 @@ import java.util.concurrent.TimeUnit;
 @InterfaceStability.Unstable
 public final class OzoneConfigKeys {
   public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
+      "hdds.container.ipc.port";
   public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
 
   public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
@@ -60,47 +60,47 @@ public final class OzoneConfigKeys {
    * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}.
    */
   public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
+      "hdds.container.ipc.random.port";
   public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
       false;
 
   public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT =
-      "dfs.container.ratis.datastream.random.port";
+      "hdds.container.ratis.datastream.random.port";
   public static final boolean
       DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT =
       false;
 
   public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
-      "dfs.container.chunk.write.sync";
+      "hdds.container.chunk.write.sync";
   public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
   /**
    * Ratis Port where containers listen to.
    */
   public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
+      "hdds.container.ratis.ipc.port";
   public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
   /**
    * Ratis Port where containers listen to admin requests.
    */
   public static final String DFS_CONTAINER_RATIS_ADMIN_PORT =
-      "dfs.container.ratis.admin.port";
+      "hdds.container.ratis.admin.port";
   public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857;
   /**
    * Ratis Port where containers listen to server-to-server requests.
    */
   public static final String DFS_CONTAINER_RATIS_SERVER_PORT =
-      "dfs.container.ratis.server.port";
+      "hdds.container.ratis.server.port";
   public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856;
 
   /**
    * Ratis Port where containers listen to datastream requests.
    */
   public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED
-      = "dfs.container.ratis.datastream.enabled";
+      = "hdds.container.ratis.datastream.enabled";
   public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT
       = false;
   public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT
-      = "dfs.container.ratis.datastream.port";
+      = "hdds.container.ratis.datastream.port";
   public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
       = 9855;
 
@@ -134,7 +134,7 @@ public final class OzoneConfigKeys {
    * a mini cluster is able to launch multiple containers on a node.
    */
   public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
+      "hdds.container.ratis.ipc.random.port";
   public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
       false;
 
@@ -368,7 +368,7 @@ public final class OzoneConfigKeys {
       ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
 
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
+      "hdds.container.ratis.datanode.storage.dir";
 
   public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY 
=
       ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index e5e3726beb..ee0aa4514a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -46,26 +46,26 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ipc</name>
+    <name>hdds.container.ipc.port</name>
     <value>9859</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>The ipc port number of container.</description>
   </property>
 
   <property>
-    <name>dfs.container.ratis.datastream.enabled</name>
+    <name>hdds.container.ratis.datastream.enabled</name>
     <value>false</value>
     <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
     <description>It specifies whether to enable data stream of 
container.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.datastream.port</name>
+    <name>hdds.container.ratis.datastream.port</name>
     <value>9855</value>
     <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
     <description>The datastream port number of container.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.datastream.random.port</name>
+    <name>hdds.container.ratis.datastream.random.port</name>
     <value>false</value>
     <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
     <description>Allocates a random free port for ozone container datastream.
@@ -73,7 +73,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ipc.random.port</name>
+    <name>hdds.container.ipc.random.port</name>
     <value>false</value>
     <tag>OZONE, DEBUG, CONTAINER</tag>
     <description>Allocates a random free port for ozone container. This is used
@@ -82,7 +82,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.chunk.write.sync</name>
+    <name>hdds.container.chunk.write.sync</name>
     <value>false</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Determines whether the chunk writes in the container happen as
@@ -90,19 +90,19 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.statemachinedata.sync.timeout</name>
+    <name>hdds.container.ratis.statemachinedata.sync.timeout</name>
     <value>10s</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Timeout for StateMachine data writes by Ratis.
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.statemachinedata.sync.retries</name>
+    <name>hdds.container.ratis.statemachinedata.sync.retries</name>
     <value/>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Number of times the WriteStateMachineData op will be tried
       before failing. If the value is not configured, it will default
-      to (hdds.ratis.rpc.slowness.timeout / 
dfs.container.ratis.statemachinedata.sync.timeout),
+      to (hdds.ratis.rpc.slowness.timeout / 
hdds.container.ratis.statemachinedata.sync.timeout),
       which means that the WriteStatMachineData will be retried for every sync 
timeout until
       the configured slowness timeout is hit, after which the StateMachine 
will close down the pipeline.
 
@@ -112,21 +112,22 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.log.queue.num-elements</name>
+    <name>hdds.container.ratis.log.queue.num-elements</name>
     <value>1024</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Limit for the number of operations in Ratis Log Worker.
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.log.queue.byte-limit</name>
+    <name>hdds.container.ratis.log.queue.byte-limit</name>
     <value>4GB</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Byte limit for Ratis Log Worker queue.
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.log.appender.queue.num-elements</name>
+    <name>hdds.container.ratis.log.appender.queue.num-elements
+</name>
     <value>1</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Limit for number of append entries in ratis leader's
@@ -134,14 +135,16 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.log.appender.queue.byte-limit</name>
+    <name>hdds.container.ratis.log.appender.queue.byte-limit
+</name>
     <value>32MB</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Byte limit for ratis leader's log appender queue.
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.log.purge.gap</name>
+    <name>hdds.container.ratis.log.purge.gap
+</name>
     <value>1000000</value>
     <tag>OZONE, DEBUG, CONTAINER, RATIS</tag>
     <description>Purge gap between the last purged commit index
@@ -149,7 +152,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.datanode.storage.dir</name>
+    <name>hdds.container.ratis.datanode.storage.dir</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
     <description>This directory is used for storing Ratis metadata like logs. 
If
@@ -223,7 +226,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.enabled</name>
+    <name>hdds.container.ratis.enabled</name>
     <value>false</value>
     <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
     <description>Ozone supports different kinds of replication pipelines. Ratis
@@ -232,25 +235,26 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.ipc</name>
+    <name>hdds.container.ratis.ipc.port</name>
     <value>9858</value>
     <tag>OZONE, CONTAINER, PIPELINE, RATIS</tag>
     <description>The ipc port number of container for clients.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.admin.port</name>
+    <name>hdds.container.ratis.admin.port</name>
     <value>9857</value>
     <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
     <description>The ipc port number of container for admin 
requests.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.server.port</name>
+    <name>hdds.container.ratis.server.port</name>
     <value>9856</value>
     <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
     <description>The ipc port number of container for server-server 
communication.</description>
   </property>
   <property>
-    <name>dfs.container.ratis.ipc.random.port</name>
+    <name>hdds.container.ratis.ipc.random.port
+</name>
     <value>false</value>
     <tag>OZONE,DEBUG</tag>
     <description>Allocates a random free port for ozone ratis port for the
@@ -259,7 +263,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.rpc.type</name>
+    <name>hdds.container.ratis.rpc.type</name>
     <value>GRPC</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>Ratis supports different kinds of transports like netty, GRPC,
@@ -268,7 +272,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.snapshot.threshold</name>
+    <name>hdds.ratis.snapshot.threshold</name>
     <value>10000</value>
     <tag>OZONE, RATIS</tag>
     <description>Number of transactions after which a ratis snapshot should be
@@ -276,16 +280,16 @@
     </description>
   </property>
   <property>
-    
<name>dfs.container.ratis.statemachine.max.pending.apply-transactions</name>
+    
<name>hdds.container.ratis.statemachine.max.pending.apply-transactions</name>
     <value>10000</value>
     <tag>OZONE, RATIS</tag>
     <description>Maximum number of pending apply transactions in a data
       pipeline. The default value is kept same as default snapshot threshold
-      dfs.ratis.snapshot.threshold.
+      hdds.ratis.snapshot.threshold.
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.num.write.chunk.threads.per.volume</name>
+    <name>hdds.container.ratis.num.write.chunk.threads.per.volume</name>
     <value>10</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>Maximum number of threads in the thread pool that Datanode
@@ -295,7 +299,8 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.leader.pending.bytes.limit</name>
+    <name>hdds.container.ratis.leader.pending.bytes.limit
+</name>
     <value>1GB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>Limit on the total bytes of pending requests after which
@@ -303,7 +308,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.replication.level</name>
+    <name>hdds.container.ratis.replication.level</name>
     <value>MAJORITY</value>
     <tag>OZONE, RATIS</tag>
     <description>Replication level to be used by datanode for submitting a
@@ -312,7 +317,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.num.container.op.executors</name>
+    <name>hdds.container.ratis.num.container.op.executors</name>
     <value>10</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>Number of executors that will be used by Ratis to execute
@@ -320,7 +325,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.segment.size</name>
+    <name>hdds.container.ratis.segment.size</name>
     <value>64MB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The size of the raft segment file used
@@ -328,7 +333,7 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.segment.preallocated.size</name>
+    <name>hdds.container.ratis.segment.preallocated.size</name>
     <value>4MB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
     <description>The pre-allocated file size for raft segment used
@@ -336,13 +341,13 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.server.retry-cache.timeout.duration</name>
+    <name>hdds.ratis.server.retry-cache.timeout.duration</name>
     <value>600000ms</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>Retry Cache entry timeout for ratis server.</description>
   </property>
   <property>
-    <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
+    <name>hdds.ratis.leader.election.minimum.timeout.duration</name>
     <value>5s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
     <description>The minimum timeout duration for ratis leader election.
@@ -707,7 +712,7 @@
 
       For production clusters or any time you care about performance, it is
       recommended that ozone.om.db.dirs, ozone.scm.db.dirs and
-      dfs.container.ratis.datanode.storage.dir be configured separately.
+      hdds.container.ratis.datanode.storage.dir be configured separately.
     </description>
   </property>
   <property>
diff --git a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md 
b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md
index 5f55afebc3..e48a95c8bb 100644
--- a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md
+++ b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md
@@ -43,7 +43,7 @@ Set the following properties to the Ozone configuration file 
`ozone-site.xml`.
 - To enable the Streaming Write Pipeline feature, set the following property 
to true.
 ```XML
   <property>
-    <name>dfs.container.ratis.datastream.enabled</name>
+    <name>hdds.container.ratis.datastream.enabled</name>
     <value>false</value>
     <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
     <description>It specifies whether to enable data stream of 
container.</description>
@@ -52,7 +52,7 @@ Set the following properties to the Ozone configuration file 
`ozone-site.xml`.
 - Datanodes listen to the following port for the streaming traffic.
 ```XML
   <property>
-    <name>dfs.container.ratis.datastream.port</name>
+    <name>hdds.container.ratis.datastream.port</name>
     <value>9855</value>
     <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
     <description>The datastream port number of container.</description>
diff --git 
a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml 
b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
index 171494aa5d..df9c4c0ab3 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode2-ha" type="Application" 
factoryName="Application">
     <option name="MAIN_CLASS_NAME" 
value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site-ha.xml --set 
ozone.metadata.dirs=/tmp/datanode2 --set 
hdds.datanode.dir=/tmp/datanode2/storage --set 
hdds.datanode.http-address=127.0.0.1:10021 --set dfs.container.ratis.ipc=10022 
--set dfs.container.ipc=10023 --set dfs.container.ratis.server.port=10024 --set 
dfs.container.ratis.admin.port=10025 --set hdds.datanode.replication.port=10026 
--set dfs.container.ratis.datastream.port=10027 --set hd [...]
+    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site-ha.xml --set 
ozone.metadata.dirs=/tmp/datanode2 --set 
hdds.datanode.dir=/tmp/datanode2/storage --set 
hdds.datanode.http-address=127.0.0.1:10021 --set 
hdds.container.ratis.ipc.port=10022 --set hdds.container.ipc.port=10023 --set 
hdds.container.ratis.server.port=10024 --set 
hdds.container.ratis.admin.port=10025 --set 
hdds.datanode.replication.port=10026 --set hdds.container.ratis.datastream.port 
[...]
     <option name="VM_PARAMETERS" 
value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties"
 />
     <extension name="coverage">
       <pattern>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml 
b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
index 437757d649..f1f2f2f4f0 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode2" type="Application" 
factoryName="Application">
     <option name="MAIN_CLASS_NAME" 
value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set 
ozone.metadata.dirs=/tmp/datanode2 --set 
hdds.datanode.dir=/tmp/datanode2/storage --set 
hdds.datanode.http-address=127.0.0.1:10021 --set dfs.container.ratis.ipc=10022 
--set dfs.container.ipc=10023 --set dfs.container.ratis.server.port=10024 --set 
dfs.container.ratis.admin.port=10025 --set hdds.datanode.replication.port=10026 
--set dfs.container.ratis.datastream.port=10027 --set hdds. [...]
+    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set 
ozone.metadata.dirs=/tmp/datanode2 --set 
hdds.datanode.dir=/tmp/datanode2/storage --set 
hdds.datanode.http-address=127.0.0.1:10021 --set 
hdds.container.ratis.ipc.port=10022 --set hdds.container.ipc.port=10023 --set 
hdds.container.ratis.server.port=10024 --set 
hdds.container.ratis.admin.port=10025 --set 
hdds.datanode.replication.port=10026 --set 
hdds.container.ratis.datastream.port=10 [...]
     <option name="VM_PARAMETERS" 
value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties"
 />
     <extension name="coverage">
       <pattern>
diff --git 
a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3-ha.xml 
b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3-ha.xml
index 4a820682a0..5e4c7ffb94 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3-ha.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3-ha.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode3-ha" type="Application" 
factoryName="Application">
     <option name="MAIN_CLASS_NAME" 
value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site-ha.xml --set 
ozone.metadata.dirs=/tmp/datanode3 --set 
hdds.datanode.dir=/tmp/datanode3/storage --set 
hdds.datanode.http-address=127.0.0.1:10031 --set dfs.container.ratis.ipc=10032 
--set dfs.container.ipc=10033 --set dfs.container.ratis.server.port=10034 --set 
dfs.container.ratis.admin.port=10035 --set hdds.datanode.replication.port=10036 
--set dfs.container.ratis.datastream.port=10037 --set hd [...]
+    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site-ha.xml --set 
ozone.metadata.dirs=/tmp/datanode3 --set 
hdds.datanode.dir=/tmp/datanode3/storage --set 
hdds.datanode.http-address=127.0.0.1:10031 --set 
hdds.container.ratis.ipc.port=10032 --set hdds.container.ipc.port=10033 --set 
hdds.container.ratis.server.port=10034 --set 
hdds.container.ratis.admin.port=10035 --set 
hdds.datanode.replication.port=10036 --set hdds.container.ratis.datastream.port 
[...]
     <option name="VM_PARAMETERS" 
value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties"
 />
     <extension name="coverage">
       <pattern>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml 
b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
index 590251b0c5..1c9d946b81 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode3" type="Application" 
factoryName="Application">
     <option name="MAIN_CLASS_NAME" 
value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set 
ozone.metadata.dirs=/tmp/datanode3 --set 
hdds.datanode.dir=/tmp/datanode3/storage --set 
hdds.datanode.http-address=127.0.0.1:10031 --set dfs.container.ratis.ipc=10032 
--set dfs.container.ipc=10033 --set dfs.container.ratis.server.port=10034 --set 
dfs.container.ratis.admin.port=10035 --set hdds.datanode.replication.port=10036 
--set dfs.container.ratis.datastream.port=10037 --set hdds. [...]
+    <option name="PROGRAM_PARAMETERS" 
value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set 
ozone.metadata.dirs=/tmp/datanode3 --set 
hdds.datanode.dir=/tmp/datanode3/storage --set 
hdds.datanode.http-address=127.0.0.1:10031 --set 
hdds.container.ratis.ipc.port=10032 --set hdds.container.ipc.port=10033 --set 
hdds.container.ratis.server.port=10034 --set 
hdds.container.ratis.admin.port=10035 --set 
hdds.datanode.replication.port=10036 --set 
hdds.container.ratis.datastream.port=10 [...]
     <option name="VM_PARAMETERS" 
value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties"
 />
     <extension name="coverage">
       <pattern>
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
index bdd504d6e2..ab8485ef72 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config
@@ -49,7 +49,7 @@ OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
 OZONE-SITE.XML_ozone.recon.address=recon:9891
 OZONE-SITE.XML_ozone.recon.http-address=0.0.0.0:9888
 OZONE-SITE.XML_ozone.recon.https-address=0.0.0.0:9889
-OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
 
 OZONE_CONF_DIR=/etc/hadoop
 OZONE_LOG_DIR=/var/log/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index 729f036e15..83057f9d39 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -52,7 +52,7 @@ 
OZONE-SITE.XML_hdds.scm.replication.under.replicated.interval=5s
 OZONE-SITE.XML_hdds.scm.replication.over.replicated.interval=5s
 OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
 
-OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
 
 OZONE_CONF_DIR=/etc/hadoop
 OZONE_LOG_DIR=/var/log/hadoop
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
index 20b37c78fc..d8b82ff220 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config
@@ -57,7 +57,7 @@ OZONE-SITE.XML_hdds.grpc.tls.enabled=true
 OZONE-SITE.XML_ozone.server.default.replication=3
 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s
 OZONE-SITE.XML_hdds.container.report.interval=60s
-OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
 
 OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m
 OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index 4c8ce1a274..d09e2db8e3 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -84,7 +84,7 @@ OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
 OZONE-SITE.XML_hdds.container.report.interval=60s
 OZONE-SITE.XML_ozone.scm.close.container.wait.duration=5s
 
-OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
 
 HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/[email protected]
 HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab
diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml 
b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
index 21a7715305..779ed2b785 100644
--- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
+++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml
@@ -37,7 +37,7 @@
   </property>
 
   <property>
-    <name>dfs.container.ratis.num.write.chunk.threads.per.volume</name>
+    <name>hdds.container.ratis.num.write.chunk.threads.per.volume</name>
     <value>4</value>
   </property>
 
@@ -52,7 +52,7 @@
   </property>
 
   <property>
-    <name>dfs.container.ratis.datastream.enabled</name>
+    <name>hdds.container.ratis.datastream.enabled</name>
     <value>true</value>
   </property>
 
@@ -82,7 +82,7 @@
   </property>
 
   <property>
-    <name>dfs.container.ratis.log.appender.queue.byte-limit</name>
+    <name>hdds.container.ratis.log.appender.queue.byte-limit</name>
     <value>8MB</value>
   </property>
   <property>
   <property>


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to