This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new dd74eee7fc HDDS-12430. Document in ozone-default.xml the config keys 
moved from DFSConfigKeysLegacy (#7987)
dd74eee7fc is described below

commit dd74eee7fccd941c7baced88ce75967ef9f269c9
Author: Sarveksha Yeshavantha Raju 
<[email protected]>
AuthorDate: Wed Mar 5 19:59:06 2025 +0530

    HDDS-12430. Document in ozone-default.xml the config keys moved from 
DFSConfigKeysLegacy (#7987)
---
 .../org/apache/hadoop/hdds/HddsConfigKeys.java     |   2 -
 .../hadoop/hdds/conf/OzoneConfiguration.java       |   2 +-
 .../common/src/main/resources/ozone-default.xml    | 125 ++++++++++++++++++++-
 .../hadoop/ozone/TestHddsDatanodeService.java      |   2 +-
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |   3 +-
 .../TestDeleteBlocksCommandHandler.java            |   2 +
 .../container/common/volume/TestVolumeSet.java     |   2 +-
 .../common/volume/TestVolumeSetDiskChecks.java     |   6 +-
 .../replication/TestContainerImporter.java         |   2 +
 .../TestSendContainerRequestHandler.java           |   7 ++
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   |  13 +--
 .../hadoop/ozone/TestOzoneConfigurationFields.java |  14 +--
 .../hadoop/ozone/UniformDatanodesFactory.java      |   2 -
 13 files changed, 146 insertions(+), 36 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index d516cd5f27..2fcc3a67db 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -412,8 +412,6 @@ private HddsConfigKeys() {
       "hdds.datanode.dns.nameserver";
   public static final String HDDS_DATANODE_HOST_NAME_KEY =
       "hdds.datanode.hostname";
-  public static final String HDDS_DATANODE_DATA_DIR_KEY =
-      "hdds.datanode.data.dir";
   public static final String HDDS_DATANODE_USE_DN_HOSTNAME =
       "hdds.datanode.use.datanode.hostname";
   public static final boolean HDDS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index 58f33d445a..0e11be5477 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -387,7 +387,7 @@ private static void addDeprecatedKeys() {
         new DeprecationDelta("dfs.datanode.hostname",
             HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY),
         new DeprecationDelta("dfs.datanode.data.dir",
-            HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY),
+            ScmConfigKeys.HDDS_DATANODE_DIR_KEY),
         new DeprecationDelta("dfs.datanode.use.datanode.hostname",
             HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME),
         new DeprecationDelta("dfs.xframe.enabled",
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 77fa48e732..f20d606d43 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -171,8 +171,7 @@
     <name>hdds.datanode.dir</name>
     <value/>
     <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>Determines where on the local filesystem HDDS data will be
-      stored. Defaults to hdds.datanode.data.dir if not specified.
+    <description>Determines where on the local filesystem HDDS data will be 
stored.
       The directories should be tagged with corresponding storage types
       ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
       storage type will be DISK if the directory does not have a storage type
@@ -4553,4 +4552,126 @@
       allowing for better identification and analysis of performance issues.
     </description>
   </property>
+
+  <property>
+    <name>hdds.datanode.dns.interface</name>
+    <value>default</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The name of the Network Interface from which a Datanode should
+      report its IP address. e.g. eth2. This setting may be required for some
+      multi-homed nodes where the Datanodes are assigned multiple hostnames
+      and it is desirable for the Datanodes to use a non-default hostname.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.dns.nameserver</name>
+    <value>default</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The host name or IP address of the name server (DNS) which a Datanode
+      should use to determine its own host name.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.hostname</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Optional. The hostname for the Datanode containing this
+      configuration file.  Will be different for each machine.
+      Defaults to current hostname.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.use.datanode.hostname</name>
+    <value>false</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Whether Datanodes should use Datanode hostnames when
+      connecting to other Datanodes for data transfer.
+    </description>
+  </property>
+  <property>
+    <name>hdds.xframe.enabled</name>
+    <value>true</value>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      If true, then enables protection against clickjacking by returning
+      X_FRAME_OPTIONS header value set to SAMEORIGIN.
+      Clickjacking protection prevents an attacker from using transparent or
+      opaque layers to trick a user into clicking on a button
+      or link on another page.
+    </description>
+  </property>
+  <property>
+    <name>hdds.xframe.value</name>
+    <value>SAMEORIGIN</value>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      This configuration value allows user to specify the value for the
+      X-FRAME-OPTIONS. The possible values for this field are
+      DENY, SAMEORIGIN and ALLOW-FROM. Any other value will throw an
+      exception when Datanodes are starting up.
+    </description>
+  </property>
+  <property>
+    <name>hdds.metrics.session-id</name>
+    <value/>
+    <tag>OZONE, HDDS</tag>
+    <description>
+      Get the user-specified session identifier. The default is the empty 
string.
+      The session identifier is used to tag metric data that is reported to 
some
+      performance metrics system via the org.apache.hadoop.metrics API.  The
+      session identifier is intended, in particular, for use by 
Hadoop-On-Demand
+      (HOD) which allocates a virtual Hadoop cluster dynamically and 
transiently.
+      HOD will set the session identifier by modifying the mapred-site.xml file
+      before starting the cluster.
+      When not running under HOD, this identifier is expected to remain set to
+      the empty string.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.kerberos.principal</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The Datanode service principal. This is typically set to
+      dn/[email protected]. Each Datanode will substitute _HOST with its
+      own fully qualified hostname at startup. The _HOST placeholder
+      allows using the same configuration setting on all Datanodes.
+    </description>
+  </property>
+  <property>
+    <name>hdds.datanode.kerberos.keytab.file</name>
+    <value/>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      The keytab file used by each Datanode daemon to login as its
+      service principal. The principal name is configured with
+      hdds.datanode.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>hdds.metrics.percentiles.intervals</name>
+    <value></value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      Comma-delimited set of integers denoting the desired rollover intervals
+      (in seconds) for percentile latency metrics on the Datanode.
+      By default, percentile latency metrics are disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>net.topology.node.switch.mapping.impl</name>
+    <value>org.apache.hadoop.net.ScriptBasedMapping</value>
+    <tag>OZONE, SCM</tag>
+    <description>
+      The default implementation of the DNSToSwitchMapping. It
+      invokes a script specified in net.topology.script.file.name to resolve
+      node names. If the value for net.topology.script.file.name is not set, 
the
+      default value of DEFAULT_RACK is returned for all node names.
+    </description>
+  </property>
 </configuration>
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
index 7547036a59..63157450a1 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
@@ -98,7 +98,7 @@ public void setUp() throws IOException {
     conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true);
 
     String volumeDir = testDir + OZONE_URI_DELIMITER + "disk1";
-    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
   }
 
   @ParameterizedTest
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index ec64da7637..b0773db766 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos;
 import 
org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import 
org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
@@ -97,7 +98,7 @@ public static void setUp() throws Exception {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
     String volumeDir = testDir + "/disk1";
-    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, volumeDir);
 
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
     conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
index 5b9a5abe0d..8f84eb5751 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java
@@ -55,6 +55,7 @@
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import 
org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
@@ -301,6 +302,7 @@ public void 
testDeleteBlockCommandHandleWhenDeleteCommandQueuesFull()
     // Setting up the test environment
     OzoneConfiguration configuration = new OzoneConfiguration();
     configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.toString());
+    configuration.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
     DatanodeDetails datanodeDetails = 
MockDatanodeDetails.randomDatanodeDetails();
     DatanodeConfiguration dnConf =
         configuration.getObject(DatanodeConfiguration.class);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index ab99d5f883..d3fc67d053 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -76,7 +76,7 @@ public void setup() throws Exception {
     String dataDirKey = volume1 + "," + volume2;
     volumes.add(volume1);
     volumes.add(volume2);
-    conf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    conf.set(HDDS_DATANODE_DIR_KEY, dataDirKey);
     conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         dataDirKey);
     initializeVolumeSet();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 4f1838ce9f..60eea0ac3f 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -90,7 +90,7 @@ public class TestVolumeSetDiskChecks {
   @AfterEach
   public void cleanup() {
     final Collection<String> dirs = conf.getTrimmedStringCollection(
-        HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+        ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
 
     for (String d: dirs) {
       FileUtils.deleteQuietly(new File(d));
@@ -115,7 +115,7 @@ public void testOzoneDirsAreCreated() throws IOException {
 
     // Verify that the Ozone dirs were created during initialization.
     Collection<String> dirs = conf.getTrimmedStringCollection(
-        HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY);
+        ScmConfigKeys.HDDS_DATANODE_DIR_KEY);
     for (String d : dirs) {
       assertTrue(new File(d).isDirectory());
     }
@@ -222,7 +222,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int 
numDirs) {
     for (int i = 0; i < numDirs; ++i) {
       dirs.add(new File(dir, randomAlphanumeric(10)).toString());
     }
-    ozoneConf.set(HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
+    ozoneConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
         String.join(",", dirs));
 
     final List<String> metaDirs = new ArrayList<>();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
index f03a3f4079..65d1d4553a 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java
@@ -43,6 +43,7 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -74,6 +75,7 @@ class TestContainerImporter {
   @BeforeEach
   void setup() {
     conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
   }
 
   @Test
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
index a8fbd1a60d..be2c315198 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java
@@ -24,8 +24,10 @@
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 
+import java.io.File;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -38,17 +40,22 @@
 import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Test for {@link SendContainerRequestHandler}.
  */
 class TestSendContainerRequestHandler {
 
+  @TempDir
+  private File tempDir;
+
   private OzoneConfiguration conf;
 
   @BeforeEach
   void setup() {
     conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, tempDir.getAbsolutePath());
   }
 
   @Test
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index f58887ad61..81d3ec1697 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.hdds.utils;
 
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_RECON_HEARTBEAT_INTERVAL;
@@ -404,16 +403,10 @@ public static Collection<String> 
getOzoneDatanodeRatisDirectory(
     return rawLocations;
   }
 
-  public static Collection<String> getDatanodeStorageDirs(
-      ConfigurationSource conf) {
-    Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        HDDS_DATANODE_DIR_KEY);
-    if (rawLocations.isEmpty()) {
-      rawLocations = 
conf.getTrimmedStringCollection(HDDS_DATANODE_DATA_DIR_KEY);
-    }
+  public static Collection<String> getDatanodeStorageDirs(ConfigurationSource 
conf) {
+    Collection<String> rawLocations = 
conf.getTrimmedStringCollection(HDDS_DATANODE_DIR_KEY);
     if (rawLocations.isEmpty()) {
-      throw new IllegalArgumentException("No location configured in either "
-          + HDDS_DATANODE_DIR_KEY + " or " + HDDS_DATANODE_DATA_DIR_KEY);
+      throw new IllegalArgumentException("No location configured in " + 
HDDS_DATANODE_DIR_KEY);
     }
     return rawLocations;
   }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 63afb9aed6..9c040f9aa0 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -132,19 +132,7 @@ private void addPropertiesNotInXml() {
         HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT,
         OzoneConfigKeys.HDDS_SCM_CLIENT_RPC_TIME_OUT,
         OzoneConfigKeys.HDDS_SCM_CLIENT_MAX_RETRY_TIMEOUT,
-        OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY,
-        HddsConfigKeys.HDDS_DATANODE_DNS_INTERFACE_KEY,
-        HddsConfigKeys.HDDS_DATANODE_DNS_NAMESERVER_KEY,
-        HddsConfigKeys.HDDS_DATANODE_HOST_NAME_KEY,
-        HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY,
-        HddsConfigKeys.HDDS_DATANODE_USE_DN_HOSTNAME,
-        HddsConfigKeys.HDDS_XFRAME_OPTION_ENABLED,
-        HddsConfigKeys.HDDS_XFRAME_OPTION_VALUE,
-        HddsConfigKeys.HDDS_METRICS_SESSION_ID_KEY,
-        ScmConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
-        HddsConfigKeys.HDDS_DATANODE_KERBEROS_PRINCIPAL_KEY,
-        HddsConfigKeys.HDDS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
-        HddsConfigKeys.HDDS_METRICS_PERCENTILES_INTERVALS_KEY
+        OzoneConfigKeys.HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY
     ));
   }
 }
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
index e9672bc601..1029860375 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone;
 
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_DATA_DIR_KEY;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED;
@@ -96,7 +95,6 @@ public OzoneConfiguration apply(OzoneConfiguration conf) 
throws IOException {
     }
     String reservedSpaceString = String.join(",", reservedSpaceList);
     String listOfDirs = String.join(",", dataDirs);
-    dnConf.set(HDDS_DATANODE_DATA_DIR_KEY, listOfDirs);
     dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs);
     dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString);
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to