HDFS-7684. The host:port settings of the daemons should be trimmed before use. Contributed by Anu Engineer.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/693e43c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/693e43c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/693e43c4

Branch: refs/heads/HDFS-EC
Commit: 693e43c4dcdbc56efa3c737a0e4941f01ed4ad23
Parents: 2efa407
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Thu Feb 12 17:38:37 2015 -0800
Committer: Zhe Zhang <z...@apache.org>
Committed: Mon Feb 16 10:29:48 2015 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../server/blockmanagement/DatanodeManager.java |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   6 +-
 .../server/datanode/web/DatanodeHttpServer.java |   2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   6 +-
 .../hdfs/server/namenode/ImageServlet.java      |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   4 +-
 .../server/namenode/NameNodeHttpServer.java     |   4 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |   4 +-
 .../hdfs/server/namenode/TestMalformedURLs.java |  59 ++++++++
 .../src/test/resources/hdfs-site.malformed.xml  | 143 +++++++++++++++++++
 11 files changed, 223 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6c1885e..09ae2e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -628,6 +628,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7694. FSDataInputStream should support "unbuffer" (cmccabe)
 
+    HDFS-7684. The host:port settings of the daemons should be trimmed before
+    use. (Anu Engineer via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.
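The change itself is mechanical: every host:port value that is fed into
NetUtils.createSocketAddr() is now read with Configuration.getTrimmed()
instead of Configuration.get(), so leading and trailing whitespace in
hdfs-site.xml no longer reaches the address parser. A minimal standalone
sketch of the failure mode this avoids (not part of the patch; the
hardcoded key and value are for illustration only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;

    public class TrimmedAddressSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // A hand-edited config file can easily carry a trailing space.
        conf.set("dfs.namenode.http-address", "0.0.0.0:50070 ");

        // conf.get() would return the raw value, and the trailing space
        // makes createSocketAddr() reject it as an invalid host:port
        // authority. conf.getTrimmed() strips the whitespace first, so
        // parsing succeeds and prints the address, e.g. /0.0.0.0:50070.
        System.out.println(NetUtils.createSocketAddr(
            conf.getTrimmed("dfs.namenode.http-address")));
      }
    }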
http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 15e7010..f5fe161 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -187,16 +187,16 @@ public class DatanodeManager {
     this.fsClusterStats = newFSClusterStats();
 
     this.defaultXferPort = NetUtils.createSocketAddr(
-          conf.get(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
               DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
     this.defaultInfoPort = NetUtils.createSocketAddr(
-          conf.get(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
              DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
     this.defaultInfoSecurePort = NetUtils.createSocketAddr(
-          conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
              DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
     this.defaultIpcPort = NetUtils.createSocketAddr(
-          conf.get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
              DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
     try {
       this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 8d3b3a2..4428408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -726,7 +726,7 @@ public class DataNode extends ReconfigurableBase
 
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        conf.get(DFS_DATANODE_IPC_ADDRESS_KEY));
+        conf.getTrimmed(DFS_DATANODE_IPC_ADDRESS_KEY));
 
     // Add all the RPC protocols that the Datanode implements
     RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
@@ -1324,7 +1324,7 @@ public class DataNode extends ReconfigurableBase
   /**
    * Determine the http server's effective addr
    */
   public static InetSocketAddress getInfoAddr(Configuration conf) {
-    return NetUtils.createSocketAddr(conf.get(DFS_DATANODE_HTTP_ADDRESS_KEY,
+    return NetUtils.createSocketAddr(conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY,
         DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
   }
@@ -2825,7 +2825,7 @@ public class DataNode extends ReconfigurableBase
 
   static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
+        conf.getTrimmed(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
 
   @Override // DataNodeMXBean

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index 4ee82fb..b620ba6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -151,7 +151,7 @@ public class DatanodeHttpServer implements Closeable {
     }
 
     if (httpsServer != null) {
-      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
+      InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.getTrimmed(
          DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
       ChannelFuture f = httpsServer.bind(secInfoSocAddr);
       f.syncUninterruptibly();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index b7f8a38..430f00c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -94,13 +94,13 @@ public class BackupNode extends NameNode {
   /////////////////////////////////////////////////////
   @Override // NameNode
   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
-    String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
+    String addr = conf.getTrimmed(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }
 
   @Override
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
-    String addr = conf.get(BN_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return null;
     }
@@ -122,7 +122,7 @@ public class BackupNode extends NameNode {
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     assert getNameNodeAddress() != null : "rpcAddress should be calculated first";
-    String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
+    String addr = conf.getTrimmed(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     return NetUtils.createSocketAddr(addr);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index 702c8f1..c565eb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -247,7 +247,7 @@ public class ImageServlet extends HttpServlet {
           DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
           conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
           DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-          conf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
               DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
       LOG.warn(msg);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 9dcf25b..04be1ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -450,7 +450,7 @@ public class NameNode implements NameNodeStatusMXBean {
    */
   public static InetSocketAddress getServiceAddress(Configuration conf,
       boolean fallback) {
-    String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return fallback ? getAddress(conf) : null;
     }
@@ -578,7 +578,7 @@ public class NameNode implements NameNodeStatusMXBean {
   /** @return the NameNode HTTP address. */
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(
-      conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
+      conf.getTrimmed(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
 
   protected void loadNamesystem(Configuration conf) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index f9fbdc2..662c0e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
     final String infoHost = bindAddress.getHostName();
 
     final InetSocketAddress httpAddr = bindAddress;
-    final String httpsAddrString = conf.get(
+    final String httpsAddrString = conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
@@ -127,7 +127,7 @@ public class NameNodeHttpServer {
 
     if (policy.isHttpsEnabled()) {
       // assume same ssl port for all datanodes
-      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+      InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.getTrimmed(
          DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
              + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
       httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 10f1720..83e6426 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -200,7 +200,7 @@ public class SecondaryNameNode implements Runnable,
   }
 
   public static InetSocketAddress getHttpAddress(Configuration conf) {
-    return NetUtils.createSocketAddr(conf.get(
+    return NetUtils.createSocketAddr(conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
@@ -253,7 +253,7 @@ public class SecondaryNameNode implements Runnable,
 
     final InetSocketAddress httpAddr = infoSocAddr;
 
-    final String httpsAddrString = conf.get(
+    final String httpsAddrString = conf.getTrimmed(
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
new file mode 100644
index 0000000..2515da1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import static org.junit.Assert.assertNotEquals;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+public class TestMalformedURLs {
+  private MiniDFSCluster cluster;
+  Configuration config;
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration.addDefaultResource("hdfs-site.malformed.xml");
+    config = new Configuration();
+  }
+
+  @Test
+  public void testTryStartingCluster() throws Exception {
+    // if we are able to start the cluster, it means
+    // that we were able to read the configuration
+    // correctly.
+
+    assertNotEquals(config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY),
+        config.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+    cluster = new MiniDFSCluster.Builder(config).build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/693e43c4/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml
new file mode 100644
index 0000000..fdf5017
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.malformed.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+This file creates URLs with spaces at the beginning and
+end and makes sure that we read them correctly.
+JIRA - HDFS 7684
+
+-->
+
+
+<configuration>
+
+<property>
+  <name>dfs.namenode.secondary.http-address</name>
+  <value>0.0.0.0:50090 </value>
+  <description>
+    The secondary namenode http server address and port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.secondary.https-address</name>
+  <value>0.0.0.0:50091 </value>
+  <description>
+    The secondary namenode HTTPS server address and port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.address</name>
+  <value>0.0.0.0:50010 </value>
+  <description>
+    The datanode server address and port for data transfer.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.http.address</name>
+  <value>0.0.0.0:50075 </value>
+  <description>
+    The datanode http server address and port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.ipc.address</name>
+  <value>0.0.0.0:50020 </value>
+  <description>
+    The datanode ipc server address and port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.handler.count</name>
+  <value>10</value>
+  <description>The number of server threads for the datanode.</description>
+</property>
+
+<property>
+  <name>dfs.namenode.http-address</name>
+  <value>0.0.0.0:50070 </value>
+  <description>
+    The address and the base port where the dfs namenode web ui will listen on.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.https.address</name>
+  <value>0.0.0.0:50475 </value>
+  <description>The datanode secure http server address and port.</description>
+</property>
+
+<property>
+  <name>dfs.namenode.https-address</name>
+  <value>0.0.0.0:50470 </value>
+  <description>The namenode secure http server address and port.</description>
+</property>
+
+<property>
+  <name>dfs.namenode.backup.address</name>
+  <value>0.0.0.0:50100 </value>
+  <description>
+    The backup node server address and port.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.backup.http-address</name>
+  <value> 0.0.0.0:50105 </value>
+  <description>
+    The backup node http server address and port.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+
+<property>
+  <name>dfs.journalnode.rpc-address</name>
+  <value>0.0.0.0:8485</value>
+  <description>
+    The JournalNode RPC server address and port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.http-address</name>
+  <value>0.0.0.0:8480</value>
+  <description>
+    The address and port the JournalNode HTTP server listens on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.https-address</name>
+  <value>0.0.0.0:8481</value>
+  <description>
+    The address and port the JournalNode HTTPS server listens on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+
+</configuration>
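A note on how the test above is wired: Configuration.addDefaultResource(
"hdfs-site.malformed.xml") registers the malformed file before the
Configuration object is built, so every daemon address in it is read with
its whitespace intact. The assertNotEquals() check first confirms that the
raw and trimmed values really do differ, and a successful MiniDFSCluster
startup then demonstrates that the trimmed values parse and bind cleanly.
To run just this test (a standard Surefire invocation, assuming the usual
build setup, from hadoop-hdfs-project/hadoop-hdfs):

    mvn test -Dtest=TestMalformedURLs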