HDFS-13272. DataNodeHttpServer to have configurable HttpServer2 threads. Contributed by Erik Krogen
(cherry picked from commit b2acaa52d21edd4b38083bf0a749caf76dfb79fd)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54ff2931
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54ff2931
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54ff2931

Branch: refs/heads/branch-2.9
Commit: 54ff293105ae0d28484a3942fc9da793250c5fda
Parents: 1f32345
Author: Chris Douglas <[email protected]>
Authored: Wed May 2 21:23:57 2018 -0700
Committer: Chris Douglas <[email protected]>
Committed: Thu May 10 15:20:30 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/web/DatanodeHttpServer.java     | 10 +++++++++-
 .../test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java |  4 ++++
 2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ff2931/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index c44f7da..f95d696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -43,6 +43,7 @@ import io.netty.handler.stream.ChunkedWriteHandler;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -91,6 +92,11 @@ public class DatanodeHttpServer implements Closeable {
   private InetSocketAddress httpsAddress;
   static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
 
+  @InterfaceAudience.Private
+  public static final String DATANODE_HTTP_MAX_THREADS_KEY =
+      "dfs.datanode.http.max-threads";
+  private static final int DATANODE_HTTP_MAX_THREADS_DEFAULT = 10;
+
   public DatanodeHttpServer(final Configuration conf,
       final DataNode datanode,
       final ServerSocketChannel externalHttpChannel)
@@ -99,7 +105,9 @@ public class DatanodeHttpServer implements Closeable {
     this.conf = conf;
 
     Configuration confForInfoServer = new Configuration(conf);
-    confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+    confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS,
+        conf.getInt(DATANODE_HTTP_MAX_THREADS_KEY,
+            DATANODE_HTTP_MAX_THREADS_DEFAULT));
     int proxyPort =
         confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
     HttpServer2.Builder builder = new HttpServer2.Builder()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ff2931/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6dfa5b1..63348de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -855,6 +856,9 @@ public class MiniDFSCluster implements AutoCloseable {
       conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
           StaticMapping.class, DNSToSwitchMapping.class);
     }
+    // Set to the minimum number of threads possible to avoid starting
+    // unnecessary threads in unit tests
+    conf.setInt(DatanodeHttpServer.DATANODE_HTTP_MAX_THREADS_KEY, 2);
 
     // In an HA cluster, in order for the StandbyNode to perform checkpoints,
     // it needs to know the HTTP port of the Active. So, if ephemeral ports
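Illustration only, not part of the patch above: a minimal sketch of how a client of this change might override the new key through the standard Hadoop Configuration API before constructing a DataNode. The class name DatanodeHttpThreadsExample and the value 25 are made up for this example; only the property name "dfs.datanode.http.max-threads" and its default of 10 come from the patch.

import org.apache.hadoop.conf.Configuration;

// Hypothetical example class, not part of HDFS-13272.
public class DatanodeHttpThreadsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Override the key introduced by this patch; the server-side default is 10.
    conf.setInt("dfs.datanode.http.max-threads", 25);
    // DatanodeHttpServer copies this value into HttpServer2.HTTP_MAX_THREADS
    // when it builds the DataNode's internal info server (see the hunk at
    // DatanodeHttpServer.java line 105 in the diff above).
    System.out.println(conf.getInt("dfs.datanode.http.max-threads", 10));
  }
}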
