BIGTOP-2576. For small clusters it is useful to turn replace-datanode-on-failure off


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/35fdeb83
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/35fdeb83
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/35fdeb83

Branch: refs/heads/master
Commit: 35fdeb83bf21f56580d3778211209fb2927e858d
Parents: 014ee22
Author: Roman Shaposhnik <[email protected]>
Authored: Mon Nov 7 10:21:37 2016 -0800
Committer: Roman Shaposhnik <[email protected]>
Committed: Mon Nov 7 23:32:39 2016 -0800

----------------------------------------------------------------------
 bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml       | 11 +++++++++++
 bigtop-deploy/puppet/modules/hadoop/manifests/init.pp    |  1 +
 .../puppet/modules/hadoop/templates/hdfs-site.xml        |  7 +++++++
 3 files changed, 19 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/35fdeb83/bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml b/bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml
index c747b4e..49904c6 100644
--- a/bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml
+++ b/bigtop-deploy/puppet/hieradata/bigtop/cluster.yaml
@@ -84,6 +84,17 @@ hadoop::common_hdfs::hadoop_namenode_host: "%{hiera('bigtop::hadoop_head_node')}"
 # actually default but needed for hadoop_namenode_uri here
 hadoop::common_hdfs::hadoop_namenode_port: "8020"
 
+# If there is a datanode/network failure in the write pipeline, DFSClient will try
+# to remove the failed datanode from the pipeline and then continue writing with the
+# remaining datanodes. As a result, the number of datanodes in the pipeline is decreased.
+# The feature is to add new datanodes to the pipeline. This is a site-wide property to
+# enable/disable the feature. When the cluster size is extremely small, e.g. 3 nodes or
+# less, cluster administrators may want to set the policy to NEVER in the default
+# configuration file or disable this feature. Otherwise, users may experience an unusually
+# high rate of pipeline failures since it is impossible to find new datanodes for replacement.
+# See also dfs.client.block.write.replace-datanode-on-failure.policy
+# hadoop::common_hdfs::hdfs_replace_datanode_on_failure: "false"
+
 # Set as shown below in site.yaml to also enable Kerberos authentication
 # on the web GUIs of journalnode, namenode, datanode, resourcemanager and
 # nodemanager when you enable Kerberos for Hadoop API communication. This

http://git-wip-us.apache.org/repos/asf/bigtop/blob/35fdeb83/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
index 732fc75..3d357bc 100644
--- a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
+++ b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
@@ -201,6 +201,7 @@ class hadoop ($hadoop_security_authentication = "simple",
       $hdfs_data_dirs = suffix($hadoop::hadoop_storage_dirs, "/hdfs"),
       $hdfs_shortcut_reader = undef,
       $hdfs_support_append = undef,
+      $hdfs_replace_datanode_on_failure = undef,
       $hdfs_webhdfs_enabled = "true",
       $hdfs_replication = undef,
       $hdfs_datanode_fsdataset_volume_choosing_policy = undef,

http://git-wip-us.apache.org/repos/asf/bigtop/blob/35fdeb83/bigtop-deploy/puppet/modules/hadoop/templates/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/modules/hadoop/templates/hdfs-site.xml b/bigtop-deploy/puppet/modules/hadoop/templates/hdfs-site.xml
index 5440574..9f98929 100644
--- a/bigtop-deploy/puppet/modules/hadoop/templates/hdfs-site.xml
+++ b/bigtop-deploy/puppet/modules/hadoop/templates/hdfs-site.xml
@@ -242,6 +242,13 @@
     <value><%= @hdfs_data_dirs.map { |dir| "file://#{dir}" }.join(",") %></value>
   </property>
 
+<% if @hdfs_replace_datanode_on_failure %>
+  <property>
+    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
+    <value><%= @hdfs_replace_datanode_on_failure %></value>
+  </property>
+<% end %>
+
 <% if @hdfs_support_append %>
   <property>
     <name>dfs.support.append</name>

Reply via email to