This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
     new d4c46c3  AMBARI-24681. Cannot deploy cluster without HDFS_CLIENT (#2370)
d4c46c3 is described below

commit d4c46c3f145eedcf1e9beaf945b61d7db72ef096
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Sep 25 11:31:06 2018 +0200

    AMBARI-24681. Cannot deploy cluster without HDFS_CLIENT (#2370)
---
 .../main/resources/stack-hooks/after-INSTALL/scripts/params.py   | 7 +++++--
 .../stack-hooks/after-INSTALL/scripts/shared_initialization.py   | 4 ++--
 .../src/main/resources/stack-hooks/before-ANY/scripts/hook.py    | 2 +-
 .../src/main/resources/stack-hooks/before-ANY/scripts/params.py  | 7 +++++--
 .../stack-hooks/before-ANY/scripts/shared_initialization.py      | 4 ++--
 .../main/resources/stack-hooks/before-INSTALL/scripts/params.py  | 6 +++---
 .../main/resources/stack-hooks/before-START/scripts/params.py    | 9 +++++----
 .../stack-hooks/before-START/scripts/shared_initialization.py    | 2 +-
 8 files changed, 24 insertions(+), 17 deletions(-)

diff --git 
a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py 
b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
index dc64b0b..5b6e864 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
@@ -97,10 +97,13 @@ mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefi
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 user_group = config['configurations']['cluster-env']['user_group']
 
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
 hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
-has_hdfs_clients = not len(hdfs_client_hosts) == 0
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_namenode = len(namenode_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 
-if has_hdfs_clients or dfs_type == 'HCFS':
+if has_hdfs or dfs_type == 'HCFS':
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 
   mount_table_xml_inclusion_file_full_path = None
diff --git 
a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
 
b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
index 0633545..8e0c2f3 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
@@ -77,8 +77,8 @@ def setup_config():
   else:
     Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
 
-  if is_hadoop_conf_dir_present and (params.has_hdfs_clients or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config diretory exists
+  if is_hadoop_conf_dir_present and (params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
     XmlConfig("core-site.xml",
               conf_dir=params.hadoop_conf_dir,
               configurations=params.config['configurations']['core-site'],
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py 
b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
index dba7bce..25ca3a9 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
@@ -29,7 +29,7 @@ class BeforeAnyHook(Hook):
     env.set_params(params)
 
     setup_users()
-    if params.has_hdfs_clients or params.dfs_type == 'HCFS':
+    if params.has_hdfs or params.dfs_type == 'HCFS':
       setup_hadoop_env()
     setup_java()
 
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py 
b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
index 26ff345..1d69dac 100644
--- a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -177,6 +177,7 @@ zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
 user_group = config['configurations']['cluster-env']['user_group']
 
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
 hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
@@ -188,7 +189,9 @@ zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
 version_for_stack_feature_checks = get_stack_feature_version(config)
 
 
-has_hdfs_clients = not len(hdfs_client_hosts) == 0
+has_namenode = len(namenode_hosts) > 0
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_tez = 'tez-site' in config['configurations']
 has_hbase_masters = not len(hbase_master_hosts) == 0
@@ -242,7 +245,7 @@ for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
   if found:
     break
 
-if has_hdfs_clients or dfs_type == 'HCFS':
+if has_hdfs or dfs_type == 'HCFS':
     hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
     hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
 
b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
index 5034bbe..ec9497f 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
@@ -74,7 +74,7 @@ def setup_users():
       Logger.info('Skipping setting uid for hbase user as host is sys prepped')
 
   if should_create_users_and_groups:
-    if params.has_hdfs_clients:
+    if params.has_hdfs:
       create_dfs_cluster_admins()
    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
       create_tez_am_view_acls()
@@ -180,7 +180,7 @@ def setup_hadoop_env():
   import params
   stackversion = params.stack_version_unformatted
   Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_hdfs_clients or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+  if params.has_hdfs or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
     if params.security_enabled:
       tc_owner = "root"
     else:
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py 
b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
index 15b51d1..0ba8332 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -60,14 +60,14 @@ hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 hs_host = default("/clusterHostInfo/historyserver_hosts", [])
 jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
 storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
 falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
 
 has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
+has_namenode = len(namenode_hosts) > 0
 has_hs = not len(hs_host) == 0
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
@@ -81,7 +81,7 @@ has_storm_server = not len(storm_server_hosts) == 0
 has_falcon_server = not len(falcon_host) == 0
 has_tez = 'tez-site' in config['configurations']
 
-is_namenode_master = hostname in namenode_host
+is_namenode_master = hostname in namenode_hosts
 is_jtnode_master = hostname in jtnode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py 
b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
index e3c22ba..faccce3 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -111,7 +111,7 @@ hive_server_host =  default("/clusterHostInfo/hive_server_hosts", [])
 hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
 hs_host = default("/clusterHostInfo/historyserver_hosts", [])
 jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
-namenode_host = default("/clusterHostInfo/namenode_hosts", [])
+namenode_hosts = default("/clusterHostInfo/namenode_hosts", [])
 hdfs_client_hosts = default("/clusterHostInfo/hdfs_client_hosts", [])
 zk_hosts = default("/clusterHostInfo/zookeeper_server_hosts", [])
 ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
@@ -124,8 +124,9 @@ if 'cluster-env' in config['configurations'] and \
 else:
  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
 
-has_namenode = not len(namenode_host) == 0
-has_hdfs_clients = not len(hdfs_client_hosts) == 0
+has_namenode = len(namenode_hosts) > 0
+has_hdfs_clients = len(hdfs_client_hosts) > 0
+has_hdfs = has_hdfs_clients or has_namenode
 has_resourcemanager = not len(rm_host) == 0
 has_slaves = not len(slave_hosts) == 0
 has_oozie_server = not len(oozie_servers) == 0
@@ -136,7 +137,7 @@ has_zk_host = not len(zk_hosts) == 0
 has_ganglia_server = not len(ganglia_server_hosts) == 0
 has_metric_collector = not len(ams_collector_hosts) == 0
 
-is_namenode_master = hostname in namenode_host
+is_namenode_master = hostname in namenode_hosts
 is_jtnode_master = hostname in jtnode_host
 is_rmnode_master = hostname in rm_host
 is_hsnode_master = hostname in hs_host
diff --git 
a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
 
b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
index ef31242..ce6b869 100644
--- 
a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
+++ 
b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
@@ -96,7 +96,7 @@ def setup_hadoop():
 
     create_microsoft_r_dir()
 
-  if params.has_hdfs_clients or params.dfs_type == 'HCFS':
+  if params.has_hdfs or params.dfs_type == 'HCFS':
     # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
     if params.sysprep_skip_copy_fast_jar_hdfs:
       print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"

Reply via email to