http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/nfsgateway.py
deleted file mode 100644
index 9aa100f..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/nfsgateway.py
+++ /dev/null
@@ -1,137 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from hdfs_nfsgateway import nfsgateway
-from hdfs import hdfs
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-
-class NFSGateway(Script):
-
-  def get_stack_to_component(self):
-    return {"BigInsights": "hadoop-hdfs-nfs3"}
-
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env, params.exclude_packages)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if Script.is_hdp_stack_greater_or_equal('4.1.0.0'):
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      iop_select.select("hadoop-hdfs-nfs3", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    nfsgateway(action="start")
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    nfsgateway(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    nfsgateway(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.nfsgateway_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['nfs.keytab.file',
-                         'nfs.kerberos.principal']
-    props_read_check = ['nfs.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                'nfs.keytab.file' not in security_params['hdfs-site'] or
-                'nfs.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['nfs.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'nfs.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-if __name__ == "__main__":
-  NFSGateway().execute()

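The security_status() method deleted above follows the usual Ambari agent pattern: declare per-file expectations, read the live core-site.xml/hdfs-site.xml from hadoop_conf_dir, validate them, and only then attempt a cached kinit with the nfs keytab and principal. The sketch below is illustrative only and not part of this commit; it reproduces just the validation step with plain dicts, and the helper name check_required_props and the sample values are hypothetical.

# Illustrative sketch only (not part of the commit above). It mimics the kind of
# checks build_expectations()/validate_security_config_properties() perform in
# security_status(); check_required_props and the sample values are hypothetical.
def check_required_props(parsed_configs, value_checks, non_empty_checks):
    """Return {file_name: reason} for every config file that fails a check."""
    issues = {}
    for file_name, props in value_checks.items():
        site = parsed_configs.get(file_name, {})
        for prop, expected in props.items():
            if site.get(prop, "").lower() != expected.lower():
                issues[file_name] = "property %s is not set to %s" % (prop, expected)
    for file_name, props in non_empty_checks.items():
        site = parsed_configs.get(file_name, {})
        for prop in props:
            if not site.get(prop):
                issues[file_name] = "property %s is missing or empty" % prop
    return issues

if __name__ == "__main__":
    parsed_configs = {
        "core-site": {"hadoop.security.authentication": "kerberos",
                      "hadoop.security.authorization": "true",
                      "hadoop.security.auth_to_local": "DEFAULT"},
        "hdfs-site": {"nfs.keytab.file": "/etc/security/keytabs/nfs.service.keytab",
                      "nfs.kerberos.principal": "nfs/_HOST@EXAMPLE.COM"},
    }
    value_checks = {"core-site": {"hadoop.security.authentication": "kerberos",
                                  "hadoop.security.authorization": "true"}}
    non_empty_checks = {"core-site": ["hadoop.security.auth_to_local"],
                        "hdfs-site": ["nfs.keytab.file", "nfs.kerberos.principal"]}
    print(check_required_props(parsed_configs, value_checks, non_empty_checks) or "all checks passed")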
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 93de1cc..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,448 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-import status_params
-import ambari_simplejson as json
-import utils
-import os
-import itertools
-import re
-
-command_phase=""
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-retryAble = default("/commandParams/command_retry_enabled", False)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = status_params.hdfs_user
-root_user = "root"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-namenode_pid_file = status_params.namenode_pid_file
-zkfc_pid_file = status_params.zkfc_pid_file
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
-secure_dn_ports_are_in_use = False
-
-#hadoop params
-mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
-hadoop_bin = conf_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hadoop_home = "/usr/iop/current/hadoop-client"
-if not security_enabled:
-  hadoop_secure_dn_user = '""'
-else:
-  dfs_dn_port = utils.get_port(dfs_dn_addr)
-  dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
-  dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
-  # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
-  if dfs_http_policy == "HTTPS_ONLY":
-    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
-  elif dfs_http_policy == "HTTP_AND_HTTPS":
-    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
-  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-    secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
-  if secure_dn_ports_are_in_use:
-    hadoop_secure_dn_user = hdfs_user
-  else:
-    hadoop_secure_dn_user = '""'
-
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-limits_conf_dir = "/etc/security/limits.d"
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
-hadoop_lib_home = conf_select.get_hadoop_dir("lib")
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-
-#snappy
-create_lib_snappy_symlinks = False
-snappy_so = "libsnappy.so"
-so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-so_src_dir_x86 = format("{hadoop_home}/lib")
-so_src_dir_x64 = format("{hadoop_home}/lib/native")
-so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited ; "
-
-#security params
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-falcon_user = config['configurations']['falcon-env']['falcon_user']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-klist_path_local = functions.get_klist_path()
-kinit_path_local = functions.get_kinit_path()
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-webhcat_user = config['configurations']['hive-env']['hcat_user']
-hcat_user = config['configurations']['hive-env']['hcat_user']
-hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-
-user_group = config['configurations']['cluster-env']['user_group']
-root_group = "root"
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
-
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
-namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted", 
-  format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
-  "/var/lib/hdfs/namenode/formatted"]
-dfs_name_dirs = dfs_name_dir.split(",")
-namenode_formatted_mark_dirs = []
-for dn_dir in dfs_name_dirs:
- tmp_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
- namenode_formatted_mark_dirs.append(tmp_mark_dir)
-
-# Use the namenode RPC address if configured, otherwise, fallback to the default file system
-namenode_address = None
-if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
-  namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
-  namenode_address = format("hdfs://{namenode_rpcaddress}")
-else:
-  namenode_address = config['configurations']['core-site']['fs.defaultFS']
-
-fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
-
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
-
-data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
-
-# hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-
-if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
-  https_only = True
-  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
-else:
-  https_only = False
-  journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-  
-if security_enabled:
-  dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-  dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-  dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
-
-  dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
-
-  nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-  nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-  nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
-
-  nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
-
-  jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
-  if jn_principal_name:
-    jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
-  jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
-  jn_kinit_cmd = format("{kinit_path_local} -kt {jn_keytab} {jn_principal_name};")
-else:
-  dn_kinit_cmd = ""
-  nn_kinit_cmd = ""
-  jn_kinit_cmd = ""
-   
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
-
-lzo_enabled = True
-lzo_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native"]
-io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
-if not "com.hadoop.compression.lzo" in io_compression_codecs:
-  lzo_enabled = False
-  exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
-else:
-  exclude_packages = []
-name_node_params = default("/commandParams/namenode", '{"threshold":"10"}')
-
-#hadoop params
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-java_version = int(config['hostLevelParams']['java_version'])
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-# ranger host
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-#ranger hdfs properties
-policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
-sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
-xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
-xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
-xa_db_host = config['configurations']['admin-properties']['db_host']
-repo_name = str(config['clusterName']) + '_hadoop'
-
-hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
-fs_default_name = config['configurations']['core-site']['fs.defaultFS']
-hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
-hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
-common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
-
-repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
-
-if security_enabled:
-  sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_h...@example.com")
-  sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
-
-ranger_env = config['configurations']['ranger-env']
-ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
-policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
-
-#For curl command in ranger plugin to get db connector
-jdk_location = config['hostLevelParams']['jdk_location']
-java_share_dir = '/usr/share/java'
-
-is_https_enabled = config['configurations']['hdfs-site']['dfs.https.enable'] if \
-  not is_empty(config['configurations']['hdfs-site']['dfs.https.enable']) else False
-
-if has_ranger_admin:
-  enable_ranger_hdfs = (config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes')
-  xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
-  repo_config_password = unicode(config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
-  xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
-
-  if xa_audit_db_flavor == 'mysql':
-    jdbc_symlink_name = "mysql-jdbc-driver.jar"
-    jdbc_jar_name = "mysql-connector-java.jar"
-    audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
-    jdbc_driver = "com.mysql.jdbc.Driver"
-  elif xa_audit_db_flavor == 'oracle':
-    jdbc_jar_name = "ojdbc6.jar"
-    jdbc_symlink_name = "oracle-jdbc-driver.jar"
-    colon_count = xa_db_host.count(':')
-    if colon_count == 2 or colon_count == 0:
-      audit_jdbc_url = format('jdbc:oracle:thin:@{xa_db_host}')
-    else:
-      audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
-    jdbc_driver = "oracle.jdbc.OracleDriver"
-  elif xa_audit_db_flavor == 'postgres':
-    jdbc_jar_name = "postgresql.jar"
-    jdbc_symlink_name = "postgres-jdbc-driver.jar"
-    audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
-    jdbc_driver = "org.postgresql.Driver"
-  elif xa_audit_db_flavor == 'mssql':
-    jdbc_jar_name = "sqljdbc4.jar"
-    jdbc_symlink_name = "mssql-jdbc-driver.jar"
-    audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
-    jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
-  elif xa_audit_db_flavor == 'sqla':
-    jdbc_jar_name = "sajdbc4.jar"
-    jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
-    audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
-    jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
-
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
-  driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-  driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}")
-
-  hdfs_ranger_plugin_config = {
-    'username': repo_config_username,
-    'password': repo_config_password,
-    'hadoop.security.authentication': hadoop_security_authentication,
-    'hadoop.security.authorization': hadoop_security_authorization,
-    'fs.default.name': fs_default_name,
-    'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
-    'hadoop.rpc.protection': hadoop_rpc_protection,
-    'commonNameForCertificate': common_name_for_certificate,
-    'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
-    'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
-    'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
-  }
-
-  hdfs_ranger_plugin_repo = {
-    'isActive': 'true',
-    'config': json.dumps(hdfs_ranger_plugin_config),
-    'description': 'hdfs repo',
-    'name': repo_name,
-    'repositoryType': 'hdfs',
-    'assetType': '1'
-  }
-  
-  ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
-  xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
-  xa_audit_hdfs_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else None
-  ssl_keystore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
-  ssl_truststore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
-  credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
-
-  #For SQLA explicitly disable audit to DB for Ranger
-  if xa_audit_db_flavor == 'sqla':
-    xa_audit_db_is_enabled = False

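params.py deleted above pre-binds every argument that is common to all HDFS filesystem operations (user, keytab, kinit path, hadoop bin/conf dirs, hdfs-site, default FS) onto HdfsResource with functools.partial, so component scripts only pass a path and an action. The standalone sketch below is illustrative only and not part of this commit; the stand-in HdfsResource class and the keytab path are hypothetical, since the real resource lives in resource_management and takes more arguments.

import functools

# Stand-in for resource_management's HdfsResource, illustrative only; it simply
# records the call so the effect of pre-binding the common arguments is visible.
class HdfsResource(object):
    def __init__(self, path, action=None, user=None, keytab=None,
                 security_enabled=False, **common_args):
        print("HdfsResource(path=%r, action=%r, user=%r, security_enabled=%r)"
              % (path, action, user, security_enabled))

# Same trick as params.py: fix the shared arguments once, so call sites only
# pass the path and the action.
HdfsResource = functools.partial(
    HdfsResource,
    user="hdfs",
    security_enabled=True,
    keytab="/etc/security/keytabs/hdfs.headless.keytab",  # hypothetical path
)

HdfsResource("/tmp", action="create_on_execute")
HdfsResource(None, action="execute")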
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 8e919cf..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.logger import Logger
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = functions.get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = format("dfsadmin -fs {namenode_address} -safemode get | grep OFF")
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-        user=params.hdfs_user
-      )
-    ExecuteHadoop(safemode_command,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=20,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    params.HdfsResource(dir,
-                        type="directory",
-                        action="create_on_execute",
-                        mode=0777
-    )
-    params.HdfsResource(tmp_file,
-                        type="file",
-                        action="delete_on_execute",
-    )
-
-    params.HdfsResource(tmp_file,
-                        type="file",
-                        source="/etc/passwd",
-                        action="create_on_execute"
-    )
-    params.HdfsResource(None, action="execute")
-
-    if params.has_journalnode_hosts:
-      if params.security_enabled:
-        for host in params.journalnode_hosts:
-          if params.https_only:
-            uri = format("https://{host}:{journalnode_port}")
-          else:
-            uri = format("http://{host}:{journalnode_port}")
-          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
-                                                           params.smokeuser_principal, uri, "jn_service_check",
-                                                           params.kinit_path_local, False, None, params.smoke_user)
-          if not response:
-            Logger.error("Cannot access WEB UI on: {0}. Error : {1}", uri, errmsg)
-            return 1
-      else:
-        journalnode_port = params.journalnode_port
-        checkWebUIFileName = "checkWebUI.py"
-        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-        checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only}")
-        File(checkWebUIFilePath,
-             content=StaticFile(checkWebUIFileName),
-             mode=0775)
-
-        Execute(checkWebUICmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5,
-                user=params.smoke_user
-        )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = as_user(format(
-          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

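The ZKFC check at the end of service_check.py above, and kill_zkfc() in utils.py later in this diff, both rely on the same shell idiom: the pid file exists and the pid recorded in it belongs to a live process. The sketch below is illustrative only and not part of this commit; it expresses that check in plain Python with subprocess instead of Ambari's Execute/as_user wrappers, and the pid-file path shown is hypothetical.

import os
import subprocess

def pid_file_process_running(pid_file):
    # Mirrors the shell idiom used in the scripts above:
    #   ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1
    if not os.path.isfile(pid_file):
        return False
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except ValueError:
        return False
    with open(os.devnull, "w") as devnull:
        # `ps -p <pid>` exits 0 only when that process exists.
        return subprocess.call(["ps", "-p", str(pid)],
                               stdout=devnull, stderr=devnull) == 0

if __name__ == "__main__":
    # Hypothetical path; the real one is built from hadoop_pid_dir_prefix and hdfs_user.
    print(pid_file_process_running("/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid"))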
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/setup_ranger_hdfs.py
deleted file mode 100644
index 255891e..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/setup_ranger_hdfs.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.constants import Direction
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-
-def setup_ranger_hdfs(upgrade_type=None):
-  import params
-
-  if params.has_ranger_admin:
-
-    if params.xml_configurations_supported:
-      from resource_management.libraries.functions.setup_ranger_plugin_xml import setup_ranger_plugin
-    else:
-      from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
-
-    hdp_version = None
-
-    if upgrade_type is not None:
-      hdp_version = params.version
-
-    if params.retryAble:
-      Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
-    else:
-      Logger.info("HDFS: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
-
-    setup_ranger_plugin('hadoop-client', 'hdfs',
-                        params.downloaded_custom_connector, params.driver_curl_source,
-                        params.driver_curl_target, params.java_home,
-                        params.repo_name, params.hdfs_ranger_plugin_repo,
-                        params.ranger_env, params.ranger_plugin_properties,
-                        params.policy_user, params.policymgr_mgr_url,
-                        params.enable_ranger_hdfs, conf_dict=params.hadoop_conf_dir,
-                        component_user=params.hdfs_user, component_group=params.user_group, cache_service_list=['hdfs'],
-                        plugin_audit_properties=params.config['configurations']['ranger-hdfs-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hdfs-audit'],
-                        plugin_security_properties=params.config['configurations']['ranger-hdfs-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hdfs-security'],
-                        plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hdfs-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hdfs-policymgr-ssl'],
-                        component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
-                        credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
-                        ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
-  else:
-    Logger.info('Ranger admin not installed')
-
-def create_ranger_audit_hdfs_directories(check):
-  import params
-
-  if params.has_ranger_admin:
-    if params.xml_configurations_supported and params.enable_ranger_hdfs and params.xa_audit_hdfs_is_enabled:
-      params.HdfsResource("/ranger/audit",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0755,
-                         recursive_chmod=True,
-                         only_if=check
-      )
-      params.HdfsResource("/ranger/audit/hdfs",
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.hdfs_user,
-                         group=params.hdfs_user,
-                         mode=0700,
-                         recursive_chmod=True,
-                         only_if=check
-      )
-      params.HdfsResource(None, action="execute", only_if=check)
-  else:
-    Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index bcbd22b..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-from resource_management.core.logger import Logger
-
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-
-class SNameNode(Script):
-
-  def get_stack_to_component(self):
-    return {"BigInsights": "hadoop-hdfs-secondarynamenode"}
-
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env, params.exclude_packages)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    Logger.info("Executing Stack Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.0.0.0') >= 0:
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      iop_select.select("hadoop-hdfs-secondarynamenode", params.version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs("secondarynamenode")
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    props_value_check = None
-    props_empty_check = ['dfs.secondary.namenode.kerberos.internal.spnego.principal',
-                         'dfs.secondary.namenode.keytab.file',
-                         'dfs.secondary.namenode.kerberos.principal']
-    props_read_check = ['dfs.secondary.namenode.keytab.file']
-    hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
-                                                props_read_check)
-
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-    hdfs_expectations.update(hdfs_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                 {'core-site.xml': FILE_TYPE_XML,
-                                                  'hdfs-site.xml': FILE_TYPE_XML})
-
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-      if not result_issues:  # If all validations passed successfully
-        try:
-          # Double check the dict before calling execute
-          if ('hdfs-site' not in security_params or
-                  'dfs.secondary.namenode.keytab.file' not in security_params['hdfs-site'] or
-                  'dfs.secondary.namenode.kerberos.principal' not in security_params['hdfs-site']):
-            self.put_structured_out({"securityState": "UNSECURED"})
-            self.put_structured_out(
-              {"securityIssuesFound": "Keytab file or principal are not set property."})
-            return
-
-          cached_kinit_executor(status_params.kinit_path_local,
-                                status_params.hdfs_user,
-                                security_params['hdfs-site']['dfs.secondary.namenode.keytab.file'],
-                                security_params['hdfs-site'][
-                                  'dfs.secondary.namenode.kerberos.principal'],
-                                status_params.hostname,
-                                status_params.tmp_dir)
-          self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-        except Exception as e:
-          self.put_structured_out({"securityState": "ERROR"})
-          self.put_structured_out({"securityStateErrorInfo": str(e)})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index eab5de3..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
-
-# Security related/required params
-hostname = config['hostname']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-tmp_dir = Script.get_tmp_dir()

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index f572fab..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,407 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import re
-import urllib2
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.core.resources.system import Directory, File, Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import check_process_status
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core import shell
-from resource_management.core.shell import as_user, as_sudo
-from resource_management.core.exceptions import ComponentIsNotRunning
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
-from resource_management.libraries.script.script import Script
-
-from zkfc_slave import ZkfcSlave
-
-def safe_zkfc_op(action, env):
-  """
-  Idempotent operation on the zkfc process to either start or stop it.
-  :param action: start or stop
-  :param env: environment
-  """
-  Logger.info("Performing action {0} on zkfc.".format(action))
-  zkfc = None
-  if action == "start":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      if zkfc:
-        zkfc.start(env)
-
-  if action == "stop":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      pass
-    else:
-      if zkfc:
-        zkfc.stop(env)
-
-def initiate_safe_zkfc_failover():
-  """
-  If this is the active namenode, initiate a safe failover and wait for it to become the standby.
-
-  If an error occurs, force a failover to happen by killing zkfc on this host. In this case, during the Restart,
-  will also have to start ZKFC manually.
-  """
-  import params
-
-  # Must kinit before running the HDFS command
-  if params.security_enabled:
-    Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-            user = params.hdfs_user)
-
-  active_namenode_id = None
-  standby_namenode_id = None
-  active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
-  if active_namenodes:
-    active_namenode_id = active_namenodes[0][0]
-  if standby_namenodes:
-    standby_namenode_id = standby_namenodes[0][0]
-
-  if active_namenode_id:
-    Logger.info(format("Active NameNode id: {active_namenode_id}"))
-  if standby_namenode_id:
-    Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
-  if unknown_namenodes:
-    for unknown_namenode in unknown_namenodes:
-      Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))
-
-  if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
-    # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
-    Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))
-
-    failover_command = format("hdfs haadmin -failover {namenode_id} {other_namenode_id}")
-    check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
-
-    msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
-    Logger.info(msg)
-    code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
-    Logger.info(format("Rolling Upgrade - failover command returned {code}"))
-    wait_for_standby = False
-
-    if code == 0:
-      wait_for_standby = True
-    else:
-      # Try to kill ZKFC manually
-      was_zkfc_killed = kill_zkfc(params.hdfs_user)
-      code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
-      Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
-      if code == 255 and out:
-        Logger.info("Rolling Upgrade - NameNode is already down.")
-      else:
-        if was_zkfc_killed:
-          # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
-          wait_for_standby = True
-
-    if wait_for_standby:
-      Logger.info("Waiting for this NameNode to become the standby one.")
-      Execute(check_standby_cmd,
-              user=params.hdfs_user,
-              tries=50,
-              try_sleep=6,
-              logoutput=True)
-  else:
-    msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
-    Logger.info(msg)
-
-def kill_zkfc(zkfc_user):
-  """
-  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
-  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of IOP 4.0.0.0)
-  :param zkfc_user: User that started the ZKFC process.
-  :return: Return True if ZKFC was killed, otherwise, false.
-  """
-  import params
-  if params.dfs_ha_enabled:
-    if params.zkfc_pid_file:
-      check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
-      code, out = shell.call(check_process)
-      if code == 0:
-        Logger.debug("ZKFC is running and will be killed.")
-        kill_command = format("kill -15 `cat {zkfc_pid_file}`")
-        Execute(kill_command,
-                user=zkfc_user
-        )
-        File(params.zkfc_pid_file,
-             action = "delete",
-             )
-        return True
-  return False
-
-
-def get_service_pid_file(name, user):
-  """
-  Get the pid file path that was used to start the service by the user.
-  :param name: Service name
-  :param user: User that started the service.
-  :return: PID file path
-  """
-  import params
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  return pid_file
-
-
-def service(action=None, name=None, user=None, options="", create_pid_dir=False,
-            create_log_dir=False):
-  """
-  :param action: Either "start" or "stop"
-  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
-  :param user: User to run the command as
-  :param options: Additional options to pass to command as a string
-  :param create_pid_dir: Create PID directory
-  :param create_log_dir: Create log file directory
-  """
-  import params
-
-  options = options if options else ""
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  hadoop_env_exports = {
-    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
-  }
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-
-  # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
-  # on Linux such as CentOS6.2. 
https://bugzilla.redhat.com/show_bug.cgi?id=731542
-  if name == "nfs3" :
-    pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
-    custom_export = {
-      'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
-      'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
-      'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
-    }
-    hadoop_env_exports.update(custom_export)
-
-  process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])
-
-  # on STOP directories shouldn't be created
-  # since during stop still old dirs are used (which were created during previous start)
-  if action != "stop":
-    if name == "nfs3":
-      Directory(params.hadoop_pid_dir_prefix,
-                mode=0755,
-                owner=params.root_user,
-                group=params.root_group
-      )
-    else:
-      Directory(params.hadoop_pid_dir_prefix,
-                  mode=0755,
-                  owner=params.hdfs_user,
-                  group=params.user_group
-      )
-    if create_pid_dir:
-      Directory(pid_dir,
-                owner=user,
-                recursive=True)
-    if create_log_dir:
-      if name == "nfs3":
-        Directory(log_dir,
-                  mode=0775,
-                  owner=params.root_user,
-                  group=params.user_group)
-      else:
-        Directory(log_dir,
-                  owner=user,
-                  recursive=True)
-
-  if params.security_enabled and name == "datanode":
-    ## The directory where pid files are stored in the secure data environment.
-    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
-    if params.secure_dn_ports_are_in_use:
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-    if action == 'stop' and os.path.isfile(hadoop_secure_dn_pid_file):
-        # We need special handling for this case to handle the situation
-        # when we configure non-root secure DN and then restart it
-        # to handle new configs. Otherwise we will not be able to stop
-        # a running instance 
-        user = "root"
-        
-        try:
-          check_process_status(hadoop_secure_dn_pid_file)
-          
-          custom_export = {
-            'HADOOP_SECURE_DN_USER': params.hdfs_user
-          }
-          hadoop_env_exports.update(custom_export)
-          
-        except ComponentIsNotRunning:
-          pass
-
-  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
-
-  if user == "root":
-    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
-    if options:
-      cmd += [options, ]
-    daemon_cmd = as_sudo(cmd)
-  else:
-    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} 
{action} {name}")
-    if options:
-      cmd += " " + options
-    daemon_cmd = as_user(cmd, user)
-     
-  if action == "start":
-    # remove pid file from dead process
-    File(pid_file, action="delete", not_if=process_id_exists_command)
-    Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
-
-  elif action == "stop":
-    Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
-    File(pid_file, action="delete")
-
-
-def get_value_from_jmx(qry, property):
-  try:
-    response = urllib2.urlopen(qry)
-    data = response.read()
-    if data:
-      data_dict = json.loads(data)
-      return data_dict["beans"][0][property]
-  except:
-    return None
-
-def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
-  """
-  :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.
-  If not preceded, will use the encrypted param to determine.
-  :param modeler_type: Modeler type to query using startswith function
-  :param metric: Metric to return
-  :return: Return an object representation of the metric, or None if it does not exist
-  """
-  if not nn_address or not modeler_type or not metric:
-    return None
-
-  nn_address = nn_address.strip()
-  if not nn_address.startswith("http"):
-    nn_address = ("https://"; if encrypted else "http://";) + nn_address
-  if not nn_address.endswith("/"):
-    nn_address = nn_address + "/"
-
-  nn_address = nn_address + "jmx"
-  Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % 
(modeler_type, metric, nn_address))
-
-  if security_enabled:
-    import params
-    data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
-                            "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
-  else:
-    data = urllib2.urlopen(nn_address).read()
-  my_data = None
-  if data: 
-    data_dict = json.loads(data)
-    if data_dict:
-      for el in data_dict['beans']:
-        if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
-          if metric in el:
-            my_data = el[metric]
-            if my_data:
-              my_data = json.loads(str(my_data))
-              break
-  return my_data
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None and len(m.groups()) >= 2:
-    return int(m.group(2))
-  else:
-    return None
-
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-def is_previous_fs_image():
-  """
-  Return true if there's a previous folder in the HDFS namenode directories.
-  """
-  import params
-  if params.dfs_name_dir:
-    nn_name_dirs = params.dfs_name_dir.split(',')
-    for nn_dir in nn_name_dirs:
-      prev_dir = os.path.join(nn_dir, "previous")
-      if os.path.isdir(prev_dir):
-        return True
-  return False
-
-def get_hdfs_binary(distro_component_name):
-  """
-  Get the hdfs binary to use depending on the stack and version.
-  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
-  :return: The hdfs binary to use
-  """
-  import params
-  hdfs_binary = "hdfs"
-  return hdfs_binary
-
-def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
-  """
-  Get the dfsadmin base command constructed using hdfs_binary path and passing namenode address as explicit -fs argument
-  :param hdfs_binary: path to hdfs binary to use
-  :param use_specific_namenode: flag if set and Namenode HA is enabled, then the dfsadmin command will use
-  current namenode's address
-  :return: the constructed dfsadmin base command
-  """
-  import params
-  dfsadmin_base_command = ""
-  if params.dfs_ha_enabled and use_specific_namenode:
-    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
-  else:
-    dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
-  return dfsadmin_base_command
-
-def is_previous_fs_image():
-  """
-  Return true if there's a previous folder in the HDFS namenode directories.
-  """
-  import params
-  if params.dfs_name_dir:
-    nn_name_dirs = params.dfs_name_dir.split(',')
-    for nn_dir in nn_name_dirs:
-      prev_dir = os.path.join(nn_dir, "previous")
-      if os.path.isdir(prev_dir):
-        return True
-  return False
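
For reference, the deleted get_jmx_data() helper above boils down to a plain lookup against the NameNode's /jmx endpoint. A minimal standalone sketch in Python 2 using only the standard library follows; the host, port, modeler type and metric in the example call are placeholder assumptions, not values taken from this diff.

import json
import urllib2

def query_namenode_jmx(nn_address, modeler_type, metric, encrypted=False):
    # Normalise the address the same way the deleted helper did.
    nn_address = nn_address.strip()
    if not nn_address.startswith("http"):
        nn_address = ("https://" if encrypted else "http://") + nn_address
    if not nn_address.endswith("/"):
        nn_address += "/"
    # Fetch the full JMX dump and scan the beans for the requested metric.
    data = json.loads(urllib2.urlopen(nn_address + "jmx").read())
    for bean in data.get("beans", []):
        model = bean.get("modelerType") or ""
        if model.startswith(modeler_type) and metric in bean:
            return bean[metric]
    return None

# Hypothetical usage; modeler type and metric names are placeholders.
print query_namenode_jmx("nn-host.example.com:50070",
                         "org.apache.hadoop.hdfs.server.namenode.FSNamesystem",
                         "CapacityUsed")

Like the deleted helper, this returns None rather than raising when the bean or metric is absent.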

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/zkfc_slave.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index 3bc74f1..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.security_commons import build_expectations, \
-  cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
-  FILE_TYPE_XML
-import utils  # this is needed to avoid a circular dependency since utils.py calls this class
-from hdfs import hdfs
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    # format the znode for this HA setup
-    # only run this format command if the active namenode hostname is set
-    # The Ambari UI HA Wizard prompts the user to run this command
-    # manually, so this guarantees it is only run in the Blueprints case
-    if params.dfs_ha_enabled and \
-       params.dfs_ha_namenode_active is not None:
-      success =  initialize_ha_zookeeper(params)
-      if not success:
-        raise Fail("Could not initialize HA state in zookeeper")
-
-    utils.service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env, upgrade_type=None):
-    import params
-
-    env.set_params(params)
-    utils.service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs("zkfc_slave")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-  def security_status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    props_value_check = {"hadoop.security.authentication": "kerberos",
-                         "hadoop.security.authorization": "true"}
-    props_empty_check = ["hadoop.security.auth_to_local"]
-    props_read_check = None
-    core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
-                                                props_read_check)
-    hdfs_expectations = {}
-    hdfs_expectations.update(core_site_expectations)
-
-    security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
-                                                   {'core-site.xml': FILE_TYPE_XML})
-    result_issues = validate_security_config_properties(security_params, hdfs_expectations)
-    if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
-        security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
-      if not result_issues:  # If all validations passed successfully
-        if status_params.hdfs_user_principal or status_params.hdfs_user_keytab:
-          try:
-            cached_kinit_executor(status_params.kinit_path_local,
-                                  status_params.hdfs_user,
-                                  status_params.hdfs_user_keytab,
-                                  status_params.hdfs_user_principal,
-                                  status_params.hostname,
-                                  status_params.tmp_dir)
-            self.put_structured_out({"securityState": "SECURED_KERBEROS"})
-          except Exception as e:
-            self.put_structured_out({"securityState": "ERROR"})
-            self.put_structured_out({"securityStateErrorInfo": str(e)})
-        else:
-          self.put_structured_out(
-            {"securityIssuesFound": "hdfs principal and/or keytab file is not 
specified"})
-          self.put_structured_out({"securityState": "UNSECURED"})
-      else:
-        issues = []
-        for cf in result_issues:
-          issues.append("Configuration file %s did not pass the validation. 
Reason: %s" % (cf, result_issues[cf]))
-        self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
-        self.put_structured_out({"securityState": "UNSECURED"})
-    else:
-      self.put_structured_out({"securityState": "UNSECURED"})
-
-def initialize_ha_zookeeper(params):
-  try:
-    iterations = 10
-    formatZK_cmd = "hdfs zkfc -formatZK -nonInteractive"
-    Logger.info("Initialize HA state in ZooKeeper: %s" % (formatZK_cmd))
-    for i in range(iterations):
-      Logger.info('Try %d out of %d' % (i+1, iterations))
-      code, out = shell.call(formatZK_cmd, logoutput=False, user=params.hdfs_user)
-      if code == 0:
-        Logger.info("HA state initialized in ZooKeeper successfully")
-        return True
-      elif code == 2:
-        Logger.info("HA state already initialized in ZooKeeper")
-        return True
-      else:
-        Logger.warning('HA state initialization in ZooKeeper failed with %d error code. Will retry' % (code))
-  except Exception as ex:
-    Logger.error('HA state initialization in ZooKeeper threw an exception. Reason %s' %(str(ex)))
-  return False
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()
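
The initialize_ha_zookeeper() retry loop deleted above maps onto a short subprocess-based equivalent. The sketch below keeps its exit-code handling (the deleted code treats 0 as "formatted" and 2 as "already formatted"); the su-based user switch and retry count are assumptions for illustration, not Ambari's own mechanism.

import subprocess

def initialize_ha_zookeeper(hdfs_user="hdfs", iterations=10):
    # Format the HA znode non-interactively, retrying a few times as the
    # deleted code did.
    cmd = ["su", "-", hdfs_user, "-c", "hdfs zkfc -formatZK -nonInteractive"]
    for attempt in range(1, iterations + 1):
        print "Try %d out of %d" % (attempt, iterations)
        code = subprocess.call(cmd)
        if code == 0:
            print "HA state initialized in ZooKeeper successfully"
            return True
        if code == 2:
            # Treated as "znode already exists" by the deleted code.
            print "HA state already initialized in ZooKeeper"
            return True
        print "formatZK failed with exit code %d, will retry" % code
    return False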

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/exclude_hosts_list.j2
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/hdfs.conf.j2
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/hdfs.conf.j2
deleted file mode 100644
index 2cb7365..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile {{hdfs_user_nofile_limit}}
-{{hdfs_user}}   - nproc  {{hdfs_user_nproc_limit}}
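
The two lines above render the hdfs user's nofile/nproc ulimit entries (the rendered file typically lands under /etc/security/limits.d/). A quick way to preview the output, assuming the jinja2 package is available and using placeholder limit values:

from jinja2 import Template

limits_template = Template(
    "{{hdfs_user}}   - nofile {{hdfs_user_nofile_limit}}\n"
    "{{hdfs_user}}   - nproc  {{hdfs_user_nproc_limit}}\n"
)
print limits_template.render(hdfs_user="hdfs",
                             hdfs_user_nofile_limit=128000,
                             hdfs_user_nproc_limit=65536)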

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/slaves.j2
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/HDFS/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
