http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index 485686f..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import time
-from resource_management import *
-from resource_management.core.shell import as_user
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from ambari_commons import OSConst
-
-def oozie_service(action = 'start', upgrade_type=None):
-  """
-  Starts or stops the Oozie service
-  :param action: 'start' or 'stop'
-  :param upgrade_type: type of upgrade, either "rolling" or "non_rolling"
-  skipped since a variation of them was performed during the rolling upgrade
-  :return:
-  """
-  import params
-
-  environment={'OOZIE_CONFIG': params.conf_dir}
-
-  if params.security_enabled:
-    if params.oozie_principal is None:
-      oozie_principal_with_host = 'missing_principal'
-    else:
-      oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
-    kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
-  else:
-    kinit_if_needed = ""
-
-  no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
-
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
-
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
-       params.jdbc_driver_name == "org.postgresql.Driver" or \
-       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-
-    if upgrade_type is None:
-
-      if not os.path.isfile(params.target) and params.jdbc_driver_name == "org.postgresql.Driver":
-        print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
-          "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
-          "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
-          "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
-          "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
-        exit(1)
-
-      if db_connection_check_command:
-        Execute( db_connection_check_command,
-                 tries=5,
-                 try_sleep=10,
-                 user=params.oozie_user,
-        )
-
-      Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
-               user = params.oozie_user, not_if = no_op_test,
-               ignore_failures = True
-      )
-
-      if params.security_enabled:
-        Execute(kinit_if_needed,
-                user = params.oozie_user,
-        )
-      if params.host_sys_prepped:
-        print "Skipping creation of oozie sharelib as host is sys prepped"
-        hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
-      elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-        # check with webhdfs is much faster than executing hadoop fs -ls.
-        util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
-        list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
-        hdfs_share_dir_exists = ('FileStatus' in list_status)
-      else:
-        # have to do time expensive hadoop fs -ls check.
-        hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
-                                 user=params.oozie_user)[0]
-
-      if not hdfs_share_dir_exists:
-        Execute( params.put_shared_lib_to_hdfs_cmd,
-                 user = params.oozie_user,
-                 path = params.execute_path
-        )
-        params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
-                             type="directory",
-                             action="create_on_execute",
-                             mode=0755,
-                             recursive_chmod=True,
-        )
-        params.HdfsResource(None, action="execute")
-
-
-    # start oozie
-    Execute( start_cmd, environment=environment, user = params.oozie_user,
-         not_if = no_op_test )
-
-  elif action == 'stop':
-    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-stop.sh")
-    # stop oozie
-    Execute(stop_cmd, environment=environment, only_if  = no_op_test,
-      user = params.oozie_user)
-    File(params.pid_file, action = "delete")
-    # Wait a bit longer for the database (Derby) to shut down completely, since it only allows one connected JVM.
-    time.sleep(10)

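[Editor's note] The heart of the deleted start/stop logic above is the shell-level pid test bound to `no_op_test`, which Ambari passes as a `not_if` guard on start and an `only_if` guard on stop so both are idempotent. A minimal standalone sketch of that check, assuming plain subprocess instead of Ambari's Execute/as_user wrappers (the pid-file path below is hypothetical):

# Sketch of the pid-file liveness check built as `no_op_test` in the diff.
import subprocess

def oozie_is_running(pid_file="/var/run/oozie/oozie.pid"):
    # Same shell test as the diff: the pid file exists AND the process it
    # names is still alive (`ps -p` exits non-zero for a dead pid).
    cmd = "ls %s >/dev/null 2>&1 && ps -p `cat %s` >/dev/null 2>&1" % (pid_file, pid_file)
    return subprocess.call(cmd, shell=True) == 0

if __name__ == "__main__":
    # start would be skipped (not_if) and stop allowed (only_if) when True.
    print(oozie_is_running())
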
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index 55b91c5..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import *
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.core import System
-from resource_management.libraries import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions import get_port_from_url
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.resources import HdfsResource
-from resource_management.libraries.functions import conf_select
-from urlparse import urlparse
-import status_params
-import itertools
-import os
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-hostname = config["hostname"]
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-stack_name = default("/hostLevelParams/stack_name", None)
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-#hadoop params
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hadoop_lib_home = conf_select.get_hadoop_dir("lib")
-
-
-# if this is a server action, then use the server binaries; smoke tests
-# use the client binaries
-server_role_dir_mapping = { 'OOZIE_SERVER' : 'oozie-server',
-  'OOZIE_SERVICE_CHECK' : 'oozie-client' }
-
-command_role = default("/role", "")
-if command_role not in server_role_dir_mapping:
-  command_role = 'OOZIE_SERVICE_CHECK'
-
-oozie_root = server_role_dir_mapping[command_role]
-
-
-# using the correct oozie root dir, format the correct location
-oozie_lib_dir = format("/usr/iop/current/{oozie_root}")
-oozie_setup_sh = format("/usr/iop/current/{oozie_root}/bin/oozie-setup.sh")
-oozie_webapps_dir = format("/usr/iop/current/{oozie_root}/oozie-server/webapps")
-oozie_webapps_conf_dir = format("/usr/iop/current/{oozie_root}/oozie-server/conf")
-oozie_libext_dir = format("/usr/iop/current/{oozie_root}/libext")
-#oozie_libext_customer_dir = format("/usr/iop/current/{oozie_root}/libext-customer")
-oozie_server_dir = format("/usr/iop/current/{oozie_root}/oozie-server")
-oozie_shared_lib = format("/usr/iop/current/{oozie_root}/share")
-oozie_home = format("/usr/iop/current/{oozie_root}")
-oozie_bin_dir = format("/usr/iop/current/{oozie_root}/bin")
-oozie_examples_regex = format("/usr/iop/current/{oozie_root}/doc")
-
-# set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that matches the version of oozie
-falcon_home = '/usr/iop/current/falcon-client'
-if stack_version is not None:
-  falcon_home = '/usr/iop/{0}/falcon'.format(stack_version)
-execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
-
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-conf_dir = status_params.conf_dir
-hive_conf_dir = format("{conf_dir}/action-conf/hive")
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-oozie_admin_users = format(config['configurations']['oozie-env']['oozie_admin_users'])
-user_group = config['configurations']['cluster-env']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-java_share_dir = "/usr/share/java"
-# Dependency on ext_js  not supported on IOP
-#ext_js_file = "ext-2.2.zip"
-#ext_js_path = "/usr/share/iop-oozie/ext-2.2.zip"
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-oozie_initial_heapsize = str(config['configurations']['oozie-env']['oozie_initial_heapsize']).rstrip('m') + 'm'
-oozie_heapsize = str(config['configurations']['oozie-env']['oozie_heapsize']).rstrip('m') + 'm'
-oozie_permsize = str(config['configurations']['oozie-env']['oozie_permsize']).rstrip('m') + 'm'
-
-kinit_path_local = get_kinit_path()
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-http_principal = config['configurations']['oozie-site']['oozie.authentication.kerberos.principal']
-oozie_site = config['configurations']['oozie-site']
-if security_enabled:
-  #older versions of oozie have problems when using _HOST in principal
-  #by testing, newer versions of oozie also need this replacement
-  oozie_site = dict(config['configurations']['oozie-site'])
-  oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
-    oozie_principal.replace('_HOST', hostname)
-  oozie_site['oozie.authentication.kerberos.principal'] = \
-    http_principal.replace('_HOST', hostname)
-
-smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-oozie_keytab = default("/configurations/oozie-env/oozie_keytab", oozie_service_keytab)
-oozie_env_sh_template = config['configurations']['oozie-env']['content']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-
-java_home = config['hostLevelParams']['java_home']
-java_version = config['hostLevelParams']['java_version']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
-oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
-if 'export OOZIE_HTTPS_PORT' in oozie_env_sh_template or 'oozie.https.port' in config['configurations']['oozie-site'] or 'oozie.https.keystore.file' in config['configurations']['oozie-site'] or 'oozie.https.keystore.pass' in config['configurations']['oozie-site']:
-  oozie_secure = '-secure'
-else:
-  oozie_secure = ''
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-
-put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
-
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  #jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-  jdbc_driver_jar = "mysql-connector-java.jar"
-  jdbc_symlink_name = "mysql-jdbc-driver.jar"
-elif jdbc_driver_name == "org.postgresql.Driver":
-  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")  #oozie uses its own postgres jdbc
-  jdbc_symlink_name = "postgres-jdbc-driver.jar"
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  #jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-  jdbc_driver_jar = "ojdbc.jar"
-  jdbc_symlink_name = "oracle-jdbc-driver.jar"
-else:
-  jdbc_driver_jar = ""
-  jdbc_symlink_name = ""
-
-driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-driver_curl_target = format("{java_share_dir}/{jdbc_driver_jar}")
-downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
-if jdbc_driver_name == "org.postgresql.Driver":
-  target = jdbc_driver_jar
-else:
-  target = format("{oozie_libext_dir}/{jdbc_driver_jar}")
-
-hostname = config["hostname"]
-hdfs_share_dir = format("{oozie_hdfs_user_dir}/share")
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-has_falcon_host = not len(falcon_host)  == 0
-
-#oozie-log4j.properties
-if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
-  log4j_props = config['configurations']['oozie-log4j']['content']
-else:
-  log4j_props = None
-
-oozie_hdfs_user_mode = 0775
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-https_port = None
-# try to get https port from oozie-env content
-for line in oozie_env_sh_template.splitlines():
-  result = re.match(r"export\s+OOZIE_HTTPS_PORT=(\d+)", line)
-  if result is not None:
-    https_port = result.group(1)
-# or from oozie-site.xml
-if https_port is None and 'oozie.https.port' in config['configurations']['oozie-site']:
-  https_port = config['configurations']['oozie-site']['oozie.https.port']
-
-oozie_base_url = config['configurations']['oozie-site']['oozie.base.url']
-
-# construct proper url for https
-if https_port is not None:
-  parsed_url = urlparse(oozie_base_url)
-  oozie_base_url = oozie_base_url.replace(parsed_url.scheme, "https")
-  if parsed_url.port is None:
-    oozie_base_url = oozie_base_url.replace(parsed_url.hostname, ":".join([parsed_url.hostname, str(https_port)]))
-  else:
-    oozie_base_url = oozie_base_url.replace(str(parsed_url.port), str(https_port))
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
-)
-
-#LZO support
-
-#-----LZO is not suppported in IOP distribution since it is GPL license--------
-
-'''
-io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-
-# stack_is_iop40_or_further
-underscored_version = stack_version_unformatted.replace('.', '_')
-dashed_version = stack_version_unformatted.replace('.', '-')
-lzo_packages_to_family = {
-  "any": ["hadoop-lzo", ],
-  "redhat": ["lzo", "hadoop-lzo-native"],
-  "suse": ["lzo", "hadoop-lzo-native"],
-  "ubuntu": ["liblzo2-2", ]
-}
-
-
-lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscored_version}_*")]
-lzo_packages_to_family["suse"] += [format("hadooplzo_{underscored_version}_*")]
-lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
-
-lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
-all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
-'''

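[Editor's note] A pattern worth calling out in the deleted params.py is the functools.partial block near the end: cluster-wide arguments (user, keytab, hdfs-site, default fs, ...) are bound once, so every later call names only the path and per-call options. A stand-in sketch of the same pattern (the class below is a dummy, not Ambari's real HdfsResource):

# functools.partial pre-binds common kwargs; call sites stay short.
import functools

class HdfsResource(object):
    def __init__(self, path, user=None, default_fs=None, **kwargs):
        # A real implementation would queue/apply an HDFS operation here.
        print("HdfsResource(%r, user=%r, default_fs=%r, %r)"
              % (path, user, default_fs, kwargs))

HdfsResource = functools.partial(HdfsResource, user="hdfs",
                                 default_fs="hdfs://mycluster")  # hypothetical values

# Mirrors the call shape used in oozie_service.py above.
HdfsResource("/user/oozie/share", type="directory",
             action="create_on_execute", mode=0o755)
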
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index fdfd552..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import glob
-
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile
-from resource_management.core.system import System
-from resource_management.libraries.functions import format
-from resource_management.libraries.script import Script
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.core.exceptions import Fail
-from ambari_commons.os_family_impl import OsFamilyImpl
-from ambari_commons import OSConst
-
-from resource_management.core.logger import Logger
-
-class OozieServiceCheck(Script):
-  pass
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class OozieServiceCheckDefault(OozieServiceCheck):
-
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
-    smoke_test_file_name = 'oozieSmoke2.sh'
-
-    if 'yarn-site' in params.config['configurations']:
-      XmlConfig("yarn-site.xml",
-                conf_dir=params.hadoop_conf_dir,
-                configurations=params.config['configurations']['yarn-site'],
-                owner=params.hdfs_user,
-                group=params.user_group,
-                mode=0644
-      )
-    else:
-      raise Fail("yarn-site.xml was not present in config parameters.")
-
-    OozieServiceCheckDefault.oozie_smoke_shell_file(smoke_test_file_name, prepare_hdfs_file_name)
-
-  @staticmethod
-  def oozie_smoke_shell_file(file_name, prepare_hdfs_file_name):
-    import params
-
-    File(format("{tmp_dir}/{file_name}"),
-         content=StaticFile(file_name),
-         mode=0755
-    )
-    File(format("{tmp_dir}/{prepare_hdfs_file_name}"),
-         content=StaticFile(prepare_hdfs_file_name),
-         mode=0755
-    )
-
-    os_family = System.get_instance().os_family
-    oozie_examples_dir = glob.glob(params.oozie_examples_regex)[0]
-    oozie_examples_tar_file = os.path.join(oozie_examples_dir,"oozie-examples.tar.gz");
-    if not os.path.isfile(oozie_examples_tar_file):
-      oozie_examples_dir = glob.glob(os.path.join(oozie_examples_dir,"oozie-4.2.0_IBM*"))[0]
-
-    Execute(format("{tmp_dir}/{prepare_hdfs_file_name} {conf_dir} {oozie_examples_dir} {hadoop_conf_dir} "),
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-    examples_dir = format('/user/{smokeuser}/examples')
-    params.HdfsResource(examples_dir,
-                        action = "delete_on_execute",
-                        type = "directory"
-    )
-    params.HdfsResource(examples_dir,
-      action = "create_on_execute",
-      type = "directory",
-      source = format("{oozie_examples_dir}/examples"),
-      owner = params.smokeuser,
-      group = params.user_group
-    )
-
-    input_data_dir = format('/user/{smokeuser}/input-data')
-    params.HdfsResource(input_data_dir,
-                        action = "delete_on_execute",
-                        type = "directory"
-    )
-    params.HdfsResource(input_data_dir,
-      action = "create_on_execute",
-      type = "directory",
-      source = format("{oozie_examples_dir}/examples/input-data"),
-      owner = params.smokeuser,
-      group = params.user_group
-    )
-    params.HdfsResource(None, action="execute")
-
-    if params.security_enabled:
-      sh_cmd = format(
-        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local} {smokeuser_principal}")
-    else:
-      sh_cmd = format(
-        "{tmp_dir}/{file_name} {os_family} {oozie_lib_dir} {conf_dir} {oozie_bin_dir} {oozie_base_url} {oozie_examples_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
-
-    Execute(sh_cmd,
-            path=params.execute_path,
-            tries=3,
-            try_sleep=5,
-            logoutput=True
-    )
-
-@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
-class OozieServiceCheckWindows(OozieServiceCheck):
-
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
-    service = "OOZIE"
-    Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
-
-if __name__ == "__main__":
-  OozieServiceCheck().execute()

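[Editor's note] The deleted service check selects an implementation per OS family via ambari_commons' @OsFamilyImpl decorator (default vs. WINSRV above). A toy re-implementation of that registry-by-decorator idea, purely for illustration (the real decorator is more involved):

# Hypothetical sketch of decorator-based OS-family dispatch.
import platform

_IMPLS = {}

def os_family_impl(os_family):
    def register(cls):
        _IMPLS[os_family] = cls  # map family name -> implementation class
        return cls
    return register

@os_family_impl("default")
class ServiceCheckDefault(object):
    def service_check(self):
        return "run oozieSmoke2.sh via shell"

@os_family_impl("winsrv")
class ServiceCheckWindows(object):
    def service_check(self):
        return "run Run-SmokeTests.cmd"

family = "winsrv" if platform.system() == "Windows" else "default"
print(_IMPLS[family]().service_check())
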
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index 58d5b86..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-# a map of the Ambari role to the component name
-# for use with /usr/iop/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'OOZIE_SERVER' : 'oozie-server',
-  'OOZIE_CLIENT' : 'oozie-client',
-  'OOZIE_SERVICE_CHECK' : 'oozie-client',
-  'ru_execute_tasks' : 'oozie-server'
-}
-
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "OOZIE_CLIENT")
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-conf_dir = format("/usr/iop/current/{component_directory}/conf")
-tmp_dir = Script.get_tmp_dir()
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-hostname = config["hostname"]

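[Editor's note] status_params.py resolves the Ambari role to a /usr/iop/current/<component> directory, falling back to the client when the role is unknown. Script.get_component_from_role presumably amounts to a dict lookup with a default; a minimal sketch under that assumption:

# Role-to-component resolution, mirroring the map in the deleted file.
SERVER_ROLE_DIRECTORY_MAP = {
    'OOZIE_SERVER': 'oozie-server',
    'OOZIE_CLIENT': 'oozie-client',
    'OOZIE_SERVICE_CHECK': 'oozie-client',
    'ru_execute_tasks': 'oozie-server',
}

def component_for_role(role, default_role='OOZIE_CLIENT'):
    # Unknown roles fall back to the client component.
    return SERVER_ROLE_DIRECTORY_MAP.get(role, SERVER_ROLE_DIRECTORY_MAP[default_role])

print("/usr/iop/current/%s/conf" % component_for_role('OOZIE_SERVER'))
# -> /usr/iop/current/oozie-server/conf
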
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/adminusers.txt.j2
deleted file mode 100644
index 2a0f7b2..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/adminusers.txt.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Users should be set using following rules:
-#
-#     One user name per line
-#     Empty lines and lines starting with '#' are ignored
-
-{% if oozie_admin_users %}
-{% for oozie_admin_user in oozie_admin_users.split(',') %}
-{{oozie_admin_user|trim}}
-{% endfor %}
-{% endif %}

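[Editor's note] The deleted template renders one trimmed user per line from the comma-separated oozie_admin_users value. A quick way to preview its output (assumes the third-party jinja2 package is installed; the sample user list is made up):

# Render the same template body adminusers.txt.j2 uses.
from jinja2 import Template

tpl = Template(
    "{% if oozie_admin_users %}"
    "{% for oozie_admin_user in oozie_admin_users.split(',') %}"
    "{{oozie_admin_user|trim}}\n"
    "{% endfor %}"
    "{% endif %}"
)
print(tpl.render(oozie_admin_users="oozie, ambari-qa ,hdfs"))
# oozie
# ambari-qa
# hdfs
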
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index e39428f..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,93 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
-log4j.appender.oozie.RollingPolicy.FileNamePattern=${log4j.appender.oozie.File}-%d{yyyy-MM-dd}
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

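[Editor's note] Every appender in the template above is a log4j DailyRollingFileAppender with DatePattern='.'yyyy-MM-dd, i.e. one file per day. For intuition only, a rough Python analogue (not part of Ambari; the log path is hypothetical) is logging's TimedRotatingFileHandler:

# Daily log rollover, analogous to DailyRollingFileAppender above.
import logging
from logging.handlers import TimedRotatingFileHandler

handler = TimedRotatingFileHandler("/tmp/oozie-demo.log", when="midnight")
# Loosely mirrors the ConversionPattern %d{ISO8601} %5p %c{1}:%L - %m%n.
handler.setFormatter(logging.Formatter(
    "%(asctime)s %(levelname)5s %(name)s:%(lineno)d - %(message)s"))

logger = logging.getLogger("oozie.demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("rolls over to oozie-demo.log.YYYY-MM-DD at midnight")
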
http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-env.xml
deleted file mode 100644
index 1a4372f..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-env.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- pig-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for pig-env.sh file</description>
-    <value>
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-#if [ -d "/usr/lib/tez" ]; then
-#  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
-#fi
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index 4f656f4..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set root logger level to DEBUG and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
-    </value>
-    <value-attributes>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-properties.xml
deleted file mode 100644
index ea0fab5..0000000
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/configuration/pig-properties.xml
+++ /dev/null
@@ -1,631 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Pig configuration file. All values can be overwritten by command line
-# arguments; for a description of the properties, run
-#
-#     pig -h properties
-#
-
-############################################################################
-#
-# == Logging properties
-#
-
-# Location of pig log file. If blank, a file with a timestamped slug
-# ('pig_1399336559369.log') will be generated in the current working directory.
-#
-# pig.logfile=
-# pig.logfile=/tmp/pig-err.log
-
-# Log4j configuration file. Set at runtime with the -4 parameter. The source
-# distribution has a ./conf/log4j.properties.template file you can rename and
-# customize.
-#
-# log4jconf=./conf/log4j.properties
-
-# Verbose Output.
-# * false (default): print only INFO and above to screen
-# * true: Print all log messages to screen
-#
-# verbose=false
-
-# Omit timestamps on log messages. (default: false)
-#
-# brief=false
-
-# Logging level. debug=OFF|ERROR|WARN|INFO|DEBUG (default: INFO)
-#
-# debug=INFO
-
-# Roll up warnings across tasks, so that when millions of mappers suddenly cry
-# out in error they are partially silenced. (default, recommended: true)
-#
-# aggregate.warning=true
-
-# Should DESCRIBE pretty-print its schema?
-# * false (default): print on a single-line, suitable for pasting back in to your script
-# * true (recommended): prints on multiple lines with indentation, much more readable
-#
-# pig.pretty.print.schema=false
-
-# === Profiling UDFs  ===
-
-# Turn on UDF timers? This will cause two counters to be
-# tracked for every UDF and LoadFunc in your script: approx_microsecs measures
-# approximate time spent inside a UDF approx_invocations reports the approximate
-# number of times the UDF was invoked.
-#
-# * false (default): do not record timing information of UDFs.
-# * true: report UDF performance. Uses more counters, but gives more insight
-#   into script operation
-#
-# pig.udf.profile=false
-
-# Specify frequency of profiling (default: every 100th).
-# pig.udf.profile.frequency=100
-
-############################################################################
-#
-# == Site-specific Properties
-#
-
-# Execution Mode. Local mode is much faster, but only suitable for small amounts
-# of data. Local mode interprets paths on the local file system; Mapreduce mode
-# on the HDFS. Read more under 'Execution Modes' within the Getting Started
-# documentation.
-#
-# * mapreduce (default): use the Hadoop cluster defined in your Hadoop config files
-# * local: use local mode
-#
-# exectype=mapreduce
-
-# Bootstrap file with default statements to execute in every Pig job, similar to
-# .bashrc.  If blank, uses the file '.pigbootup' from your home directory; If a
-# value is supplied, that file is NOT loaded.  This does not do tilde expansion
-# -- you must supply the full path to the file.
-#
-# pig.load.default.statements=
-# pig.load.default.statements=/home/bob/.pigrc
-
-# Kill all waiting/running MR jobs upon a MR job failure? (default: false) If
-# false, jobs that can proceed independently will do so unless a parent stage
-# fails. If true, the failure of any stage in the script kills all jobs.
-#
-# stop.on.failure=false
-
-# File containing the pig script to run. Rarely set in the properties file.
-# Commandline: -f
-#
-# file=
-
-# Jarfile to load, colon separated. Rarely used.
-#
-# jar=
-
-# Register additional .jar files to use with your Pig script.
-# Most typically used as a command line option (see http://pig.apache.org/docs/r0.12.0/basic.html#register):
-#
-#     pig -Dpig.additional.jars=hdfs://nn.mydomain.com:9020/myjars/my.jar
-#
-# pig.additional.jars=&lt;colon separated list of jars with optional wildcards&gt;
-# pig.additional.jars=/usr/local/share/pig/pig/contrib/piggybank/java/piggybank.jar:/usr/local/share/pig/datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar
-
-# Specify potential packages to which a UDF or a group of UDFs belong,
-# eliminating the need to qualify the UDF on every call. See
-# http://pig.apache.org/docs/r0.12.0/udf.html#use-short-names
-#
-# Commandline use:
-#
-#     pig \
-#       -Dpig.additional.jars=$PIG_HOME/contrib/piggybank/java/piggybank.jar:$PIG_HOME/../datafu/datafu-pig/build/libs/datafu-pig-1.2.1.jar \
-#       -Dudf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.util \
-#       happy_job.pig
-#
-# udf.import.list=&lt;colon separated list of imports&gt;
-# udf.import.list=org.apache.pig.piggybank.evaluation:datafu.pig.bags:datafu.pig.hash:datafu.pig.stats:datafu.pig.util
-
-#
-# Reuse jars across jobs run by the same user? (default: false) If enabled, jars
-# are placed in ${pig.user.cache.location}/${user.name}/.pigcache. Since most
-# jars change infrequently, this gives a minor speedup.
-#
-# pig.user.cache.enabled=false
-
-# Base path for storing jars cached by the pig.user.cache.enabled feature. (default: /tmp)
-#
-# pig.user.cache.location=/tmp
-
-# Replication factor for cached jars. If not specified mapred.submit.replication
-# is used, whose default is 10.
-#
-# pig.user.cache.replication=10
-
-# Default UTC offset. (default: the host's current UTC offset) Supply a UTC
-# offset in Java's timezone format: e.g., +08:00.
-#
-# pig.datetime.default.tz=
-
-############################################################################
-#
-# Memory impacting properties
-#
-
-# Amount of memory (as fraction of heap) allocated to bags before a spill is
-# forced. Default is 0.2, meaning 20% of available memory. Note that this memory
-# is shared across all large bags used by the application. See
-# http://pig.apache.org/docs/r0.12.0/perf.html#memory-management
-#
-# pig.cachedbag.memusage=0.2
-
-# Don't spill bags smaller than this size (bytes). Default: 5000000, or about
-# 5MB. Usually, the more spilling the longer runtime, so you might want to tune
-# it according to heap size of each task and so forth.
-#
-# pig.spill.size.threshold=5000000
-
-# EXPERIMENTAL: If a file bigger than this size (bytes) is spilled -- thus
-# freeing a bunch of ram -- tell the JVM to perform garbage collection.  This
-# should help reduce the number of files being spilled, but causes more-frequent
-# garbage collection. Default: 40000000 (about 40 MB)
-#
-# pig.spill.gc.activation.size=40000000
-
-# Maximum amount of data to replicate using the distributed cache when doing
-# fragment-replicated join. (default: 1000000000, about 1GB) Consider increasing
-# this in a production environment, but carefully.
-#
-# pig.join.replicated.max.bytes=1000000000
-
-# Fraction of heap available for the reducer to perform a skewed join. A low
-# fraction forces Pig to use more reducers, but increases the copying cost. See
-# http://pig.apache.org/docs/r0.12.0/perf.html#skewed-joins
-#
-# pig.skewedjoin.reduce.memusage=0.3
-
-#
-# === SchemaTuple ===
-#
-# The SchemaTuple feature (PIG-2632) uses a tuple's schema (when known) to
-# generate a custom Java class to hold records. Otherwise, tuples are loaded as
-# a plain list that is unaware of its contents' schema -- and so each element
-# has to be wrapped as a Java object on its own. This can provide more efficient
-# CPU utilization, serialization, and most of all memory usage.
-#
-# This feature is considered experimental and is off by default. You can
-# selectively enable it for specific operations using pig.schematuple.udf,
-# pig.schematuple.load, pig.schematuple.fr_join and pig.schematuple.merge_join
-#
-
-# Enable the SchemaTuple optimization in all available cases? (default: false; recommended: true)
-#
-# pig.schematuple=false
-
-# EXPERIMENTAL: Use SchemaTuples with UDFs (default: value of pig.schematuple).
-# pig.schematuple.udf=false
-
-# EXPERIMENTAL, CURRENTLY NOT IMPLEMENTED, but in the future, LoadFunc's with
-# known schemas should output SchemaTuples. (default: value of pig.schematuple)
-# pig.schematuple.load=false
-
-# EXPERIMENTAL: Use SchemaTuples in replicated joins. The potential memory
-# saving here is significant. (default: value of pig.schematuple)
-# pig.schematuple.fr_join=false
-
-# EXPERIMENTAL: Use SchemaTuples in merge joins. (default: value of pig.schematuple).
-# pig.schematuple.merge_join=false
-
-############################################################################
-#
-# Serialization options
-#
-
-# Omit empty part files from the output? (default: false)
-#
-# * false (default): reducers generates an output file, even if output is empty
-# * true (recommended): do not generate zero-byte part files
-#
-# The default behavior of MapReduce is to generate an empty file for no data, so
-# Pig follows that. But many small files can cause annoying extra map tasks and
-# put load on the HDFS, so consider setting this to 'true'
-#
-# pig.output.lazy=false
-
-#
-# === Tempfile Handling
-#
-
-# EXPERIMENTAL: Storage format for temporary files generated by intermediate
-# stages of Pig jobs. This can provide significant speed increases for certain
-# codecs, as reducing the amount of data transferred to and from disk can more
-# than make up for the cost of compression/compression. Recommend that you set
-# up LZO compression in Hadoop and specify tfile storage.
-#
-# Compress temporary files?
-# * false (default): do not compress
-# * true (recommended): compress temporary files.
-#
-# pig.tmpfilecompression=false
-# pig.tmpfilecompression=true
-
-# Tempfile storage container type.
-#
-# * tfile (default, recommended): more efficient, but only supports gz(gzip) and lzo compression.
-#   https://issues.apache.org/jira/secure/attachment/12396286/TFile%20Specification%2020081217.pdf
-# * seqfile: only supports gz(gzip), lzo, snappy, and bzip2 compression
-#
-# pig.tmpfilecompression.storage=tfile
-
-# Codec types for intermediate job files. tfile supports gz(gzip) and lzo;
-# seqfile support gz(gzip), lzo, snappy, bzip2
-#
-# * lzo (recommended with caveats): moderate compression, low cpu burden;
-#   typically leads to a noticeable speedup. Best default choice, but you must
-#   set up LZO independently due to license incompatibility
-# * snappy: moderate compression, low cpu burden; typically leads to a noticeable speedup.
-# * gz (default): higher compression, high CPU burden. Typically leads to a noticeable slowdown.
-# * bzip2: most compression, major CPU burden. Typically leads to a noticeable slowdown.
-#
-# pig.tmpfilecompression.codec=gzip
-
-#
-# === Split Combining
-#
-
-#
-# Should pig try to combine small files for fewer map tasks? This improves the
-# efficiency of jobs with many small input files, reduces the overhead on the
-# jobtracker, and reduces the number of output files a map-only job
-# produces. However, it only works with certain loaders and increases non-local
-# map tasks. See http://pig.apache.org/docs/r0.12.0/perf.html#combine-files
-#
-# * false (default, recommended): _do_ combine files
-# * true: do not combine files
-#
-# pig.noSplitCombination=false
-
-#
-# Size, in bytes, of data to be processed by a single map. Smaller files are
-# combined until this size is reached. If unset, defaults to the file system's
-# default block size.
-#
-# pig.maxCombinedSplitSize=
-
-# ###########################################################################
-#
-# Execution options
-#
-
-# Should pig omit combiners? (default, recommended: false -- meaning pig _will_
-# use combiners)
-#
-# When combiners work well, they eliminate a significant amount of
-# data. However, if they do not eliminate much data -- say, a DISTINCT operation
-# that only eliminates 5% of the records -- they add a noticeable overhead to
-# the job. So the recommended default is false (use combiners), selectively
-# disabling them per-job:
-#
-#     pig -Dpig.exec.nocombiner=true distinct_but_not_too_much.pig
-#
-# pig.exec.nocombiner=false
-
-# EXPERIMENTAL: Aggregate records in map task before sending to the combiner?
-# (default: false, 10; recommended: true, 10). In cases where there is a massive
-# reduction of data in the aggregation step, pig can do a first pass of
-# aggregation before the data even leaves the mapper, saving much serialization
-# overhead. It's off by default but can give a major improvement to
-# group-and-aggregate operations. Pig skips partial aggregation unless reduction
-# is better than a factor of minReduction (default: 10). See
-# http://pig.apache.org/docs/r0.12.0/perf.html#hash-based-aggregation
-#
-# pig.exec.mapPartAgg=false
-# pig.exec.mapPartAgg.minReduction=10
-
-#
-# === Control how many reducers are used.
-#
-
-# Estimate number of reducers naively using a fixed amount of data per
-# reducer. Optimally, you have both fewer reducers than available reduce slots,
-# and reducers that are neither getting too little data (less than a half-GB or
-# so) nor too much data (more than 2-3 times the reducer child process max heap
-# size). The default of 1000000000 (about 1GB) is probably low for a production
-# cluster -- however it's much worse to set this too high (reducers spill many
-# times over in group-sort) than too low (delay waiting for reduce slots).
-#
-# pig.exec.reducers.bytes.per.reducer=1000000000
-
-#
-# Don't ever use more than this many reducers. (default: 999)
-#
-# pig.exec.reducers.max=999
-
-#
-# === Local mode for small jobs
-#
-
-# EXPERIMENTAL: Use local mode for small jobs? If true, jobs with input data
-# size smaller than pig.auto.local.input.maxbytes bytes and one or no reducers
-# are run in local mode, which is much faster. Note that file paths are still
-# interpreted as pig.exectype implies.
-#
-# * true (recommended): allow local mode for small jobs, which is much faster.
-# * false (default): always use pig.exectype.
-#
-# pig.auto.local.enabled=false
-
-#
-# Definition of a small job for the pig.auto.local.enabled feature. Only jobs
-# with less than this many bytes are candidates to run locally (default:
-# 100000000 bytes, about 1GB)
-#
-# pig.auto.local.input.maxbytes=100000000
-
-############################################################################
-#
-# Security Features
-#
-
-# Comma-delimited list of commands/operators that are disallowed. This security
-# feature can be used by administrators to block use of certain commands by
-# users.
-#
-# * <blank> (default): all commands and operators are allowed.
-# * fs,set (for example): block all filesystem commands and config changes from pig scripts.
-#
-# pig.blacklist=
-# pig.blacklist=fs,set
-
-# Comma-delimited list of the only commands/operators that are allowed. This
-# security feature can be used by administrators to restrict users to an
-# approved set of commands.
-#
-# * <blank> (default): all commands and operators not on the pig.blacklist are allowed.
-# * load,store,filter,group: allow only LOAD, STORE, FILTER and GROUP
-#   from pig scripts. All other commands and operators will fail.
-#
-# pig.whitelist=
-# pig.whitelist=load,store,filter,group
-
-#####################################################################
-#
-# Advanced Site-specific Customizations
-#
-
-# Remove intermediate output files?
-#
-# * true (default, recommended): remove the files
-# * false: do NOT remove the files. You must clean them up yourself.
-#
-# Keeping them is useful for advanced debugging, but can be dangerous -- you
-# must clean them up yourself.  Inspect the intermediate outputs with
-#
-#     LOAD '/path/to/tmp/file' USING org.apache.pig.impl.io.TFileStorage();
-#
-# (Or ...SequenceFileInterStorage if pig.tmpfilecompression.storage is seqfile)
-#
-# pig.delete.temp.files=true
-
-# EXPERIMENTAL: A Pig Progress Notification Listener (PPNL) lets you wire pig's
-# progress into your visibility stack. To use a PPNL, supply the fully qualified
-# class name of a PPNL implementation. Note that only one PPNL can be set up, so
-# if you need several, write a PPNL that will chain them.
-#
-# See https://github.com/twitter/ambrose for a pretty awesome one of these
-#
-# pig.notification.listener=<fully qualified class name of a PPNL implementation>
-
-# String argument to pass to your PPNL constructor (optional). Only a single
-# string value is allowed. (default none)
-#
-# pig.notification.listener.arg=<somevalue>
-
-# EXPERIMENTAL: Class invoked to estimate the number of reducers to use.
-# (default: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.InputSizeReducerEstimator)
-#
-# If you don't know how or why to write a PigReducerEstimator, you're unlikely
-# to use this. By default, the naive mapReduceLayer.InputSizeReducerEstimator is
-# used, but you can specify anything implementing the interface
-# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigReducerEstimator
-#
-# pig.exec.reducer.estimator=<fully qualified class name of a PigReducerEstimator implementation>
-
-# Optional String argument to pass to your PigReducerEstimator. (default: none;
-# a single String argument is allowed).
-#
-# pig.exec.reducer.estimator.arg=<somevalue>
-
-# Class invoked to report the size of reducers output. By default, the reducers'
-# output is computed as the total size of output files. But not every storage is
-# file-based, and so this logic can be replaced by implementing the interface
-# org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigStatsOutputSizeReader
-# If you need to register more than one reader, you can register them as a comma
-# separated list. Every reader implements a boolean supports(POStore sto) method.
-# When there is more than one reader, they are consulted in order, and the
-# first one whose supports() method returns true will be used.
-#
-# pig.stats.output.size.reader=<fully qualified class name of a PigStatsOutputSizeReader implementation>
-# pig.stats.output.size.reader.unsupported=<comma separated list of StoreFuncs that are not supported by this reader>
-
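The reader-selection rule above (consulted in order, first supports() wins) is a
plain linear scan. An illustrative Python sketch with invented stand-in classes,
not Pig's actual reader interface:

    class FileBasedSizeReader(object):
        def supports(self, store):
            return getattr(store, "is_file_based", False)
        def output_size(self, store):
            return sum(store.file_sizes)

    class HBaseSizeReader(object):
        def supports(self, store):
            return getattr(store, "storage", "") == "hbase"
        def output_size(self, store):
            return store.approximate_bytes

    def pick_reader(readers, store):
        # Readers are consulted in registration order; the first whose
        # supports() returns True wins, exactly as described above.
        for reader in readers:
            if reader.supports(store):
                return reader
        return None
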
-# By default, Pig retrieves TaskReports for every launched task to compute
-# various job statistics. This can cause an OOM if the number of tasks is
-# large. In that case, you can disable it by setting this property to true.
-# pig.stats.notaskreport=false
-
-#
-# Override hadoop configs programmatically
-#
-# By default, Pig expects hadoop configs (hadoop-site.xml and core-site.xml)
-# to be present on the classpath. There are cases when these configs need
-# to be passed programmatically, such as when using the PigServer API.
-# In such cases, you can override hadoop configs by setting the property
-# "pig.use.overriden.hadoop.configs".
-#
-# When this property is set to true, Pig does not look for hadoop configs
-# on the classpath and instead picks them up from the Properties/Configuration
-# object passed to it.
-#
-# pig.use.overriden.hadoop.configs=false
-
-# Implied LoadFunc for the LOAD operation when no USING clause is
-# present. Supply the fully qualified class name of a LoadFunc
-# implementation. Note: setting this means you will have to modify most code
-# brought in from elsewhere on the web, as people generally omit the USING
-# clause for TSV files.
-#
-# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values LoadFunc
-# * my.custom.udfcollection.MyCustomLoadFunc (for example): use MyCustomLoadFunc instead
-#
-# pig.default.load.func=<fully qualified class name of a LoadFunc implementation>
-
-# The implied StoreFunc for STORE operations with no USING clause. Supply the
-# fully qualified class name of a StoreFunc implementation.
-#
-# * org.apache.pig.builtin.PigStorage (default): the traditional tab-separated-values StoreFunc.
-# * my.custom.udfcollection.MyCustomStoreFunc (for example): use MyCustomStoreFunc instead
-#
-# pig.default.store.func=<fully qualified class name of a StoreFunc implementation>
-
-# Recover jobs when the application master is restarted? (default: false). This
-# is a Hadoop 2 specific property; enable it to take advantage of AM recovery.
-#
-# pig.output.committer.recovery.support=true
-
-# Should scripts check to prevent multiple stores writing to the same location?
-# (default: false) When set to true, Pig stops execution of the script right away.
-#
-pig.location.check.strict=false
-
-# In addition to the fs-style commands (rm, ls, etc.) Pig can now execute
-# SQL-style DDL commands, e.g. "sql create table pig_test(name string, age int)".
-# The only implemented backend is hcat, and luckily that's also the default.
-#
-# pig.sql.type=hcat
-
-# Path to the hcat executable, for use with pig.sql.type=hcat (default: null)
-#
-hcat.bin=/usr/local/hcat/bin/hcat
-
-###########################################################################
-#
-# Overrides for extreme environments
-#
-# (Most people won't have to adjust these parameters)
-#
-
-
-# Limit the pig script length placed in the jobconf xml. (default: 10240)
-# Extremely long queries can waste space in the JobConf; since its contents are
-# only advisory, the default is fine unless you are retaining it for forensics.
-#
-# pig.script.max.size=10240
-
-# Disable use of counters by Pig. Note that the word 'counter' is singular here.
-#
-# * false (default, recommended): do NOT disable counters.
-# * true: disable counters. Set this to true only when your Pig job would
-#   otherwise die from using more counters than the hadoop-configured limit.
-#
-# pig.disable.counter=true
-
-# Sample size (per-mapper, in number of rows) the ORDER..BY operation's
-# RandomSampleLoader uses to estimate how your data should be
-# partitioned. (default, recommended: 100 rows per task) Increase this if you
-# have exceptionally large input splits and are unhappy with the reducer skew.
-#
-# pig.random.sampler.sample.size=100
-
-# Process an entire script at once, reducing the amount of work and number of
-# tasks? (default, recommended: true) See http://pig.apache.org/docs/r0.12.0/perf.html#multi-query-execution
-#
-# MultiQuery optimization is very useful, and so the recommended default is
-# true. You may find that a script fails to compile under MultiQuery. If so,
-# disable it at runtime:
-#
-#     pig -no_multiquery script_that_makes_pig_sad.pig
-#
-# opt.multiquery=true
-
-# For small queries, fetch data directly from HDFS. (default, recommended:
-# true). If you want to force Pig to launch a MR job, for example when you're
-# testing a live cluster, disable with the -N option. See PIG-3642.
-#
-# opt.fetch=true
-
-###########################################################################
-#
-# Streaming properties
-#
-
-# Define which properties will be set in the streaming environment. Set this
-# property to a comma-delimited list of properties, and those properties will
-# be set in the environment.
-#
-# pig.streaming.environment=<comma-delimited list of properties>
-
-# Specify a comma-delimited list of local files to ship to the distributed
-# cache for a streaming job.
-#
-# pig.streaming.ship.files=<comma-delimited list of local files>
-
-# Specify a comma-delimited list of remote files to cache in the distributed
-# cache for a streaming job.
-#
-# pig.streaming.cache.files=<comma-delimited list of remote files>
-
-# Specify the python command to be used for python streaming udfs. By default,
-# python is used, but you can override it with a non-default version such as
-# python2.7.
-#
-# pig.streaming.udf.python.command=python
-
-    </value>
-    <description>Describe all the Pig agent configurations</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/kerberos.json
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/kerberos.json
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/kerberos.json
deleted file mode 100644
index 22dd6cb..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/kerberos.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "services": [
-    {
-      "name": "PIG",
-      "components": [
-        {
-          "name": "PIG",
-          "identities": [
-            {
-              "name": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml
deleted file mode 100644
index bb93ea9..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <displayName>Pig</displayName>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.15.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <displayName>Pig</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>pig-env.sh</fileName>
-              <dictionaryName>pig-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>pig.properties</fileName>
-              <dictionaryName>pig-properties</dictionaryName>
-            </configFile>                         
-          </configFiles>          
-        </component>
-      </components>
-      
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>pig_4_2_*</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>pig-env</config-type>
-        <config-type>pig-log4j</config-type>
-        <config-type>pig-properties</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml~
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml~
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml~
deleted file mode 100644
index ac10d72..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/metainfo.xml~
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <displayName>Pig</displayName>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.15.0.4.1</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <displayName>Pig</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1200</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>pig-env.sh</fileName>
-              <dictionaryName>pig-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>pig.properties</fileName>
-              <dictionaryName>pig-properties</dictionaryName>
-            </configFile>                         
-          </configFiles>          
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>pig-env</config-type>
-        <config-type>pig-log4j</config-type>
-        <config-type>pig-properties</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/files/pigSmoke.sh
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params.py
deleted file mode 100644
index 89ab726..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management.libraries.functions.default import default
-
-from params_linux import *
-
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params_linux.py
deleted file mode 100644
index 226ccc1..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/params_linux.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-iop_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-# hadoop default parameters
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = iop_select.get_hadoop_dir("bin")
-
-# hadoop parameters for 2.2+
-pig_conf_dir = "/usr/iop/current/pig-client/conf"
-hadoop_home = iop_select.get_hadoop_dir("home")
-pig_bin_dir = '/usr/iop/current/pig-client/bin'
-
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-user_group = config['configurations']['cluster-env']['user_group']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-pig_env_sh_template = config['configurations']['pig-env']['content']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-pig_properties = config['configurations']['pig-properties']['content']
-
-log4j_props = config['configurations']['pig-log4j']['content']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs
- )
\ No newline at end of file
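One note on the functools.partial block that closes params_linux.py above: it
pre-binds every argument that is identical across HdfsResource calls, so call
sites only pass what varies. A self-contained sketch of the same pattern, where
hdfs_resource is a stand-in and not Ambari's real HdfsResource:

    import functools

    def hdfs_resource(path, user=None, security_enabled=False, keytab=None,
                      action="create"):
        # Stand-in that just reports what a real resource would do.
        print("{0} {1} as {2} (secure={3})".format(action, path, user,
                                                   security_enabled))

    # Pre-bind the cluster-wide arguments once, as params_linux.py does:
    HdfsResource = functools.partial(
        hdfs_resource,
        user="hdfs",
        security_enabled=False,
        keytab="/etc/security/keytabs/hdfs.headless.keytab",
    )

    # Call sites now only name the per-resource pieces:
    HdfsResource("/apps/pig", action="create")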

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig.py
deleted file mode 100644
index 18ab5c6..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-from resource_management import *
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    recursive = True,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  File(format("{pig_conf_dir}/pig-env.sh"),
-    owner=params.hdfs_user,
-    mode=0755,
-    content=InlineTemplate(params.pig_env_sh_template)
-  )
-
-  # pig_properties is always set to a default even if it's not in the payload
-  File(format("{params.pig_conf_dir}/pig.properties"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.hdfs_user,
-              content=params.pig_properties
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/44e21f8e/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig_client.py
 
b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index 4f71ac4..0000000
--- 
a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-import os
-from resource_management import *
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import iop_select
-from pig import pig
-
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-class PigClient(Script):
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
-class PigClientLinux(PigClient):
-  def get_stack_to_component(self):
-    return {"BigInsights": "hadoop-client"}
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '4.0.0.0') >= 0:
-      conf_select.select(params.stack_name, "pig", params.version)
-      conf_select.select(params.stack_name, "hadoop", params.version)
-      iop_select.select("hadoop-client", params.version) # includes pig-client
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-if __name__ == "__main__":
-  PigClient().execute()
