http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 0946d84..fc53b44 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -24,6 +24,26 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
@@ -96,9 +116,7 @@ user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -106,8 +124,6 @@ hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'
 dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
 
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
 jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
 
 dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
@@ -124,6 +140,13 @@ namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
 fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -174,11 +197,10 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
 
-limits_conf_dir = "/etc/security/limits.d"
-
 io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
 if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
@@ -187,14 +209,15 @@ else:
 name_node_params = default("/commandParams/namenode", None)
 
 #hadoop params
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
 hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 java_home = config['hostLevelParams']['java_home']
+stack_version = str(config['hostLevelParams']['stack_version'])
+
+stack_is_champlain_or_further = not (stack_version.startswith('2.0') or stack_version.startswith('2.1'))
 
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
+if stack_version.startswith('2.0') and System.get_instance().os_family != "suse":
   # deprecated rhel jsvc_path
   jsvc_path = "/usr/libexec/bigtop-utils"
 else:
@@ -214,5 +237,4 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py
index 89cb433..81d7ca5 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/service_check.py
@@ -31,28 +31,26 @@ class HdfsServiceCheck(Script):
 
     safemode_command = "dfsadmin -safemode get | grep OFF"
 
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
+    create_dir_cmd = format("fs -mkdir {dir}")
+    chmod_command = format("fs -chmod 777 {dir}")
+    test_dir_exists = format("su -s /bin/bash - {smoke_user} -c 
'{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}'")
     cleanup_cmd = format("fs -rm {tmp_file}")
     #cleanup put below to handle retries; if retrying there will be a stale file
     #that needs cleanup; exit code is fn of second command
     create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd 
{tmp_file}")
     test_cmd = format("fs -test -e {tmp_file}")
-
-    log_dir = format("{hdfs_log_dir_prefix}/{smoke_user}")
-    Directory(log_dir, owner=params.smoke_user, recursive=True)
-
     if params.security_enabled:
       Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "su -s /bin/bash - {smoke_user} -c '{kinit_path_local} -kt 
{smoke_user_keytab} "
         "{smoke_user}'"))
     ExecuteHadoop(safemode_command,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=20
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_dir_cmd,
                   user=params.smoke_user,
@@ -60,21 +58,32 @@ class HdfsServiceCheck(Script):
                   not_if=test_dir_exists,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(chmod_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(create_file_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     ExecuteHadoop(test_cmd,
                   user=params.smoke_user,
                   logoutput=True,
                   conf_dir=params.hadoop_conf_dir,
                   try_sleep=3,
-                  tries=5
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
     )
     if params.has_journalnode_hosts:
       journalnode_port = params.journalnode_port
@@ -83,7 +92,7 @@ class HdfsServiceCheck(Script):
       checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
       comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
       checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "su -s /bin/bash - {smoke_test_user} -c 'python {checkWebUIFilePath} 
-m "
         "{comma_sep_jn_hosts} -p {journalnode_port}'")
       File(checkWebUIFilePath,
            content=StaticFile(checkWebUIFileName))
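
Every ExecuteHadoop call in this service check now receives
bin_dir=params.hadoop_bin_dir. A rough sketch (not the real resource_management
implementation) of what that parameter does to the final command line:

    def hadoop_command_line(command, conf_dir, bin_dir):
        # Simplified: the real resource also wraps the call in su, kinit,
        # retries, and not_if checks.
        return "{0}/hadoop --config {1} {2}".format(bin_dir, conf_dir, command)

    print(hadoop_command_line("fs -mkdir /tmp/dir1",  # illustrative directory
                              "/etc/hadoop/conf",
                              "/usr/bigtop/current/hadoop-client/bin"))
    # -> /usr/bigtop/current/hadoop-client/bin/hadoop --config /etc/hadoop/conf fs -mkdir /tmp/dir1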

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
index 2650749..3f4bdb5 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/utils.py
@@ -16,8 +16,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+import os
 
 from resource_management import *
+import re
 
 
 def service(action=None, name=None, user=None, create_pid_dir=False,
@@ -30,10 +32,6 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
   check_process = format(
     "ls {pid_file} >/dev/null 2>&1 &&"
     " ps -p `cat {pid_file}` >/dev/null 2>&1")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
 
   if create_pid_dir:
     Directory(pid_dir,
@@ -44,12 +42,76 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
               owner=user,
               recursive=True)
 
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+
   if params.security_enabled and name == "datanode":
+    dfs_dn_port = get_port(params.dfs_dn_addr)
+    dfs_dn_http_port = get_port(params.dfs_dn_http_addr)
+    dfs_dn_https_port = get_port(params.dfs_dn_https_addr)
+
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if params.dfs_http_policy == "HTTPS_ONLY":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+    elif params.dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+
+    # Calculate HADOOP_SECURE_DN_* env vars, but do not append them yet.
+    # These variables should not be set when starting secure datanode as a non-root user
+    ## On secure datanodes, user to run the datanode as after dropping privileges
+    hadoop_secure_dn_user = params.hdfs_user
+    ## Where log files are stored in the secure data environment.
+    hadoop_secure_dn_log_dir = format("{hdfs_log_dir_prefix}/{hadoop_secure_dn_user}")
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hadoop_secure_dn_user}")
+    hadoop_secure_dn_exports = {
+      'HADOOP_SECURE_DN_USER' : hadoop_secure_dn_user,
+      'HADOOP_SECURE_DN_LOG_DIR' : hadoop_secure_dn_log_dir,
+      'HADOOP_SECURE_DN_PID_DIR' : hadoop_secure_dn_pid_dir
+    }
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
+    if not params.stack_is_champlain_or_further or secure_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+      if params.stack_is_champlain_or_further:
+        hadoop_env_exports.update(hadoop_secure_dn_exports)
+
+    if action == 'stop' and params.stack_is_champlain_or_further and \
+      os.path.isfile(hadoop_secure_dn_pid_file):
+        # We need special handling for this case to handle the situation
+        # when we configure non-root secure DN and then restart it
+        # to handle new configs. Otherwise we will not be able to stop
+        # a running instance
+        user = "root"
+        try:
+          with open(hadoop_secure_dn_pid_file, 'r') as f:
+            pid = f.read()
+          os.kill(int(pid), 0)
+          hadoop_env_exports.update(hadoop_secure_dn_exports)
+        except IOError:
+          pass  # Can not open pid file
+        except ValueError:
+          pass  # Pid file content is invalid
+        except OSError:
+          pass  # Process is not running
 
-  daemon_cmd = format("{ulimit_cmd} su - {user} -c '{cmd} {action} {name}'")
+
+  hadoop_env_exports_str = ''
+  for exp in hadoop_env_exports.items():
+    hadoop_env_exports_str += "export {0}={1} && ".format(exp[0], exp[1])
+
+  hadoop_daemon = format(
+    "{hadoop_env_exports_str}"
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  daemon_cmd = format("{ulimit_cmd} su -s /bin/bash - {user} -c '{cmd} {action} {name}'")
 
   service_is_up = check_process if action == "start" else None
   #remove pid file from dead process
@@ -64,3 +126,24 @@ def service(action=None, name=None, user=None, create_pid_dir=False,
     File(pid_file,
          action="delete",
     )
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if port is root-owned on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
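
A short usage sketch for the two helpers just added, with get_port and
is_secure_port from utils.py above in scope (addresses are hypothetical; the
point is that anything below 1024 is a privileged port and forces the datanode
to be started as root):

    assert get_port("0.0.0.0:1019") == 1019
    assert get_port("https://dn.example.com:50475") == 50475
    assert get_port(None) is None        # property not defined

    assert is_secure_port(1019)          # < 1024: root-owned port
    assert not is_secure_port(50010)
    assert not is_secure_port(None)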

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml
new file mode 100644
index 0000000..91b402b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hcat-env.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hcat-env.sh file</description>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml
index 1dc94a1..7444331 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-env.xml
@@ -28,7 +28,7 @@
   </property>
   <property>
     <name>hive_database</name>
-    <value>New PosgreSQL Database</value>
+    <value>New PostgreSQL Database</value>
     <description>
       Property that determines whether the HIVE DB is managed by Ambari.
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
index 4fce97e..87940a7 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/hive-site.xml
@@ -45,7 +45,7 @@ limitations under the License.
   </property>
 <!-- End changes metastore database to postgres -->
 
-    <property>
+  <property>
     <name>javax.jdo.option.ConnectionUserName</name>
     <value>hive</value>
     <description>username to use against metastore database</description>
@@ -53,7 +53,7 @@ limitations under the License.
 
   <property require-input="true">
     <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
+    <value></value>
     <property-type>PASSWORD</property-type>
     <description>password to use against metastore database</description>
   </property>
@@ -72,20 +72,6 @@ limitations under the License.
   </property>
 
   <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/security/keytabs/hive.service.keytab</value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/_HOST@EXAMPLE.COM</value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
     <name>hive.metastore.cache.pinobjtypes</name>
     <value>Table,Database,Type,FieldSchema,Order</value>
     <description>List of comma separated metastore object types that should be pinned in the cache</description>
@@ -98,24 +84,6 @@ limitations under the License.
   </property>
 
   <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
-  <property>
     <name>hive.metastore.client.socket.timeout</name>
     <value>60</value>
     <description>MetaStore Client socket timeout in seconds</description>
@@ -147,6 +115,24 @@ limitations under the License.
   </property>
 
   <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
     <name>hive.security.authenticator.manager</name>
     <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
     <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
@@ -198,6 +184,12 @@ limitations under the License.
   </property>
 
   <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not</description>
+  </property>
+
+  <property>
     <name>hive.map.aggr</name>
     <value>true</value>
     <description>Whether to use map-side aggregation in Hive Group By queries.</description>
@@ -267,7 +259,7 @@ limitations under the License.
 
   <property>
     <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
+    <value>4</value>
     <description>Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
       That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.
       The optimization will be disabled if number of reducers is less than specified value.
@@ -295,7 +287,7 @@ limitations under the License.
 
   <property>
     <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
+    <value>true</value>
     <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
     </description>
   </property>
@@ -316,6 +308,191 @@ limitations under the License.
   </property>
 
   <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+    <description>Whether to use MR or Tez</description>
+  </property>
+
+  <!-- 
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of post-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of pre-execution hooks to be invoked for each statement.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>Comma-separated list of on-failure hooks to be invoked for each statement.</description>
+  </property>
+  -->
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush irrespective of memory pressure condition.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>1024</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value. Assigned value must match value specified for mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx1024m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC</value>
+    <description>Java command line options for Tez. Must be assigned the same value as mapreduce.map.child.java.opts.</description>
+  </property>
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.
+      For more advanced stats collection need to run analyze table queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on splits generated by orc will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <value>default</value>
+    <description>A comma-separated list of queues configured for the cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>The number of sessions for each queue named in the hive.server2.tez.default.queues.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>Enables a user to use HiveServer2 without enabling Tez for HiveServer2. Users may potentially want to run queries with Tez without a pool of sessions.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>Maximum number of transactions that can be fetched in one call to open_txns(). Increasing this will decrease the number of delta files created when streaming data into Hive. But it will also increase the number of open transactions at any given time, possibly impacting read performance.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the thrift metastore this should only be set to true for one of them.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>Time, in seconds, before a given compaction in working state is declared a failure and returned to the initiated state.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>Time in seconds between checks to see if any partitions need to be compacted. This should be kept high because each check for compaction requires many calls against the NameNode.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines caching mechanism DataNucleus L2 cache will use. It is strongly recommended to use default value of 'none' as other values may cause consistency errors in Hive.</description>
+  </property>
+
+  <property>
     <name>hive.server2.thrift.port</name>
     <value>10000</value>
     <description>
@@ -323,4 +500,39 @@ limitations under the License.
     </description>
   </property>
 
+  <property>
+      <name>hive.server2.authentication.spnego.principal</name>
+      <value>HTTP/_HOST@EXAMPLE.COM</value>
+      <description>
+          The SPNEGO service principal would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+      </description>
+  </property>
+
+  <property>
+      <name>hive.server2.authentication.spnego.keytab</name>
+      <value>/etc/security/keytabs/spnego.service.keytab</value>
+      <description>
+          This keytab would be used by HiveServer2 when Kerberos security is enabled and HTTP transport mode is used.
+      </description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its
+      clients. To support this, each instance of HiveServer2 currently uses
+      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
+      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
+      connection string.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when
+      supporting dynamic service discovery.
+    </description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml
new file mode 100644
index 0000000..14a473f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{webhcat_pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME={{hadoop_home}}
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml
new file mode 100644
index 0000000..0523dab
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/configuration/webhcat-site.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.port</name>
+      <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma separated host:port pairs</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+   <name>templeton.override.enabled</name>
+   <value>false</value>
+   <description>
+     Enable the override path in templeton.override.jars
+   </description>
+ </property>
+
+ <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property> 
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Time out for templeton api</description>
+  </property>
+
+</configuration>
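
With templeton.port left at its 50111 default, the server can be probed the
same way the templetonSmoke.sh script below does with curl; an equivalent
Python 2 sketch (the host name is illustrative):

    import urllib2

    ttonhost = "webhcat-host.example.com"
    status = urllib2.urlopen("http://%s:50111/templeton/v1/status" % ttonhost).read()
    print(status)  # expect a healthy server to answer {"status":"ok","version":"v1"}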

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml
index b35bd04..f8a84bf 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/metainfo.xml
@@ -30,6 +30,7 @@
           <displayName>Hive Metastore</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <auto-deploy>
             <enabled>true</enabled>
             <co-locate>HIVE/HIVE_SERVER</co-locate>
@@ -46,6 +47,7 @@
           <displayName>HiveServer2</displayName>
           <category>MASTER</category>
           <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <dependencies>
             <dependency>
               <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
@@ -75,7 +77,58 @@
             <scriptType>PYTHON</scriptType>
           </commandScript>
         </component>
-
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
         <component>
           <name>POSTGRESQL_SERVER</name>
           <displayName>PostgreSQL Server</displayName>
@@ -92,6 +145,7 @@
           <displayName>MySQL Server</displayName>
           <category>MASTER</category>
           <cardinality>0-1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
           <commandScript>
             <script>scripts/mysql_server.py</script>
             <scriptType>PYTHON</scriptType>
@@ -130,6 +184,22 @@
             </configFile>                         
           </configFiles>
         </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat Client</displayName>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>hcat-env.sh</fileName>
+              <dictionaryName>hcat-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
       </components>
 
       <osSpecifics>
@@ -140,6 +210,12 @@
               <name>hive</name>
             </package>
             <package>
+              <name>hive-hcatalog</name>
+            </package>
+            <package>
+              <name>hive-webhcat</name>
+            </package>
+            <package>
               <name>postgresql-server</name>
             </package>
             <package>
@@ -192,80 +268,9 @@
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
         <config-type>hive-env</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
       </configuration-dependencies>
     </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <displayName>HCatalog</displayName>
-      <comment>A table and storage management layer for Hadoop that enables users with different data processing tools
-        to more easily read and write data on the grid.
-      </comment>
-      <version>0.13.0.689</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <displayName>HCat</displayName>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive-hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HIVE</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-      </configuration-dependencies>
-      <excluded-config-types>
-        <config-type>hive-env</config-type>
-        <config-type>hive-site</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>hive-log4j</config-type>
-      </excluded-config-types>
-    </service>
-
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..e26148b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/files/templetonSmoke.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1";
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    
$ttonurl/status 2>&1"
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  
\@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# NOT SURE?? SUHAS
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal 
/tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su -s /bin/bash - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd 
$ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > 
/tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  
$ttonurl/pig 2>&1"
+retVal=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
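
The script tags every curl call with -w 'http_code <%{http_code}>' and then
recovers the HTTP status from the combined output with sed. The same
extraction step in Python, for reference (the sample output string is made up):

    import re

    ret_val = "...response body... http_code <200>"
    m = re.search(r"http_code <(\d+)>", ret_val)
    http_exit_code = int(m.group(1)) if m else None
    assert http_exit_code == 200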

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py
index 53a62ce..31c1673 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat.py
@@ -25,7 +25,15 @@ import sys
 def hcat():
   import params
 
+  Directory(params.hive_conf_dir,
+            recursive=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
   Directory(params.hcat_conf_dir,
+            recursive=True,
             owner=params.hcat_user,
             group=params.user_group,
   )
@@ -43,13 +51,8 @@ def hcat():
             group=params.user_group,
             mode=0644)
 
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.hcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
   )
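
hcat-env.sh is now rendered from the configuration's "content" property with
InlineTemplate instead of a packaged template file. Roughly what that
rendering does, with plain jinja2 standing in for resource_management's
InlineTemplate (the values here are invented for the sketch):

    from jinja2 import Template

    hcat_env_sh_template = (
        "JAVA_HOME={{java64_home}}\n"
        "HCAT_PID_DIR={{hcat_pid_dir}}/\n"
        "HCAT_LOG_DIR={{hcat_log_dir}}/\n"
    )
    print(Template(hcat_env_sh_template).render(
        java64_home="/usr/jdk64/jdk1.7.0_45",
        hcat_pid_dir="/var/run/webhcat",
        hcat_log_dir="/var/log/webhcat"))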

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py
index ec8faa9..081352a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hcat_service_check.py
@@ -44,7 +44,7 @@ def hcat_service_check():
             tries=3,
             user=params.smokeuser,
             try_sleep=5,
-            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
             logoutput=True)
 
     if params.security_enabled:
@@ -55,7 +55,8 @@ def hcat_service_check():
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
                     keytab=params.hdfs_user_keytab,
-                    principal=params.hdfs_principal_name
+                    principal=params.hdfs_principal_name,
+                    bin_dir=params.execute_path
       )
     else:
       ExecuteHadoop(test_cmd,
@@ -64,7 +65,8 @@ def hcat_service_check():
                     conf_dir=params.hadoop_conf_dir,
                     security_enabled=params.security_enabled,
                     kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab
+                    keytab=params.hdfs_user_keytab,
+                    bin_dir=params.execute_path
       )
 
     cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} 
cleanup")
@@ -73,6 +75,6 @@ def hcat_service_check():
             tries=3,
             user=params.smokeuser,
             try_sleep=5,
-            path=['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
             logoutput=True
     )

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
index 0b7fcb4..e388ee5 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive.py
@@ -44,7 +44,21 @@ def hive(name=None):
   # The reason is that stale-configs are service-level, not component.
   for conf_dir in params.hive_conf_dirs_list:
     fill_conf_dir(conf_dir)
-    
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            
configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
   if name == 'metastore' or name == 'hiveserver2':
     jdbc_connector()
     
@@ -92,7 +106,7 @@ def hive(name=None):
     crt_directory(params.hive_pid_dir)
     crt_directory(params.hive_log_dir)
     crt_directory(params.hive_var_lib)
-    
+
 def fill_conf_dir(component_conf_dir):
   import params
   
@@ -110,20 +124,7 @@ def fill_conf_dir(component_conf_dir):
             group=params.user_group,
             mode=0644)
 
-  XmlConfig("hive-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['hive-site'],
-            
configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-  
-  File(format("{component_conf_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-  )
-  
+
   crt_file(format("{component_conf_dir}/hive-default.xml.template"))
   crt_file(format("{component_conf_dir}/hive-env.sh.template"))
 
@@ -188,6 +189,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "/usr/bin/"])
   elif params.hive_jdbc_driver == "org.postgresql.Driver":
     cmd = format("hive mkdir -p {artifact_dir} ; cp 
/usr/share/java/{jdbc_jar_name} {target}")
@@ -195,6 +197,7 @@ def jdbc_connector():
     Execute(cmd,
             not_if=format("test -f {target}"),
             creates=params.target,
+            environment= {'PATH' : params.execute_path },
             path=["/bin", "usr/bin/"])
 
   elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
index f55fe20..88ce2b4 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
@@ -19,7 +19,6 @@ limitations under the License.
 """
 
 from resource_management import *
-import socket
 import sys
 import time
 from resource_management.core.shell import call
@@ -49,6 +48,8 @@ def hive_service(
     
     Execute(demon_cmd,
             user=params.hive_user,
+            environment={'HADOOP_HOME': params.hadoop_home},
+            path=params.execute_path,
             not_if=process_id_exists
     )
 
@@ -70,23 +71,16 @@ def hive_service(
       
       start_time = time.time()
       end_time = start_time + SOCKET_WAIT_SECONDS
-      
-      s = socket.socket()
-      s.settimeout(5)
-            
+
       is_service_socket_valid = False
       print "Waiting for the Hive server to start..."
-      try:
-        while time.time() < end_time:
-          try:
-            s.connect((address, port))
-            is_service_socket_valid = True
-            break
-          except socket.error, e:          
-            time.sleep(5)
-      finally:
-        s.close()
-      
+      while time.time() < end_time:
+        if check_thrift_port_sasl(address, port, 2, 
security_enabled=params.security_enabled):
+          is_service_socket_valid = True
+          break
+        else:
+          time.sleep(2)
+
       elapsed_time = time.time() - start_time    
       
       if is_service_socket_valid == False: 
@@ -103,8 +97,10 @@ def hive_service(
 def check_fs_root():
   import params  
   fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
-  cmd = "/usr/lib/hive/bin/metatool -listFSRoot 2>/dev/null | grep hdfs://"
+  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
   code, out = call(cmd, user=params.hive_user)
   if code == 0 and fs_root_url.strip() != out.strip():
-    cmd = format("/usr/lib/hive/bin/metatool -updateLocation 
{fs_root}{hive_apps_whs_dir} {out}")
-    Execute(cmd, user=params.hive_user)
+    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
+    Execute(cmd,
+            environment= {'PATH' : params.execute_path },
+            user=params.hive_user)
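
The wait loop earlier in this diff drops the raw socket probe for
check_thrift_port_sasl, which also completes the SASL handshake on secure
clusters. Its shape in isolation (a sketch; the SOCKET_WAIT_SECONDS value is
assumed, and check_thrift_port_sasl is called with the signature shown in the
diff):

import time

SOCKET_WAIT_SECONDS = 5 * 60  # assumed; the real value is set earlier in the file
end_time = time.time() + SOCKET_WAIT_SECONDS

is_service_socket_valid = False
while time.time() < end_time:
  # third positional argument is the per-attempt timeout in seconds
  if check_thrift_port_sasl(address, port, 2,
                            security_enabled=params.security_enabled):
    is_service_socket_valid = True
    break
  time.sleep(2)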

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
index b6d542d..08a0a50 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/install_jars.py
@@ -53,7 +53,7 @@ def install_tez_jars():
     app_dir_path = None
     lib_dir_path = None
 
-    if len(destination_hdfs_dirs) > 1:
+    if len(destination_hdfs_dirs) > 0:
       for path in destination_hdfs_dirs:
         if 'lib' in path:
           lib_dir_path = path
@@ -64,14 +64,17 @@ def install_tez_jars():
     pass
 
     if app_dir_path:
-      CopyFromLocal(params.tez_local_api_jars,
-                    mode=0755,
-                    owner=params.tez_user,
-                    dest_dir=app_dir_path,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
-      )
-    pass
+      for scr_file, dest_file in params.app_dir_files.iteritems():
+        CopyFromLocal(scr_file,
+                      mode=0755,
+                      owner=params.tez_user,
+                      dest_dir=app_dir_path,
+                      dest_file=dest_file,
+                      kinnit_if_needed=kinit_if_needed,
+                      hdfs_user=params.hdfs_user,
+                      hadoop_bin_dir=params.hadoop_bin_dir,
+                      hadoop_conf_dir=params.hadoop_conf_dir
+        )
 
     if lib_dir_path:
       CopyFromLocal(params.tez_local_lib_jars,
@@ -79,7 +82,9 @@ def install_tez_jars():
                     owner=params.tez_user,
                     dest_dir=lib_dir_path,
                     kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user
+                    hdfs_user=params.hdfs_user,
+                    hadoop_bin_dir=params.hadoop_bin_dir,
+                    hadoop_conf_dir=params.hadoop_conf_dir
       )
     pass
 
@@ -90,10 +95,14 @@ def get_tez_hdfs_dir_paths(tez_lib_uris = None):
   if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
     dir_paths = tez_lib_uris.split(',')
     for path in dir_paths:
-      lib_dir_path = path.replace(hdfs_path_prefix, '')
-      lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else 
lib_dir_path + os.sep
-      lib_dir_paths.append(lib_dir_path)
+      if not "tez.tar.gz" in path:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else 
lib_dir_path + os.sep
+        lib_dir_paths.append(lib_dir_path)
+      else:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_paths.append(os.path.dirname(lib_dir_path))
     pass
   pass
 
-  return lib_dir_paths
\ No newline at end of file
+  return lib_dir_paths
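
A worked example of the new tez.tar.gz handling (hypothetical input;
hdfs_path_prefix is assumed to be 'hdfs://' as defined earlier in this file):
plain directory URIs keep their trailing-slash form, while an archive URI now
contributes its parent directory instead of being treated as a directory
itself.

# Hypothetical tez.lib.uris value, for illustration only:
uris = "hdfs:///apps/tez/,hdfs:///apps/tez/lib/,hdfs:///apps/tez/lib/tez.tar.gz"
print get_tez_hdfs_dir_paths(uris)
# -> ['/apps/tez/', '/apps/tez/lib/', '/apps/tez/lib']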

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
index 9269666..ac3c4a0 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
@@ -26,6 +26,59 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+hdp_stack_version = config['hostLevelParams']['stack_version']
+
+#hadoop params
+if rpm_version:
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+  hadoop_home = '/usr/bigtop/current/hadoop-client'
+  hadoop_streeming_jars = 
"/usr/bigtop/current/hadoop-mapreduce-client/hadoop-streaming-*.jar"
+  hive_bin = '/usr/bigtop/current/hive-client/bin'
+  hive_lib = '/usr/bigtop/current/hive-client/lib'
+  tez_local_api_jars = '/usr/bigtop/current/tez-client/tez*.jar'
+  tez_local_lib_jars = '/usr/bigtop/current/tez-client/lib/*.jar'
+  tez_tar_file = "/usr/bigtop/current/tez-client/lib/tez*.tar.gz"
+  pig_tar_file = '/usr/bigtop/current/pig-client/pig.tar.gz'
+  hive_tar_file = '/usr/bigtop/current/hive-client/hive.tar.gz'
+  sqoop_tar_file = '/usr/bigtop/current/sqoop-client/sqoop*.tar.gz'
+
+  hcat_lib = '/usr/bigtop/current/hive/hive-hcatalog/share/hcatalog'
+  webhcat_bin_dir = '/usr/bigtop/current/hive-hcatalog/sbin'
+
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+  hive_bin = '/usr/lib/hive/bin'
+  hive_lib = '/usr/lib/hive/lib/'
+  tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+  tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+  tez_tar_file = "/usr/lib/tez/tez*.tar.gz"
+  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
+  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
+  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
+
+  if str(hdp_stack_version).startswith('2.0'):
+    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+  # for newer versions
+  else:
+    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hive_conf_dir = "/etc/hive/conf"
+hive_client_conf_dir = "/etc/hive/conf"
+hive_server_conf_dir = '/etc/hive/conf.server'
+
+# for newer versions
+hcat_conf_dir = '/etc/hive-hcatalog/conf'
+config_dir = '/etc/hive-webhcat/conf'
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + 
hadoop_bin_dir
 hive_metastore_user_name = 
config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
 hive_jdbc_connection_url = 
config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
@@ -34,7 +87,6 @@ hive_metastore_db_type = 
config['configurations']['hive-env']['hive_database_typ
 
 #users
 hive_user = config['configurations']['hive-env']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = 
config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
 if hive_jdbc_driver == "com.mysql.jdbc.Driver":
@@ -51,11 +103,9 @@ check_db_connection_jar_name = 
"DBConnectionVerification.jar"
 check_db_connection_jar = 
format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
 
 #common
-hdp_stack_version = config['hostLevelParams']['stack_version']
 hive_metastore_port = 
get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) 
#"9083"
 hive_var_lib = '/var/lib/hive'
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_bin = '/usr/lib/hive/bin'
 hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = 
default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
@@ -77,8 +127,6 @@ hive_log_dir = 
config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 #Default conf dir for client
-hive_client_conf_dir = "/etc/hive/conf"
-hive_server_conf_dir = "/etc/hive/conf"
 hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
@@ -92,8 +140,6 @@ hive_database_name = 
config['configurations']['hive-env']['hive_database_name']
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh.j2'
 
-hadoop_home = '/usr/lib/hadoop'
-
 ##Starting metastore
 start_metastore_script = 'startMetastore.sh'
 hive_metastore_pid = status_params.hive_metastore_pid
@@ -137,8 +183,6 @@ postgresql_daemon_name = 
status_params.postgresql_daemon_name
 init_metastore_schema = True
 
 ########## HCAT
-hcat_conf_dir = '/etc/hive-hcatalog/conf'
-hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
 
 hcat_dbroot = hcat_lib
 
@@ -147,8 +191,7 @@ webhcat_user = 
config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
 hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-
-hadoop_conf_dir = '/etc/hadoop/conf'
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
 
 #hive-log4j.properties.template
 if (('hive-log4j' in config['configurations']) and ('content' in 
config['configurations']['hive-log4j'])):
@@ -170,23 +213,17 @@ hive_hdfs_user_mode = 0700
 hive_apps_whs_dir = 
config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = 
config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", 
"/usr/sbin"])
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
 tez_user = config['configurations']['tez-env']['tez_user']
 
 if System.get_instance().os_family == "ubuntu":
   mysql_configname = '/etc/mysql/my.cnf'
 else:
   mysql_configname = '/etc/my.cnf'
-  
 
 # Hive security
 hive_authorization_enabled = 
config['configurations']['hive-site']['hive.security.authorization.enabled']
@@ -200,16 +237,47 @@ if os.path.exists(mysql_jdbc_driver_jar):
 else:  
   hive_exclude_packages = []
 
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+if str(hdp_stack_version).startswith('2.0') or 
str(hdp_stack_version).startswith('2.1'):
+  app_dir_files = {tez_local_api_jars:None}
+else:
+  app_dir_files = {
+              tez_local_api_jars:None,
+              tez_tar_file:"tez.tar.gz"
+  }
+
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
 HdfsDirectory = functools.partial(
   HdfsDirectory,
   conf_dir=hadoop_conf_dir,
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
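
The layout selection and PATH construction above, reduced to their essentials
(a sketch using only names from this diff): rpm_version switches between the
/usr/bigtop/current client symlink tree and the classic /usr/lib packages,
and execute_path then carries whichever bin directories were chosen, so every
Execute in these scripts resolves matching binaries.

import os

rpm_version = default("/configurations/cluster-env/rpm_version", None)
if rpm_version:
  hive_bin = '/usr/bigtop/current/hive-client/bin'
  hadoop_bin_dir = '/usr/bigtop/current/hadoop-client/bin'
else:
  hive_bin = '/usr/lib/hive/bin'
  hadoop_bin_dir = '/usr/bin'

# agent PATH first, then the stack-selected hive and hadoop bin dirs
execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir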

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
index a1cd13f..6732573 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py
@@ -41,8 +41,12 @@ class PostgreSQLServer(Script):
     self.update_pghda_conf(env)
     self.update_postgresql_conf(env)
 
-    # restart the postgresql server for the changes to take effect
-    self.stop(env)
+    # Reload the settings and start the postgresql server for the changes to take effect
+    # Note: don't restart the postgresql server. When the Ambari server and the hive
+    # metastore run on the same machine they share the same postgresql instance, and
+    # restarting it would drop the Ambari server's database connections.
+    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'reload')
+
+    # Ensure the postgresql server is started, because adding the hive metastore
+    # user requires a running server.
     self.start(env)
 
     # create the database and hive_metastore_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
index 6443e05..cc7b4cc 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_service.py
@@ -37,3 +37,5 @@ def postgresql_service(postgresql_daemon_name=None, 
action='start'):
       logoutput = True,
       not_if = status_cmd
     )
+  else:
+    Execute(cmd, logoutput = True)
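
Taken together with the existing 'start' branch, the dispatch now looks
roughly like this (a sketch; the cmd and status_cmd construction lives in the
unchanged part of the file and is assumed here):

def postgresql_service(postgresql_daemon_name=None, action='start'):
  cmd = format("service {postgresql_daemon_name} {action}")  # assumed shape
  if action == 'start':
    Execute(cmd, logoutput=True, not_if=status_cmd)  # skip when already running
  else:
    Execute(cmd, logoutput=True)  # 'stop' and 'reload' always execute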

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
index 09ba1bf..b75578b 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/service_check.py
@@ -23,6 +23,7 @@ import socket
 import sys
 
 from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
 
 class HiveServiceCheck(Script):
   def service_check(self, env):
@@ -31,17 +32,15 @@ class HiveServiceCheck(Script):
 
     address=format("{hive_server_host}")
     port=int(format("{hive_server_port}"))
-    s = socket.socket()
     print "Test connectivity to hive server"
-    try:
-      s.connect((address, port))
+    if check_thrift_port_sasl(address, port, 
security_enabled=params.security_enabled):
       print "Successfully connected to %s on port %s" % (address, port)
-      s.close()
-    except socket.error, e:
-      print "Connection to %s on port %s failed: %s" % (address, port, e)
-      sys.exit(1)
+    else:
+      print "Connection to %s on port %s failed" % (address, port)
+      exit(1)
 
     hcat_service_check()
+    webhcat_service_check()
 
 if __name__ == "__main__":
   HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
index a90fd15..3e50761 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/status_params.py
@@ -28,6 +28,7 @@ hive_pid = 'hive-server.pid'
 hive_metastore_pid = 'hive.pid'
 
 hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] 
#hcat_pid_dir
+webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
 
 if System.get_instance().os_family == "suse" or 
System.get_instance().os_family == "ubuntu":
   daemon_name = 'mysql'

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py
new file mode 100644
index 0000000..c6f41dd
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat.py
@@ -0,0 +1,131 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+import os.path
+import glob
+
+
+def webhcat():
+  import params
+
+  params.HdfsDirectory(params.webhcat_apps_dir,
+                       action="create_delayed",
+                       owner=params.webhcat_user,
+                       mode=0755
+  )
+  if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+    params.HdfsDirectory(params.hcat_hdfs_user_dir,
+                         action="create_delayed",
+                         owner=params.hcat_user,
+                         mode=params.hcat_hdfs_user_mode
+    )
+  params.HdfsDirectory(params.webhcat_hdfs_user_dir,
+                       action="create_delayed",
+                       owner=params.webhcat_user,
+                       mode=params.webhcat_hdfs_user_mode
+  )
+  params.HdfsDirectory(None, action="create")
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            recursive=True,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            
configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.webhcat_env_sh_template)
+  )
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} 
{hdfs_principal_name};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  CopyFromLocal(params.hadoop_streeming_jars,
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=params.webhcat_apps_dir,
+                kinnit_if_needed=kinit_if_needed,
+                hdfs_user=params.hdfs_user,
+                hadoop_bin_dir=params.hadoop_bin_dir,
+                hadoop_conf_dir=params.hadoop_conf_dir
+  )
+
+  if (os.path.isfile(params.pig_tar_file)):
+    CopyFromLocal(params.pig_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+  if (os.path.isfile(params.hive_tar_file)):
+    CopyFromLocal(params.hive_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+  if (len(glob.glob(params.sqoop_tar_file)) > 0):
+    CopyFromLocal(params.sqoop_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
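
One pattern in the new file worth calling out: the HDFS directories are
queued with action="create_delayed" and flushed by a single create, so one
round trip through the hadoop client covers all of them (a sketch using only
calls from the file above):

# queue the directories, then flush the whole batch in one call
params.HdfsDirectory(params.webhcat_apps_dir,
                     action="create_delayed",
                     owner=params.webhcat_user,
                     mode=0755)
params.HdfsDirectory(params.webhcat_hdfs_user_dir,
                     action="create_delayed",
                     owner=params.webhcat_user,
                     mode=params.webhcat_hdfs_user_mode)
params.HdfsDirectory(None, action="create")  # executes everything queued above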

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..088cb41
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.webhcat_pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..644d554
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} 
{webhcat_bin_dir}/webhcat_server.sh')
+
+  if action == 'start':
+    demon_cmd = format('{cmd} start')
+    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat 
{webhcat_pid_file}` >/dev/null 2>&1')
+    Execute(demon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    demon_cmd = format('{cmd} stop')
+    Execute(demon_cmd,
+            user=params.webhcat_user
+    )
+    Execute(format('rm -f {webhcat_pid_file}'))
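
The start action is idempotent: not_if runs the shell guard first and the
daemon command is skipped when the pidfile names a live process. The same
liveness test as a standalone Python 2 sketch (webhcat_is_running is a
hypothetical helper, not part of the patch):

import os

def webhcat_is_running(pid_file):
  # mirrors the `ls pidfile && ps -p $(cat pidfile)` not_if guard
  try:
    with open(pid_file) as f:
      pid = int(f.read().strip())
    os.kill(pid, 0)  # signal 0 checks process existence without killing it
    return True
  except (IOError, ValueError, OSError):
    return False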
