This is an automated email from the ASF dual-hosted git repository.

guyuqi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/master by this push:
     new fa27a679 BIGTOP-3777: Add Spark support for Bigtop 3.1.0 Mpack (#985)
fa27a679 is described below

commit fa27a67934235051fdb45fdc959b12c7f8218046
Author: timyuer <[email protected]>
AuthorDate: Mon Aug 22 17:41:42 2022 +0800

    BIGTOP-3777: Add Spark support for Bigtop 3.1.0 Mpack (#985)
    
    Add Spark support for Bigtop 3.1.0 Mpack
    Fix spark_thrift_server in params.py
---
 .../stacks/BGTP/1.0/services/SPARK/alerts.json     |  56 +++
 .../SPARK/configuration/spark-defaults.xml         | 257 ++++++++++++++
 .../1.0/services/SPARK/configuration/spark-env.xml | 166 +++++++++
 .../configuration/spark-hive-site-override.xml     |  83 +++++
 .../SPARK/configuration/spark-log4j-properties.xml |  46 +++
 .../configuration/spark-metrics-properties.xml     | 165 +++++++++
 .../configuration/spark-thrift-fairscheduler.xml   |  37 ++
 .../stacks/BGTP/1.0/services/SPARK/kerberos.json   | 132 ++++++++
 .../stacks/BGTP/1.0/services/SPARK/metainfo.xml    | 243 +++++++++++++
 .../scripts/alerts/alert_spark_thrift_port.py      | 174 ++++++++++
 .../SPARK/package/scripts/job_history_server.py    |  91 +++++
 .../1.0/services/SPARK/package/scripts/params.py   | 259 ++++++++++++++
 .../SPARK/package/scripts/service_check.py         |  77 +++++
 .../services/SPARK/package/scripts/setup_spark.py  | 154 +++++++++
 .../services/SPARK/package/scripts/spark_client.py |  56 +++
 .../SPARK/package/scripts/spark_service.py         | 187 +++++++++++
 .../SPARK/package/scripts/spark_thrift_server.py   |  85 +++++
 .../SPARK/package/scripts/status_params.py         |  39 +++
 .../package/templates/input.config-spark.json.j2   |  61 ++++
 .../1.0/services/SPARK/quicklinks/quicklinks.json  |  28 ++
 .../1.0/services/SPARK/role_command_order.json     |  10 +
 .../BGTP/1.0/services/SPARK/service_advisor.py     | 374 +++++++++++++++++++++
 .../1.0/services/SPARK/themes/directories.json     | 128 +++++++
 .../dev-support/docker/centos7/build-containers.sh |   4 +-
 24 files changed, 2910 insertions(+), 2 deletions(-)

diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/alerts.json
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/alerts.json
new file mode 100644
index 00000000..534bd845
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/alerts.json
@@ -0,0 +1,56 @@
+{
+  "SPARK": {
+    "service": [],
+    "SPARK_JOBHISTORYSERVER": [
+      {
+        "name": "SPARK_JOBHISTORYSERVER_PROCESS",
+        "label": "Spark History Server",
+        "description": "This host-level alert is triggered if the Spark 
History Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{spark-defaults/spark.history.ui.port}}",
+          "default_port": 18081,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5
+            }
+          }
+        }
+      }
+    ],
+    "SPARK_THRIFTSERVER": [
+      {
+        "name": "spark_thriftserver_status",
+        "label": "Spark Thrift Server",
+        "description": "This host-level alert is triggered if the Spark Thrift 
Server cannot be determined to be up.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "SPARK/package/scripts/alerts/alert_spark_thrift_port.py",
+          "parameters": [
+            {
+              "name": "check.command.timeout",
+              "display_name": "Command Timeout",
+              "value": 60.0,
+              "type": "NUMERIC",
+              "description": "The maximum time before check command will be 
killed by timeout",
+              "units": "seconds",
+              "threshold": "CRITICAL"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-defaults.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-defaults.xml
new file mode 100644
index 00000000..f50bed14
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-defaults.xml
@@ -0,0 +1,257 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>spark.yarn.queue</name>
+    <value>default</value>
+    <description>
+      The name of the YARN queue to which the application is submitted.
+    </description>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.history.provider</name>
+    <value>org.apache.spark.deploy.history.FsHistoryProvider</value>
+    <description>
+      Name of history provider
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.history.ui.port</name>
+    <value>18081</value>
+    <description>
+      The port to which the web interface of the History Server binds.
+    </description>
+    <final>true</final>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.history.fs.logDirectory</name>
+    <display-name>Spark History FS Log directory</display-name>
+    <value>hdfs:///spark-history/</value>
+    <description>
+      Base directory for history spark application log.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.history.kerberos.principal</name>
+    <value>none</value>
+    <description>
+      Kerberos principal name for the Spark History Server.
+    </description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.history.kerberos.keytab</name>
+    <value>none</value>
+    <description>
+      Location of the kerberos keytab file for the Spark History Server.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.eventLog.enabled</name>
+    <value>true</value>
+    <description>
+        Whether to log Spark events, useful for reconstructing the Web UI 
after the application has finished.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.eventLog.dir</name>
+    <display-name>Spark Eventlog directory</display-name>
+    <value>hdfs:///spark-history/</value>
+    <description>
+        Base directory in which Spark events are logged, if 
spark.eventLog.enabled is true.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.yarn.historyServer.address</name>
+    <value>{{spark_history_server_host}}:{{spark_history_ui_port}}</value>
+    <description>The address of the Spark history server (i.e. 
host.com:18081). The address should not contain a scheme (http://). Defaults to 
not being set since the history server is an optional service. This address is 
given to the YARN ResourceManager when the Spark application finishes to link 
the application from the ResourceManager UI to the Spark history server 
UI.</description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.scheduler.allocation.file</name>
+    <value>file:///{{spark_conf}}/spark-thrift-fairscheduler.xml</value>
+    <description>
+      Scheduler configuration file for thriftserver.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.scheduler.mode</name>
+    <value>FAIR</value>
+    <description>
+      The scheduling mode between jobs submitted to the same SparkContext.
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  <property>
+    <name>spark.hadoop.cacheConf</name>
+    <value>false</value>
+    <description>
+      Specifies whether HadoopRDD caches the Hadoop configuration object
+    </description>
+    <on-ambari-upgrade add="true" />
+  </property>
+  
+  <property>
+    <name>spark.yarn.executor.failuresValidityInterval</name>
+    <value>2h</value>
+    <description>
+      Defines the validity interval for executor failure tracking.
+      Executor failures which are older than the validity interval will be 
ignored.
+    </description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.yarn.maxAppAttempts</name>
+    <value>1</value>
+    <description>
+      The maximum number of attempts that will be made to submit the 
application.
+      It should be no larger than the global number of max attempts in the 
YARN configuration.
+    </description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+  <property>
+    <name>spark.history.fs.cleaner.enabled</name>
+    <value>true</value>
+    <description>Specifies whether the History Server should periodically 
clean up event logs from storage.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.history.fs.cleaner.interval</name>
+    <value>7d</value>
+    <description>How often the filesystem job history cleaner checks for files 
to delete. Files are only deleted if they are older than 
spark.history.fs.cleaner.maxAge</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.history.fs.cleaner.maxAge</name>
+    <value>90d</value>
+    <description>Job history files older than this will be deleted when the 
filesystem history cleaner runs.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+
+  <property>
+    <name>spark.sql.statistics.fallBackToHdfs</name>
+    <value>true</value>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.sql.autoBroadcastJoinThreshold</name>
+    <value>10MB</value>
+    <description>Configures the maximum size in bytes for a table that will be 
broadcast to all worker nodes when performing a join.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.io.compression.lz4.blockSize</name>
+    <value>128kb</value>
+    <description>Block size in bytes used in LZ4 compression, in the case when 
LZ4 compression codec is used. Lowering this block size will also lower shuffle 
memory usage when LZ4 is used.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+
+  <property>
+    <name>spark.sql.orc.filterPushdown</name>
+    <value>true</value>
+    <description>Enables filter pushdown for ORC formats.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.sql.hive.convertMetastoreOrc</name>
+    <value>true</value>
+    <description>Enables new ORC format to read/write Hive 
Tables.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+
+  <property>
+    <name>spark.shuffle.io.backLog</name>
+    <value>8192</value>
+    <description>Requested maximum length of the queue of incoming 
connections.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.shuffle.file.buffer</name>
+    <value>1m</value>
+    <description>Size of the in-memory buffer for each shuffle file output 
stream, in KiB unless otherwise specified. These buffers reduce the number of 
disk seeks and system calls made in creating intermediate shuffle 
files.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.master</name>
+    <value>yarn</value>
+    <description>The deploying mode of spark application.</description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.executor.extraJavaOptions</name>
+    <value>-XX:+UseNUMA</value>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.sql.warehouse.dir</name>
+    <value>{{spark_warehouse_dir}}</value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+  <property>
+    <name>spark.sql.hive.metastore.version</name>
+    <value>3.1.3</value>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+  <property>
+    <name>spark.sql.hive.metastore.jars</name>
+    <value>{{hive_home}}/lib/*</value>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+  <property>
+    <name>spark.history.store.path</name>
+    <value>/var/lib/spark/shs_db</value>
+    <description></description>
+    <on-ambari-upgrade add="false" />
+  </property>
+
+</configuration>
\ No newline at end of file
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-env.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-env.xml
new file mode 100644
index 00000000..15bcfeca
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-env.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>spark_user</name>
+    <display-name>Spark User</display-name>
+    <value>spark</value>
+    <property-type>USER</property-type>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+        <property>
+          <type>spark-env</type>
+          <name>spark_group</name>
+        </property>
+      </user-groups>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>spark_group</name>
+    <display-name>Spark Group</display-name>
+    <value>spark</value>
+    <property-type>GROUP</property-type>
+    <description>spark group</description>
+    <value-attributes>
+      <type>user</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>spark_log_dir</name>
+    <display-name>Spark Log directory</display-name>
+    <value>/var/log/spark</value>
+    <description>Spark Log Dir</description>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>spark_pid_dir</name>
+    <display-name>Spark PID directory</display-name>
+    <value>/var/run/spark</value>
+    <value-attributes>
+      <type>directory</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>spark_daemon_memory</name>
+    <value>2048</value>
+    <description>Memory for Master, Worker and history server (default: 
2G)</description>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive_kerberos_keytab</name>
+    <value>{{hive_kerberos_keytab}}</value>
+    <description>hive keytab for spark thrift server</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive_kerberos_principal</name>
+    <value>{{hive_kerberos_principal}}</value>
+    <description>hive principal for spark thrift server</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- spark-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for spark-env.sh file</description>
+    <value>
+#!/usr/bin/env bash
+
+# This file is sourced when running various Spark programs.
+# Copy it as spark-env.sh and edit that to configure Spark for your site.
+
+# Options read in YARN client mode
+#SPARK_EXECUTOR_INSTANCES="2" #Number of workers to start (Default: 2)
+#SPARK_EXECUTOR_CORES="1" #Number of cores for the workers (Default: 1).
+#SPARK_EXECUTOR_MEMORY="1G" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
+#SPARK_DRIVER_MEMORY="512M" #Memory for Master (e.g. 1000M, 2G) (Default: 512 
Mb)
+#SPARK_YARN_APP_NAME="spark" #The name of your application (Default: Spark)
+#SPARK_YARN_QUEUE="default" #The hadoop queue to use for allocation requests 
(Default: default)
+#SPARK_YARN_DIST_FILES="" #Comma separated list of files to be distributed 
with the job.
+#SPARK_YARN_DIST_ARCHIVES="" #Comma separated list of archives to be 
distributed with the job.
+
+{% if security_enabled %}
+export 
SPARK_HISTORY_OPTS='-Dspark.ui.filters=org.apache.hadoop.security.authentication.server.AuthenticationFilter
 
-Dspark.org.apache.hadoop.security.authentication.server.AuthenticationFilter.params="type=kerberos,kerberos.principal={{spnego_principal}},kerberos.keytab={{spnego_keytab}}"'
+{% endif %}
+
+
+# Generic options for the daemons used in the standalone deploy mode
+
+# Alternate conf dir. (Default: ${SPARK_HOME}/conf)
+export SPARK_CONF_DIR=${SPARK_CONF_DIR:-{{spark_home}}/conf}
+
+# Where log files are stored.(Default:${SPARK_HOME}/logs)
+#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs
+export SPARK_LOG_DIR={{spark_log_dir}}
+
+# Where the pid file is stored. (Default: /tmp)
+export SPARK_PID_DIR={{spark_pid_dir}}
+
+#Memory for Master, Worker and history server (default: 1024MB)
+export SPARK_DAEMON_MEMORY={{spark_daemon_memory}}m
+
+# A string representing this instance of spark.(Default: $USER)
+SPARK_IDENT_STRING=$USER
+
+# The scheduling priority for daemons. (Default: 0)
+SPARK_NICENESS=0
+
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+export SPARK_DIST_CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath)
+
+# The java implementation to use.
+export JAVA_HOME={{java_home}}
+
+</value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>spark_thrift_cmd_opts</name>
+    <description>additional spark thrift server commandline 
options</description>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-hive-site-override.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-hive-site-override.xml
new file mode 100644
index 00000000..15351908
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-hive-site-override.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>false</value>
+    <description>
+      Disable impersonation in Hive Server 2.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>1800</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>5</value>
+    <description>
+      Expects a time value - number of seconds for the client to wait between 
consecutive connection attempts
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10016</value>
+    <description>
+      TCP port number to listen on, default 10016.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hive.server2.thrift.http.port</name>
+    <value>10002</value>
+    <description>Port number of HiveServer2 Thrift interface when 
hive.server2.transport.mode is 'http'.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.server2.transport.mode</name>
+    <value>binary</value>
+    <description>
+      Expects one of [binary, http].
+      Transport mode of HiveServer2.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>metastore.catalog.default</name>
+    <value>hive</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.load.data.owner</name>
+    <value>spark</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/spark</value>
+    <description></description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-log4j-properties.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-log4j-properties.xml
new file mode 100644
index 00000000..93cf8972
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-log4j-properties.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>Spark-log4j-Properties</description>
+    <value>
+# Set everything to be logged to the console
+log4j.rootCategory=INFO, console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p 
%c{1}: %m%n
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.eclipse.jetty=WARN
+log4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-metrics-properties.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-metrics-properties.xml
new file mode 100644
index 00000000..a490bcab
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-metrics-properties.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>Spark-metrics-properties</description>
+    <value>
+# syntax: [instance].sink|source.[name].[options]=[value]
+
+# This file configures Spark's internal metrics system. The metrics system is
+# divided into instances which correspond to internal components.
+# Each instance can be configured to report its metrics to one or more sinks.
+# Accepted values for [instance] are "master", "worker", "executor", "driver",
+# and "applications". A wild card "*" can be used as an instance name, in
+# which case all instances will inherit the supplied property.
+#
+# Within an instance, a "source" specifies a particular set of grouped metrics.
+# there are two kinds of sources:
+# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will
+# collect a Spark component's internal state. Each instance is paired with a
+# Spark source that is added automatically.
+# 2. Common sources, like JvmSource, which will collect low level state.
+# These can be added through configuration options and are then loaded
+# using reflection.
+#
+# A "sink" specifies where metrics are delivered to. Each instance can be
+# assigned one or more sinks.
+#
+# The sink|source field specifies whether the property relates to a sink or
+# source.
+#
+# The [name] field specifies the name of source or sink.
+#
+# The [options] field is the specific property of this source or sink. The
+# source or sink is responsible for parsing this property.
+#
+# Notes:
+# 1. To add a new sink, set the "class" option to a fully qualified class
+# name (see examples below).
+# 2. Some sinks involve a polling period. The minimum allowed polling period
+# is 1 second.
+# 3. Wild card properties can be overridden by more specific properties.
+# For example, master.sink.console.period takes precedence over
+# *.sink.console.period.
+# 4. A metrics specific configuration
+# "spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties" should be
+# added to Java properties using -Dspark.metrics.conf=xxx if you want to
+# customize metrics system. You can also put the file in ${SPARK_HOME}/conf
+# and it will be loaded automatically.
+# 5. MetricsServlet is added by default as a sink in master, worker and client
+# driver, you can send http request "/metrics/json" to get a snapshot of all 
the
+# registered metrics in json format. For master, requests 
"/metrics/master/json" and
+# "/metrics/applications/json" can be sent separately to get metrics snapshot 
of
+# instance master and applications. MetricsServlet may not be configured by 
self.
+#
+
+## List of available sinks and their properties.
+
+# org.apache.spark.metrics.sink.ConsoleSink
+# Name: Default: Description:
+# period 10 Poll period
+# unit seconds Units of poll period
+
+# org.apache.spark.metrics.sink.CSVSink
+# Name: Default: Description:
+# period 10 Poll period
+# unit seconds Units of poll period
+# directory /tmp Where to store CSV files
+
+# org.apache.spark.metrics.sink.GangliaSink
+# Name: Default: Description:
+# host NONE Hostname or multicast group of Ganglia server
+# port NONE Port of Ganglia server(s)
+# period 10 Poll period
+# unit seconds Units of poll period
+# ttl 1 TTL of messages sent by Ganglia
+# mode multicast Ganglia network mode ('unicast' or 'multicast')
+
+# org.apache.spark.metrics.sink.JmxSink
+
+# org.apache.spark.metrics.sink.MetricsServlet
+# Name: Default: Description:
+# path VARIES* Path prefix from the web server root
+# sample false Whether to show entire set of samples for histograms ('false' 
or 'true')
+#
+# * Default path is /metrics/json for all instances except the master. The 
master has two paths:
+# /metrics/applications/json # App information
+# /metrics/master/json # Master information
+
+# org.apache.spark.metrics.sink.GraphiteSink
+# Name: Default: Description:
+# host NONE Hostname of Graphite server
+# port NONE Port of Graphite server
+# period 10 Poll period
+# unit seconds Units of poll period
+# prefix EMPTY STRING Prefix to prepend to metric name
+
+## Examples
+# Enable JmxSink for all instances by class name
+#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
+
+# Enable ConsoleSink for all instances by class name
+#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink
+
+# Polling period for ConsoleSink
+#*.sink.console.period=10
+
+#*.sink.console.unit=seconds
+
+# Master instance overlap polling period
+#master.sink.console.period=15
+
+#master.sink.console.unit=seconds
+
+# Enable CsvSink for all instances
+#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink
+
+# Polling period for CsvSink
+#*.sink.csv.period=1
+
+#*.sink.csv.unit=minutes
+
+# Polling directory for CsvSink
+#*.sink.csv.directory=/tmp/
+
+# Worker instance overlap polling period
+#worker.sink.csv.period=10
+
+#worker.sink.csv.unit=minutes
+
+# Enable jvm source for instance master, worker, driver and executor
+#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-thrift-fairscheduler.xml
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-thrift-fairscheduler.xml
new file mode 100644
index 00000000..716f0608
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/configuration/spark-thrift-fairscheduler.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration supports_final="true" supports_adding_forbidden="true">
+  <property>
+    <name>fairscheduler_content</name>
+    <description>This is the jinja template for spark-thrift-fairscheduler.xml file.</description>
+    <value><![CDATA[<?xml version="1.0"?>
+<allocations>
+    <pool name="default">
+        <schedulingMode>FAIR</schedulingMode>
+        <weight>1</weight>
+        <minShare>2</minShare>
+    </pool>
+</allocations>
+]]>
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true" />
+  </property>
+</configuration>
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/kerberos.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/kerberos.json
new file mode 100644
index 00000000..77e76538
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/kerberos.json
@@ -0,0 +1,132 @@
+{
+  "services": [
+    {
+      "name": "SPARK",
+      "identities": [
+        {
+          "name": "spark_smokeuser",
+          "reference": "/smokeuser"
+        },
+        {
+          "name": "sparkuser",
+          "principal": {
+            "value": "${spark-env/spark_user}${principal_suffix}@${realm}",
+            "type": "user",
+            "local_username": "${spark-env/spark_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/spark.headless.keytab",
+            "owner": {
+              "name": "${spark-env/spark_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            }
+          }
+        },
+        {
+          "name": "atlas_kafka",
+          "principal": {
+            "value": "spark_atlas@${realm}",
+            "type": "user",
+            "configuration": "spark-atlas-application-properties-override/atlas.jaas.KafkaClient.option.principal"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/spark-atlas.headless.keytab",
+            "owner": {
+              "name": "${spark-env/spark_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            }
+          }
+        },
+        {
+          "name": "spark_service_keytab",
+          "principal": {
+            "value": "spark/_HOST@${realm}",
+            "type": "service",
+            "configuration": "spark-hive-site-override/hive.server2.authentication.kerberos.principal",
+            "local_username": "${spark-env/spark_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/spark.service.keytab",
+            "owner": {
+              "name": "${spark-env/spark_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": ""
+            },
+            "configuration": "spark-hive-site-override/hive.server2.authentication.kerberos.keytab"
+          }
+        }
+      ],
+      "configurations": [
+        {
+          "spark-defaults": {
+            "spark.history.kerberos.enabled": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "SPARK_JOBHISTORYSERVER",
+          "identities": [
+            {
+              "name": "hdfs",
+              "reference": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "history_server_spnego",
+              "reference": "/spnego",
+              "principal": {
+                "configuration": "spark-defaults/history.server.spnego.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "spark-defaults/history.server.spnego.keytab.file"
+              }
+            },
+            {
+              "name": "shs_spark_service_identity",
+              "reference": "/SPARK/spark_service_keytab",
+              "principal": {
+                "configuration": "spark-defaults/spark.history.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "spark-defaults/spark.history.kerberos.keytab"
+              }
+            },
+            {
+              "name": "sts_spark_service_identity_2_thrift",
+              "reference": "/SPARK/spark_service_keytab",
+              "principal": {
+                "configuration": "spark-thrift-sparkconf/spark.yarn.principal"
+              },
+              "keytab": {
+                "configuration": "spark-thrift-sparkconf/spark.yarn.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "SPARK_CLIENT"
+        },
+        {
+          "name": "SPARK_THRIFTSERVER",
+          "identities": [
+            {
+              "name": "hdfs",
+              "reference": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/metainfo.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/metainfo.xml
new file mode 100644
index 00000000..022e3699
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/metainfo.xml
@@ -0,0 +1,243 @@
+<?xml version="1.0"?>
+<!--Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SPARK</name>
+      <displayName>Spark</displayName>
+      <comment>Apache Spark is a unified analytics engine for large-scale data processing.</comment>
+      <version>Bigtop+3.1</version>
+      <components>
+        <component>
+          <name>SPARK_JOBHISTORYSERVER</name>
+          <displayName>Spark History Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HIVE/HIVE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/job_history_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>spark_jobhistory_server</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+        <component>
+          <name>SPARK_THRIFTSERVER</name>
+          <displayName>Spark Thrift Server</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HIVE/HIVE_METASTORE</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/spark_thrift_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <logs>
+            <log>
+              <logId>spark_thriftserver</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+        <component>
+          <name>SPARK_CLIENT</name>
+          <displayName>Spark Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/spark_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>spark-log4j.properties</fileName>
+              <dictionaryName>-log4j-properties</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>spark-env.sh</fileName>
+              <dictionaryName>spark-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>spark-metrics.properties</fileName>
+              <dictionaryName>spark-metrics-properties</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>properties</type>
+              <fileName>spark-defaults.conf</fileName>
+              <dictionaryName>spark-defaults</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>spark-defaults</config-type>
+        <config-type>spark-env</config-type>
+        <config-type>spark-log4j-properties</config-type>
+        <config-type>spark-metrics-properties</config-type>
+        <config-type>spark-hive-site-override</config-type>
+        <config-type>spark-thrift-fairscheduler</config-type>
+      </configuration-dependencies>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>HIVE</service>
+      </requiredServices>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>spark-core</name>
+            </package>
+            <package>
+              <name>spark-python</name>
+            </package>
+            <package>
+              <name>spark-history-server</name>
+            </package>
+            <package>
+              <name>spark-thriftserver</name>
+            </package>
+            <package>
+              <name>spark-datanucleus</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>directories.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/alerts/alert_spark_thrift_port.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/alerts/alert_spark_thrift_port.py
new file mode 100644
index 00000000..97cb1c8e
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/alerts/alert_spark_thrift_port.py
@@ -0,0 +1,174 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import os
+import socket
+import time
+import logging
+import traceback
+from resource_management.libraries.functions import format
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from resource_management.core import global_lock
+
+
+stack_root = Script.get_stack_root()
+
+OK_MESSAGE = "TCP OK - {0:.3f}s response on port {1}"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1} ({2})"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{spark-hive-site-override/hive.server2.thrift.port}}'
+HIVE_SERVER_THRIFT_HTTP_PORT_KEY = '{{spark-hive-site-override/hive.server2.thrift.http.port}}'
+HIVE_SERVER_TRANSPORT_MODE_KEY = '{{spark-hive-site-override/hive.server2.transport.mode}}'
+HIVE_SERVER_HTTP_ENDPOINT = '{{spark-hive-site-override/hive.server2.http.endpoint}}'
+HIVE_SERVER2_USE_SSL_KEY = '{{spark-hive-site-override/hive.server2.use.SSL}}'
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+
+HIVE_SERVER2_KERBEROS_KEYTAB = '{{spark-hive-site-override/hive.server2.authentication.kerberos.keytab}}'
+HIVE_SERVER2_PRINCIPAL_KEY = '{{spark-hive-site-override/hive.server2.authentication.kerberos.principal}}'
+
+# The configured Kerberos executable search paths, if any
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+THRIFT_PORT_DEFAULT = 10002
+HIVE_SERVER_TRANSPORT_MODE_DEFAULT = 'binary'
+
+SPARK_USER_KEY = '{{spark-env/spark_user}}'
+
+CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout'
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+logger = logging.getLogger('ambari_alerts')
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+    """
+    Returns a tuple of tokens in the format {{site/property}} that will be used
+    to build the dictionary passed into execute
+    """
+    return (HIVE_SERVER_THRIFT_PORT_KEY, HIVE_SERVER_THRIFT_HTTP_PORT_KEY, HIVE_SERVER_TRANSPORT_MODE_KEY, HIVE_SERVER_HTTP_ENDPOINT,
+            HIVE_SERVER2_USE_SSL_KEY, SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, SPARK_USER_KEY, HIVE_SERVER2_KERBEROS_KEYTAB,
+            HIVE_SERVER2_PRINCIPAL_KEY)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+    """
+    Returns a tuple containing the result code and a pre-formatted result label
+
+    Keyword arguments:
+    configurations (dictionary): a mapping of configuration key to value
+    parameters (dictionary): a mapping of script parameter key to value
+    host_name (string): the name of this host where the alert is running
+    """
+
+    spark_home = os.path.join(stack_root, "current", 'spark-client')
+
+    if configurations is None:
+        return ('UNKNOWN', ['There were no configurations supplied to the script.'])
+
+    transport_mode = HIVE_SERVER_TRANSPORT_MODE_DEFAULT
+    if HIVE_SERVER_TRANSPORT_MODE_KEY in configurations:
+        transport_mode = configurations[HIVE_SERVER_TRANSPORT_MODE_KEY]
+
+    if HIVE_SERVER_HTTP_ENDPOINT in configurations:
+        http_endpoint = configurations[HIVE_SERVER_HTTP_ENDPOINT]
+
+    port = THRIFT_PORT_DEFAULT
+    if transport_mode.lower() == 'binary' and HIVE_SERVER_THRIFT_PORT_KEY in configurations:
+        port = int(configurations[HIVE_SERVER_THRIFT_PORT_KEY])
+    elif transport_mode.lower() == 'http' and HIVE_SERVER_THRIFT_HTTP_PORT_KEY in configurations:
+        port = int(configurations[HIVE_SERVER_THRIFT_HTTP_PORT_KEY])
+
+    ssl_enabled = False
+    if (HIVE_SERVER2_USE_SSL_KEY in configurations
+        and str(configurations[HIVE_SERVER2_USE_SSL_KEY]).upper() == 'TRUE'):
+        ssl_enabled = True
+
+    security_enabled = False
+    if SECURITY_ENABLED_KEY in configurations:
+        security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+    hive_kerberos_keytab = None
+    if HIVE_SERVER2_KERBEROS_KEYTAB in configurations:
+        hive_kerberos_keytab = configurations[HIVE_SERVER2_KERBEROS_KEYTAB]
+
+    if host_name is None:
+        host_name = socket.getfqdn()
+
+    hive_principal = None
+    if HIVE_SERVER2_PRINCIPAL_KEY in configurations:
+        hive_principal = configurations[HIVE_SERVER2_PRINCIPAL_KEY]
+        hive_principal = hive_principal.replace('_HOST',host_name.lower())
+
+    # Get the configured Kerberos executable search paths, if any
+    if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+        kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+    else:
+        kerberos_executable_search_paths = None
+
+    kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
+    sparkuser = configurations[SPARK_USER_KEY]
+
+    if security_enabled:
+        kinitcmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_principal}; ")
+        # prevent concurrent kinit
+        kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS)
+        kinit_lock.acquire()
+        try:
+            Execute(kinitcmd, user=sparkuser)
+        finally:
+            kinit_lock.release()
+
+    result_code = None
+    try:
+        if host_name is None:
+            host_name = socket.getfqdn()
+
+        beeline_url = ["jdbc:hive2://{host_name}:{port}/default", "transportMode={transport_mode}"]
+        if security_enabled:
+            beeline_url.append("principal={hive_principal}")
+        if transport_mode == "http":
+            beeline_url.append("httpPath={http_endpoint}")
+            if ssl_enabled:
+                beeline_url.append("ssl=true")
+
+        # append url according to used transport
+
+        beeline_cmd = os.path.join(spark_home, "bin", "beeline")
+        cmd = "! %s -u '%s'  -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL' -e 'Error: Could not open'" % \
+              (beeline_cmd, format(";".join(beeline_url)))
+
+        start_time = time.time()
+        try:
+            Execute(cmd, user=sparkuser, path=[beeline_cmd], timeout=CHECK_COMMAND_TIMEOUT_DEFAULT)
+            total_time = time.time() - start_time
+            result_code = 'OK'
+            label = OK_MESSAGE.format(total_time, port)
+        except:
+            result_code = 'CRITICAL'
+            label = CRITICAL_MESSAGE.format(host_name, port, traceback.format_exc())
+    except:
+        label = traceback.format_exc()
+        result_code = 'UNKNOWN'
+
+    return (result_code, [label])
+
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/job_history_server.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/job_history_server.py
new file mode 100644
index 00000000..bf181495
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/job_history_server.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import *
+from spark_service import spark_service
+
+
+class JobHistoryServer(Script):
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+    
+    self.install_packages(env)
+    
+  def configure(self, env, upgrade_type=None, config_dir=None):
+    import params
+    env.set_params(params)
+    
+    setup_spark(env, 'server', upgrade_type=upgrade_type, action = 'config')
+    
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    
+    self.configure(env)
+    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    
+    spark_service('jobhistoryserver', upgrade_type=upgrade_type, action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.spark_history_server_pid_file)
+    
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
+      stack_select.select_packages(params.version)
+
+  def get_log_folder(self):
+    import params
+    return params.spark_log_dir
+  
+  def get_user(self):
+    import params
+    return params.spark_user
+
+  def get_pid_files(self):
+    import status_params
+    return [status_params.spark_history_server_pid_file]
+
+if __name__ == "__main__":
+  JobHistoryServer().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/params.py
new file mode 100644
index 00000000..9552c2d1
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/params.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import socket
+import status_params
+import os
+from urlparse import urlparse
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions import conf_select, stack_select
+from resource_management.libraries.functions.version import 
format_stack_version, get_major_version
+from resource_management.libraries.functions.copy_tarball import 
get_sysprep_skip_copy_tarballs_hdfs
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import 
get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
sudo = AMBARI_SUDO_BINARY

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
  'SPARK_CLIENT' : 'spark-client',
  'SPARK_THRIFTSERVER' : 'spark-thriftserver'
}

HIVE_SERVER_ROLE_DIRECTORY_MAP = {
  'HIVE_METASTORE' : 'hive-metastore',
  'HIVE_SERVER' : 'hive-server2',
  'HIVE_CLIENT' : 'hive-client'
}

component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "SPARK_CLIENT")
hive_component_directory = Script.get_component_from_role(HIVE_SERVER_ROLE_DIRECTORY_MAP, "HIVE_CLIENT")

config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
fqdn = socket.getfqdn().lower()

cluster_name = config['clusterName']
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
stack_version_unformatted = config['clusterLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
major_stack_version = get_major_version(stack_version_formatted)

sysprep_skip_copy_tarballs_hdfs = get_sysprep_skip_copy_tarballs_hdfs()

# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)

spark_conf = '/etc/spark/conf'
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
hive_home = '/usr/lib/hive'
hive_conf_dir = format("{hive_home}/conf")
yarn_application_classpath = config['configurations']['yarn-site']['yarn.application.classpath']

hadoop_home = stack_select.get_hadoop_dir("home")
spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
spark_pid_dir = status_params.spark_pid_dir
spark_home = '/usr/lib/spark'

spark_daemon_memory = config['configurations']['spark-env']['spark_daemon_memory']
spark_thrift_server_conf_file = spark_conf + "/spark-defaults.conf"
java_home = config['ambariLevelParams']['java_home']

hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
user_group = config['configurations']['cluster-env']['user_group']

spark_user = status_params.spark_user
hive_user = status_params.hive_user
spark_group = status_params.spark_group
# NOTE(review): user_group is re-assigned here from status_params, shadowing the
# cluster-env value read above; both are kept to preserve existing behavior.
user_group = status_params.user_group
spark_hdfs_user_dir = format("/user/{spark_user}")
spark_history_dir = default('/configurations/spark-defaults/spark.history.fs.logDirectory', "hdfs:///spark-history")

spark_lib_dir = "/var/lib/spark"
spark_history_store_path = default("/configurations/spark-defaults/spark.history.store.path", "/var/lib/spark/shs_db")

spark_warehouse_dir = default("/configurations/hive-site/hive.metastore.warehouse.dir", "/warehouse/tablespace/managed/hive")
whs_dir_protocol = urlparse(spark_warehouse_dir).scheme
default_metastore_catalog = config['configurations']['spark-hive-site-override']["metastore.catalog.default"]

spark_history_server_pid_file = status_params.spark_history_server_pid_file
spark_thrift_server_pid_file = status_params.spark_thrift_server_pid_file

spark_history_server_start = format("{spark_home}/sbin/start-history-server.sh")
spark_history_server_stop = format("{spark_home}/sbin/stop-history-server.sh")

spark_thrift_server_start = format("{spark_home}/sbin/start-thriftserver.sh")
spark_thrift_server_stop = format("{spark_home}/sbin/stop-thriftserver.sh")
spark_hadoop_lib_native = format("{stack_root}/current/hadoop-client/lib/native:{stack_root}/current/hadoop-client/lib/native/Linux-amd64-64")

run_example_cmd = format("{spark_home}/bin/run-example")
spark_smoke_example = "SparkPi"
spark_service_check_cmd = format(
  "{run_example_cmd} --master yarn --deploy-mode cluster --num-executors 1 --driver-memory 256m --executor-memory 256m --executor-cores 1 {spark_smoke_example} 1")

spark_jobhistoryserver_hosts = default("/clusterHostInfo/spark_jobhistoryserver_hosts", [])

if len(spark_jobhistoryserver_hosts) > 0:
  spark_history_server_host = spark_jobhistoryserver_hosts[0]
else:
  spark_history_server_host = "localhost"

# spark-defaults params
ui_ssl_enabled = default("/configurations/spark-defaults/spark.ssl.enabled", False)

# BUG FIX: the original passed the *hostname* as the config-path argument of
# default(), i.e. default(spark_history_server_host, "localhost"), which looks
# up a config key named after the host and therefore always returned
# "localhost". Use the resolved history server host directly; it already
# falls back to "localhost" above.
spark_yarn_historyServer_address = spark_history_server_host
spark_history_scheme = "http"
spark_history_ui_port = config['configurations']['spark-defaults']['spark.history.ui.port']

if ui_ssl_enabled:
  # Convention: SSL UI is served on the plain port + 400.
  spark_history_ui_port = str(int(spark_history_ui_port) + 400)
  spark_history_scheme = "https"


spark_env_sh = config['configurations']['spark-env']['content']
spark_log4j_properties = config['configurations']['spark-log4j-properties']['content']
spark_metrics_properties = config['configurations']['spark-metrics-properties']['content']

hive_server_host = default("/clusterHostInfo/hive_server_hosts", [])
is_hive_installed = not len(hive_server_host) == 0
security_enabled = config['configurations']['cluster-env']['security_enabled']


kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
spark_kerberos_keytab = config['configurations']['spark-defaults']['spark.history.kerberos.keytab']
spark_kerberos_principal = config['configurations']['spark-defaults']['spark.history.kerberos.principal']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']

spark_thriftserver_hosts = default("/clusterHostInfo/spark_thriftserver_hosts", [])
has_spark_thriftserver = not len(spark_thriftserver_hosts) == 0

# hive-site params
spark_hive_properties = {
  'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
}

# security settings
if security_enabled:
  spnego_principal = config['configurations']['spark-defaults']['history.server.spnego.kerberos.principal']
  spnego_principal = spnego_principal.replace('_HOST', fqdn)
  spnego_keytab = config['configurations']['spark-defaults']['history.server.spnego.keytab.file']

  spark_principal = spark_kerberos_principal.replace('_HOST', fqdn)

  if is_hive_installed:
    spark_hive_properties.update({
      'hive.metastore.sasl.enabled': str(config['configurations']['hive-site']['hive.metastore.sasl.enabled']).lower(),
      'hive.metastore.kerberos.keytab.file': config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file'],
      'hive.server2.authentication.spnego.principal': config['configurations']['hive-site']['hive.server2.authentication.spnego.principal'],
      'hive.server2.authentication.spnego.keytab': config['configurations']['hive-site']['hive.server2.authentication.spnego.keytab'],
      'hive.metastore.kerberos.principal': config['configurations']['hive-site']['hive.metastore.kerberos.principal'],
      'hive.server2.authentication.kerberos.principal': config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal'],
      'hive.server2.authentication.kerberos.keytab': config['configurations']['hive-site']['hive.server2.authentication.kerberos.keytab'],
      'hive.server2.authentication': config['configurations']['hive-site']['hive.server2.authentication'],
    })

    hive_kerberos_keytab = config['configurations']['spark-hive-site-override']['hive.server2.authentication.kerberos.keytab']
    default_hive_kerberos_principal = config['configurations']['spark-hive-site-override']['hive.server2.authentication.kerberos.principal']
    hive_kerberos_principal = default_hive_kerberos_principal.replace('_HOST', fqdn)

spark_transport_mode = config['configurations']['spark-hive-site-override']['hive.server2.transport.mode']

# These are defined unconditionally (the original only set them in the 'http'
# branch), so downstream consumers can always reference them safely.
spark_thrift_ssl_enabled = default("/configurations/spark-hive-site-override/hive.server2.use.SSL", False)
spark_thrift_endpoint = default("/configurations/spark-hive-site-override/hive.server2.http.endpoint", "cliservice")

if spark_transport_mode.lower() == 'http':
  spark_thrift_port = int(config['configurations']['spark-hive-site-override']['hive.server2.thrift.http.port'])
else:
  # 'binary' and any unrecognized transport mode fall back to the binary
  # thrift port, so spark_thrift_port is always defined (the original left it
  # undefined for unknown modes, producing a NameError later).
  spark_thrift_port = int(config['configurations']['spark-hive-site-override']['hive.server2.thrift.port'])

# thrift server support
spark_thrift_sparkconf = None
spark_thrift_cmd_opts_properties = ''
spark_thrift_fairscheduler_content = None

if has_spark_thriftserver and 'spark-defaults' in config['configurations']:
  spark_thrift_sparkconf = config['configurations']['spark-defaults']
  spark_thrift_cmd_opts_properties = config['configurations']['spark-env']['spark_thrift_cmd_opts']

  if 'spark-thrift-fairscheduler' in config['configurations'] and 'fairscheduler_content' in config['configurations']['spark-thrift-fairscheduler']:
    spark_thrift_fairscheduler_content = config['configurations']['spark-thrift-fairscheduler']['fairscheduler_content']

if is_hive_installed:
  # update default metastore client properties (async wait for metastore component) it is useful in case of
  # blueprint provisioning when hive-metastore and spark-thriftserver is not on the same host.
  spark_hive_properties.update({
    'hive.metastore.client.socket.timeout' : config['configurations']['hive-site']['hive.metastore.client.socket.timeout']
  })

  spark_hive_site_override_properties = config['configurations']['spark-hive-site-override'].copy()
  if not default_metastore_catalog:
    # pop with a default so a missing key cannot raise KeyError
    spark_hive_site_override_properties.pop("metastore.catalog.default", None)
  spark_hive_properties.update(spark_hive_site_override_properties)

default_fs = config['configurations']['core-site']['fs.defaultFS']
hdfs_site = config['configurations']['hdfs-site']
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore"

hive_schematool_bin = format('{hive_home}/bin')
hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']

ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", []))
has_ats = len(ats_host) > 0

dfs_type = default("/clusterLevelParams/dfs_type", "")


import functools
# create partial functions with common arguments for every HdfsResource call
# to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
  HdfsResource,
  user=hdfs_user,
  hdfs_resource_ignore_file = hdfs_resource_ignore_file,
  security_enabled = security_enabled,
  keytab = hdfs_user_keytab,
  kinit_path_local = kinit_path_local,
  hadoop_bin_dir = hadoop_bin_dir,
  hadoop_conf_dir = hadoop_conf_dir,
  principal_name = hdfs_principal_name,
  hdfs_site = hdfs_site,
  default_fs = default_fs,
  immutable_paths = get_not_managed_resources(),
  dfs_type = dfs_type
)
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/service_check.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/service_check.py
new file mode 100644
index 00000000..d6fd3c48
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/service_check.py
@@ -0,0 +1,77 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import subprocess
+import time
+import os
+
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+from resource_management.core.logger import Logger
+
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
class SparkServiceCheck(Script):
  def service_check(self, env):
    """Smoke test: verifies the Spark History Server UI answers HTTP 200 and,
    if thrift servers are deployed, that at least one accepts a beeline
    connection."""
    import params
    env.set_params(params)

    # On secure clusters, obtain a ticket as the smoke user first.
    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
      Execute(spark_kinit_cmd, user=params.smoke_user)


    # Probe the history server UI; retried 5 times, 3s apart. The pipe to
    # `grep 200` makes the command fail unless curl saw an HTTP 200.
    Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {spark_history_scheme}://{spark_history_server_host}:{spark_history_ui_port} | grep 200"),
            tries=5,
            try_sleep=3,
            logoutput=True,
            user=params.smoke_user
            )

    if params.has_spark_thriftserver:
      healthy_spark_thrift_host = ""
      for spark_thrift_host in params.spark_thriftserver_hosts:
        if params.security_enabled:
          # NOTE(review): params.default_hive_kerberos_principal is only set in
          # params.py when security is enabled AND Hive is installed — confirm
          # a secured cluster without Hive cannot reach this branch.
          kerberos_principal = params.default_hive_kerberos_principal.replace('_HOST', spark_thrift_host)
          beeline_url = ["jdbc:hive2://{spark_thrift_host}:{spark_thrift_port}/default;principal={kerberos_principal}","transportMode={spark_transport_mode}"]
        else:
          beeline_url = ["jdbc:hive2://{spark_thrift_host}:{spark_thrift_port}/default","transportMode={spark_transport_mode}"]
        # append url according to used transport
        if params.spark_transport_mode == "http":
          beeline_url.append("httpPath={spark_thrift_endpoint}")
          if params.spark_thrift_ssl_enabled:
            beeline_url.append("ssl=true")

        beeline_cmd = os.path.join(params.spark_home, "bin", "beeline")
        # Leading "!" inverts the grep: the command fails if the beeline
        # output contains a known connection-error pattern.
        cmd = "! %s -u '%s'  -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL' -e 'Error: Could not open'" % \
              (beeline_cmd, format(";".join(beeline_url)))

        try:
          Execute(cmd, user=params.smoke_user, path=[beeline_cmd], timeout=CHECK_COMMAND_TIMEOUT_DEFAULT)
          healthy_spark_thrift_host = spark_thrift_host
          break
        except:
          # best-effort: try the next thrift server host
          pass

      if len(params.spark_thriftserver_hosts) > 0 and healthy_spark_thrift_host == "":
        raise Fail("Connection to all Spark thrift servers failed.")
+
# Entry point: Ambari agent invokes this script directly.
if __name__ == "__main__":
  SparkServiceCheck().execute()
+
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/setup_spark.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/setup_spark.py
new file mode 100644
index 00000000..f32d460c
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/setup_spark.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import fileinput
+import shutil
+import os
+import socket
+
+from urlparse import urlparse
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from resource_management.core.source import Template, InlineTemplate
+from resource_management.core.resources.system import Directory, File, Link
+from resource_management.libraries.functions.generate_logfeeder_input_config 
import generate_logfeeder_input_config
+from resource_management.libraries.resources.properties_file import 
PropertiesFile
+from resource_management.libraries.functions.version import 
format_stack_version
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import lzo_utils
+from resource_management.libraries.resources.xml_config import XmlConfig
+
+def setup_spark(env, type, upgrade_type = None, action = None):
+  import params
+
+  # ensure that matching LZO libraries are installed for Spark
+  lzo_utils.install_lzo_if_needed()
+
+  Directory([params.spark_pid_dir, params.spark_log_dir],
+            owner=params.spark_user,
+            group=params.user_group,
+            mode=0775,
+            create_parents = True
+  )
+  if type == 'server' and action == 'config':
+    Directory(params.spark_lib_dir,
+              owner=params.spark_user,
+              group=params.user_group,
+              create_parents = True,
+              mode=0775
+    )
+
+    Directory(params.spark_history_store_path,
+              owner=params.spark_user,
+              group=params.user_group,
+              create_parents = True,
+              mode=0775
+    )
+
+    params.HdfsResource(params.spark_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.spark_user,
+                       mode=0775
+    )
+
+    if params.spark_warehouse_dir and (not params.whs_dir_protocol or 
params.whs_dir_protocol == urlparse(params.default_fs).scheme):
+    # Create Spark Warehouse Dir
+      params.HdfsResource(params.spark_warehouse_dir,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.spark_user,
+                          mode=0777
+      )
+
+    params.HdfsResource(None, action="execute")
+
+
+
+    generate_logfeeder_input_config('', Template("input.config-spark.json.j2", 
extra_imports=[default]))
+
+
+
+  spark_defaults = dict(params.config['configurations']['spark-defaults'])
+
+  if params.security_enabled:
+    spark_defaults.pop("history.server.spnego.kerberos.principal")
+    spark_defaults.pop("history.server.spnego.keytab.file")
+    spark_defaults['spark.history.kerberos.principal'] = 
spark_defaults['spark.history.kerberos.principal'].replace('_HOST', 
socket.getfqdn().lower())
+
+  if not params.spark_warehouse_dir:
+      spark_defaults.pop("spark.sql.warehouse.dir")
+
+  PropertiesFile(format("{spark_conf}/spark-defaults.conf"),
+    properties = spark_defaults,
+    key_value_delimiter = " ",
+    owner=params.spark_user,
+    group=params.spark_group,
+    mode=0644
+  )
+
+  # create spark-env.sh in etc/conf dir
+  File(os.path.join(params.spark_conf, 'spark-env.sh'),
+       owner=params.spark_user,
+       group=params.spark_group,
+       content=InlineTemplate(params.spark_env_sh),
+       mode=0644,
+  )
+
+  #create log4j.properties in etc/conf dir
+  File(os.path.join(params.spark_conf, 'log4j.properties'),
+       owner=params.spark_user,
+       group=params.spark_group,
+       content=params.spark_log4j_properties,
+       mode=0644,
+  )
+
+  #create metrics.properties in etc/conf dir
+  File(os.path.join(params.spark_conf, 'metrics.properties'),
+       owner=params.spark_user,
+       group=params.spark_group,
+       content=InlineTemplate(params.spark_metrics_properties),
+       mode=0644
+  )
+
+  if params.is_hive_installed:
+    XmlConfig("hive-site.xml",
+          conf_dir=params.spark_conf,
+          configurations=params.spark_hive_properties,
+          owner=params.spark_user,
+          group=params.spark_group,
+          mode=0644)
+
+  if params.spark_thrift_fairscheduler_content:
+    # create spark-thrift-fairscheduler.xml
+    File(os.path.join(params.spark_conf,"spark-thrift-fairscheduler.xml"),
+      owner=params.spark_user,
+      group=params.spark_group,
+      mode=0755,
+      content=InlineTemplate(params.spark_thrift_fairscheduler_content)
+    )
+
+  if type == "client":
+    Logger.info('Spark client config.')
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_client.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_client.py
new file mode 100644
index 00000000..6cf1abe6
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_client.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import setup_spark
+
+
class SparkClient(Script):
  """Ambari lifecycle handler for the Spark client (gateway) component."""

  def install(self, env):
    # Install packages, then lay down client configuration immediately.
    self.install_packages(env)
    self.configure(env)

  def configure(self, env, upgrade_type=None, config_dir=None):
    import params
    env.set_params(params)
    
    # Delegates all file work to the shared setup_spark() helper.
    setup_spark(env, 'client', upgrade_type=upgrade_type, action = 'config')

  def status(self, env):
    # Client components have no running process to report on.
    raise ClientComponentHasNoStatus()
  
  def pre_upgrade_restart(self, env, upgrade_type=None):
    """Repoints stack packages at the target version before a rolling-upgrade
    restart (no-op unless a version is set and the stack supports it)."""
    import params

    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      Logger.info("Executing Spark Client Stack Upgrade pre-restart")
      stack_select.select_packages(params.version)
+
# Entry point: Ambari agent invokes this script directly.
if __name__ == "__main__":
  SparkClient().execute()
+
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_service.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_service.py
new file mode 100644
index 00000000..3ce1ef35
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_service.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import socket
+import tarfile
+import time
+import os
+import shutil
+import glob
+from contextlib import closing
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs, 
get_tarball_paths
+from resource_management.libraries.functions import format
+from resource_management.core.resources.system import File, Execute
+from resource_management.libraries.functions.version import 
format_stack_version
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.check_process_status import 
check_process_status
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.shell import as_sudo
+from resource_management.core.exceptions import ComponentIsNotRunning
+from resource_management.core.logger import Logger
+
+CHECK_COMMAND_TIMEOUT_DEFAULT = 60.0
+
+def make_tarfile(output_filename, source_dirs):
+  try:
+    os.remove(output_filename)
+  except OSError:
+    pass
+  parent_dir=os.path.dirname(output_filename)
+  if not os.path.exists(parent_dir):
+    os.makedirs(parent_dir)
+  os.chmod(parent_dir, 0711)
+  with closing(tarfile.open(output_filename, "w:gz")) as tar:
+    for dir in source_dirs:
+      for file in os.listdir(dir):
+        tar.add(os.path.join(dir,file),arcname=file)
+  os.chmod(output_filename, 0644)
+
+
def spark_service(name, upgrade_type=None, action=None):
  """
  Starts or stops a Spark daemon.

  name: 'jobhistoryserver' or 'sparkthriftserver'.
  upgrade_type: set during stack upgrade; selects params.version as the
                effective version instead of the formatted stack version.
  action: 'start' or 'stop'.
  """
  import params

  if action == 'start':

    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
    if effective_version:
      effective_version = format_stack_version(effective_version)

    # On secure clusters, authenticate as the spark user before touching HDFS
    # or launching daemons.
    if params.security_enabled:
      spark_kinit_cmd = format("{kinit_path_local} -kt {spark_kerberos_keytab} {spark_principal}; ")
      Execute(spark_kinit_cmd, user=params.spark_user)

    if name == 'jobhistoryserver':

      # create spark history directory
      params.HdfsResource(params.spark_history_dir,
                          type="directory",
                          action="create_on_execute",
                          owner=params.spark_user,
                          group=params.user_group,
                          mode=0777,
                          recursive_chmod=True
                          )
      params.HdfsResource(None, action="execute")

      # NOTE(review): catalog bootstrap below is intentionally disabled;
      # kept for reference.
      # if params.default_metastore_catalog:
      #   create_catalog_cmd = format("{hive_schematool_bin}/schematool -dbType {hive_metastore_db_type} "
      #                                 "-createCatalog {default_metastore_catalog} "
      #                                 "-catalogDescription 'Default catalog, for Spark' -ifNotExists "
      #                                 "-catalogLocation {default_fs}{spark_warehouse_dir}")
      #   Execute(create_catalog_cmd, user = params.hive_user)

      # Guard: skip the start command when the pid file exists AND the
      # process it names is alive (pgrep -F).
      historyserver_no_op_test = as_sudo(["test", "-f", params.spark_history_server_pid_file]) + " && " + as_sudo(["pgrep", "-F", params.spark_history_server_pid_file])
      try:
        Execute(params.spark_history_server_start,
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=historyserver_no_op_test)
      except:
        # Surface recent daemon logs in the Ambari operation output, then
        # re-raise so the operation still fails.
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise

    elif name == 'sparkthriftserver':
      import status_params
      if params.security_enabled:
        hive_kinit_cmd = format("{kinit_path_local} -kt {hive_kerberos_keytab} {hive_kerberos_principal}; ")
        Execute(hive_kinit_cmd, user=params.spark_user)

      # Same pid-file + pgrep guard as the history server.
      thriftserver_no_op_test= as_sudo(["test", "-f", params.spark_thrift_server_pid_file]) + " && " + as_sudo(["pgrep", "-F", params.spark_thrift_server_pid_file])
      try:
        Execute(format('{spark_thrift_server_start} --properties-file {spark_thrift_server_conf_file} {spark_thrift_cmd_opts_properties}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home},
                not_if=thriftserver_no_op_test
        )
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise

      # Poll up to 15 times (30s apart) until a beeline connection to the
      # thrift server succeeds; bail out early if the STS process dies.
      hive_connection_created = False
      i = 0
      while i < 15:
        time.sleep(30)
        Logger.info("Check connection to STS is created.")

        beeline_url = ["jdbc:hive2://{fqdn}:{spark_thrift_port}/default"]

        if params.security_enabled:
            beeline_url.append("principal={hive_kerberos_principal}")

        beeline_url.append("transportMode={spark_transport_mode}")

        if params.spark_transport_mode.lower() == 'http':
            beeline_url.append("httpPath={spark_thrift_endpoint}")
            if params.spark_thrift_ssl_enabled:
                beeline_url.append("ssl=true")

        beeline_cmd = os.path.join(params.spark_home, "bin", "beeline")
        # Leading "!" inverts the grep: the command fails when beeline output
        # contains a known connection-error pattern.
        cmd = "! %s -u '%s'  -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL' -e 'Error: Could not open'" % \
              (beeline_cmd, format(";".join(beeline_url)))

        try:
          Execute(cmd, user=params.spark_user, path=[beeline_cmd], timeout=CHECK_COMMAND_TIMEOUT_DEFAULT)
          hive_connection_created = True
          Logger.info("Connection to STS is created.")
          break
        except:
          Logger.info("Connection to STS still is not created.")
          pass

        # check_process_status raises ComponentIsNotRunning if the daemon died,
        # which aborts the retry loop with a failure.
        Logger.info("Check STS process status.")
        check_process_status(status_params.spark_thrift_server_pid_file)

        i+=1

      if not hive_connection_created:
        raise ComponentIsNotRunning("Something goes wrong, STS connection was not created but STS process still alive. "
                                    "Potential problems: Hive/YARN doesn't work correctly or too slow. For more information check STS logs.")

  elif action == 'stop':
    if name == 'jobhistoryserver':
      try:
        Execute(format('{spark_history_server_stop}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise
      # Remove the pid file so status checks report the daemon as stopped.
      File(params.spark_history_server_pid_file,
        action="delete"
      )

    elif name == 'sparkthriftserver':
      try:
        Execute(format('{spark_thrift_server_stop}'),
                user=params.spark_user,
                environment={'JAVA_HOME': params.java_home}
        )
      except:
        show_logs(params.spark_log_dir, user=params.spark_user)
        raise
      File(params.spark_thrift_server_pid_file,
        action="delete"
      )
+
+
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_thrift_server.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_thrift_server.py
new file mode 100644
index 00000000..f9468c43
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/spark_thrift_server.py
@@ -0,0 +1,85 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.stack_features import 
check_stack_feature
+from resource_management.libraries.functions.constants import StackFeature
+from resource_management.libraries.functions.check_process_status import 
check_process_status
+from resource_management.core.logger import Logger
+from resource_management.core import shell
+from setup_spark import setup_spark
+from spark_service import spark_service
+
+
class SparkThriftServer(Script):
  """
  Ambari lifecycle handler for the Spark Thrift Server component.

  The Ambari agent dispatches each command (INSTALL/CONFIGURE/START/STOP/
  STATUS/upgrade) to the matching method below. `params` / `status_params`
  are imported lazily inside each method because they evaluate the command's
  JSON configuration, which only exists at command-execution time.
  """

  def install(self, env):
    # Install the OS packages declared for this component (metainfo.xml).
    import params
    env.set_params(params)

    self.install_packages(env)

  def configure(self, env, upgrade_type=None, config_dir=None):
    # Render Spark config files; 'server' selects the server-side branch
    # inside setup_spark.
    import params
    env.set_params(params)
    setup_spark(env, 'server', upgrade_type = upgrade_type, action = 'config')

  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    # Re-render configuration on every start so config changes take effect.
    self.configure(env)
    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='start')

  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    spark_service('sparkthriftserver', upgrade_type=upgrade_type, action='stop')

  def status(self, env):
    # Raises ComponentIsNotRunning when the PID file is missing or stale.
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.spark_thrift_server_pid_file)

  def pre_upgrade_restart(self, env, upgrade_type=None):
    # Repoint the stack's version symlinks before the post-upgrade restart.
    import params

    env.set_params(params)
    Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
    stack_select.select_packages(params.version)

  def get_log_folder(self):
    # Used by the agent to collect component logs.
    import params
    return params.spark_log_dir

  def get_user(self):
    # OS user the daemon runs as.
    import params
    return params.spark_user

  def get_pid_files(self):
    import status_params
    return [status_params.spark_thrift_server_pid_file]

if __name__ == "__main__":
  SparkThriftServer().execute()
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/status_params.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/status_params.py
new file mode 100644
index 00000000..1bd8f2c3
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/scripts/status_params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+
# Minimal parameter set for the cheap STATUS command; kept separate from
# params.py so status checks do not evaluate the full parameter module.
config = Script.get_config()

spark_user = config['configurations']['spark-env']['spark_user']
spark_group = config['configurations']['spark-env']['spark_group']
user_group = config['configurations']['cluster-env']['user_group']

# hive-env exists only when Hive is part of the cluster; fall back to the
# conventional default user name otherwise.
if 'hive-env' in config['configurations']:
  hive_user = config['configurations']['hive-env']['hive_user']
else:
  hive_user = "hive"

# PID file paths checked by check_process_status for the two Spark daemons.
# Pattern: spark-<user>-<daemon main class>-<instance>.pid — these must match
# what the Spark start scripts actually write (TODO confirm against the
# packaged spark-daemon scripts).
spark_pid_dir = config['configurations']['spark-env']['spark_pid_dir']
spark_history_server_pid_file = format("{spark_pid_dir}/spark-{spark_user}-org.apache.spark.deploy.history.HistoryServer-1.pid")
spark_thrift_server_pid_file = format("{spark_pid_dir}/spark-{spark_user}-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2-1.pid")
stack_name = default("/clusterLevelParams/stack_name", None)
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/templates/input.config-spark.json.j2
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/templates/input.config-spark.json.j2
new file mode 100644
index 00000000..089897b9
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/package/templates/input.config-spark.json.j2
@@ -0,0 +1,61 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"spark_jobhistory_server",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark-env/spark_log_dir', 
'/var/log/spark')}}/spark-*-org.apache.spark.deploy.history.HistoryServer*.out"
+    },
+    {
+      "type":"spark_thriftserver",
+      "rowtype":"service",
+      "path":"{{default('/configurations/spark-env/spark_log_dir', 
'/var/log/spark')}}/spark-*-org.apache.spark.sql.hive.thriftserver.HiveThriftServer2*.out"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "spark_jobhistory_server",
+            "spark_thriftserver",
+            "livy2_server"
+          ]
+        }
+      },
+      "log4j_format":"",
+      
"multiline_pattern":"^(%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level})",
+      
"message_pattern":"(?m)^%{SPARK_DATESTAMP:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVAFILE:file}:%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yy/MM/dd HH:mm:ss"
+          }
+        },
+        "level":{
+          "map_field_value":{
+            "pre_value":"WARNING",
+            "post_value":"WARN"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/quicklinks/quicklinks.json
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/quicklinks/quicklinks.json
new file mode 100644
index 00000000..a60a3a01
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/quicklinks/quicklinks.json
@@ -0,0 +1,28 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"HTTP_ONLY"
+    },
+
+    "links": [
+      {
+        "name": "spark_history_server_ui",
+        "label": "Spark History Server UI",
+        "component_name": "SPARK_JOBHISTORYSERVER",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port":{
+          "http_property": "spark.history.ui.port",
+          "http_default_port": "18081",
+          "https_property": "spark.history.ui.port",
+          "https_default_port": "18081",
+          "regex": "^(\\d+)$",
+          "site": "spark-defaults"
+        }
+      }
+    ]
+  }
+}
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/role_command_order.json
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/role_command_order.json
new file mode 100644
index 00000000..5dc69490
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/role_command_order.json
@@ -0,0 +1,10 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for SPARK",
+    "SPARK_JOBHISTORYSERVER-START": ["HIVE_METASTORE-START"],
+    "SPARK_JOBHISTORYSERVER-RESTART": ["HIVE_METASTORE-RESTART"],
+    "SPARK_THRIFTSERVER-START": ["HIVE_METASTORE-START"],
+    "SPARK_THRIFTSERVER-RESTART": ["HIVE_METASTORE-RESTART"],
+    "SPARK_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK_JOBHISTORYSERVER-START", 
"SPARK_THRIFTSERVER-START"]
+  }
+}
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/service_advisor.py
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/service_advisor.py
new file mode 100644
index 00000000..c1e4bd05
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/service_advisor.py
@@ -0,0 +1,374 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+import xml.etree.ElementTree as ET
+
+
+from resource_management.core.logger import Logger
+
# Locate and load the stack-level base ServiceAdvisor that the classes in
# this file extend. The relative path assumes this file lives at
# resources/stacks/<stack>/<version>/services/SPARK/service_advisor.py.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../../stacks/')
PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')

try:
  # Tests/tools may override the parent advisor location via the environment.
  if "BASE_SERVICE_ADVISOR" in os.environ:
    PARENT_FILE = os.environ["BASE_SERVICE_ADVISOR"]
  with open(PARENT_FILE, 'rb') as fp:
    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
except Exception as e:
  traceback.print_exc()
  # Python 2 print statement: this file runs under ambari-python-wrap
  # (Python 2 only) — do not "modernize" without porting the whole module.
  print "Failed to load parent"
+
class Spark2ServiceAdvisor(service_advisor.ServiceAdvisor):
  """
  Advisor entry point: Ambari delegates component-layout validation,
  configuration recommendation and configuration validation for this
  service to this class.

  NOTE(review): this advisor uses "SPARK2"/"spark2-*" component and config
  names (SPARK2_JOBHISTORYSERVER, spark2-defaults, ...), while the rest of
  this service's files use "SPARK"/"spark-*" (see role_command_order.json
  and quicklinks.json). Verify against metainfo.xml — if the names do not
  match the stack definition, these recommendations/validations will
  silently never apply.
  """

  def __init__(self, *args, **kwargs):
    self.as_super = super(Spark2ServiceAdvisor, self)
    self.as_super.__init__(*args, **kwargs)

    # Always call these methods
    self.modifyMastersWithMultipleInstances()
    self.modifyCardinalitiesDict()
    self.modifyHeapSizeProperties()
    self.modifyNotValuableComponents()
    self.modifyComponentsNotPreferableOnServer()
    self.modifyComponentLayoutSchemes()

  def modifyMastersWithMultipleInstances(self):
    """
    Modify the set of masters with multiple instances.
    Must be overriden in child class.
    """
    # Nothing to do
    pass

  def modifyCardinalitiesDict(self):
    """
    Modify the dictionary of cardinalities.
    Must be overriden in child class.
    """
    # Nothing to do
    pass

  def modifyHeapSizeProperties(self):
    """
    Modify the dictionary of heap size properties.
    Must be overriden in child class.
    """
    # NOTE(review): component/config names here are SPARK2_* / spark2-env —
    # confirm they match this stack's metainfo.xml (see class docstring).
    self.heap_size_properties = {"SPARK2_JOBHISTORYSERVER":
                                   [{"config-name": "spark2-env",
                                     "property": "spark_daemon_memory",
                                     "default": "2048m"}]}

  def modifyNotValuableComponents(self):
    """
    Modify the set of components whose host assignment is based on other services.
    Must be overriden in child class.
    """
    # Nothing to do
    pass

  def modifyComponentsNotPreferableOnServer(self):
    """
    Modify the set of components that are not preferable on the server.
    Must be overriden in child class.
    """
    # Nothing to do
    pass

  def modifyComponentLayoutSchemes(self):
    """
    Modify layout scheme dictionaries for components.
    The scheme dictionary basically maps the number of hosts to
    host index where component should exist.
    Must be overriden in child class.
    """
    # Nothing to do
    pass

  def getServiceComponentLayoutValidations(self, services, hosts):
    """
    Get a list of errors.
    Must be overriden in child class.
    """

    return self.getServiceComponentCardinalityValidations(services, hosts, "SPARK2")

  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
    """
    Entry point.
    Must be overriden in child class.
    """
    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
    #            (self.__class__.__name__, inspect.stack()[0][3]))

    # Recommendations are layered by the stack version that introduced them.
    recommender = Spark2Recommender()
    recommender.recommendSpark2ConfigurationsFromHDP25(configurations, clusterData, services, hosts)
    recommender.recommendSPARK2ConfigurationsFromHDP26(configurations, clusterData, services, hosts)
    recommender.recommendSPARK2ConfigurationsFromHDP30(configurations, clusterData, services, hosts)

  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
    """
    Entry point.
    Validate configurations for the service. Return a list of errors.
    The code for this function should be the same for each Service Advisor.
    """
    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
    #            (self.__class__.__name__, inspect.stack()[0][3]))

    validator = Spark2Validator()
    # Calls the methods of the validator using arguments,
    # method(siteProperties, siteRecommendations, configurations, services, hosts)
    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)

  def isComponentUsingCardinalityForLayout(self, componentName):
    # These components are placed purely by their declared cardinality.
    return componentName in ('SPARK2_THRIFTSERVER', 'LIVY2_SERVER')

  @staticmethod
  def isKerberosEnabled(services, configurations):
    """
    Determines if security is enabled by testing the value of spark2-defaults/spark.history.kerberos.enabled enabled.
    If the property exists and is equal to "true", then is it enabled; otherwise is it assumed to be
    disabled.

    :type services: dict
    :param services: the dictionary containing the existing configuration values
    :type configurations: dict
    :param configurations: the dictionary containing the updated configuration values
    :rtype: bool
    :return: True or False
    """
    # The value being recommended in this request (configurations) takes
    # precedence over the currently persisted one (services).
    if configurations and "spark2-defaults" in configurations and \
            "spark.history.kerberos.enabled" in configurations["spark2-defaults"]["properties"]:
      return configurations["spark2-defaults"]["properties"]["spark.history.kerberos.enabled"].lower() == "true"
    elif services and "spark2-defaults" in services["configurations"] and \
            "spark.history.kerberos.enabled" in services["configurations"]["spark2-defaults"]["properties"]:
      return services["configurations"]["spark2-defaults"]["properties"]["spark.history.kerberos.enabled"].lower() == "true"
    else:
      return False
+
+
class Spark2Recommender(service_advisor.ServiceAdvisor):
  """
  Spark2 Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
  """

  def __init__(self, *args, **kwargs):
    self.as_super = super(Spark2Recommender, self)
    self.as_super.__init__(*args, **kwargs)

  def recommendSpark2ConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
    """
    Recommend the YARN queue for Spark jobs and for the Thrift Server.

    :type configurations dict
    :type clusterData dict
    :type services dict
    :type hosts dict
    """
    putSparkProperty = self.putProperty(configurations, "spark2-defaults", services)
    putSparkThriftSparkConf = self.putProperty(configurations, "spark2-thrift-sparkconf", services)

    # recommendYarnQueue may return None (no recommendation) — only write
    # the property when a queue was actually found.
    spark_queue = self.recommendYarnQueue(services, "spark2-defaults", "spark.yarn.queue")
    if spark_queue is not None:
      putSparkProperty("spark.yarn.queue", spark_queue)

    spark_thrift_queue = self.recommendYarnQueue(services, "spark2-thrift-sparkconf", "spark.yarn.queue")
    if spark_thrift_queue is not None:
      putSparkThriftSparkConf("spark.yarn.queue", spark_thrift_queue)


  def recommendSPARK2ConfigurationsFromHDP26(self, configurations, clusterData, services, hosts):
    """
    When Kerberos is enabled, switch on Spark/Spark History Server ACLs and
    add the Zeppelin principal to the Livy2 superusers.

    :type configurations dict
    :type clusterData dict
    :type services dict
    :type hosts dict
    """

    if Spark2ServiceAdvisor.isKerberosEnabled(services, configurations):

      spark2_defaults = self.getServicesSiteProperties(services, "spark2-defaults")

      if spark2_defaults:
        # ("Dafaults" misspelling kept — it is only a local variable name.)
        putSpark2DafaultsProperty = self.putProperty(configurations, "spark2-defaults", services)
        putSpark2DafaultsProperty('spark.acls.enable', 'true')
        putSpark2DafaultsProperty('spark.admin.acls', '')
        putSpark2DafaultsProperty('spark.history.ui.acls.enable', 'true')
        putSpark2DafaultsProperty('spark.history.ui.admin.acls', '')


    self.__addZeppelinToLivy2SuperUsers(configurations, services)


  def recommendSPARK2ConfigurationsFromHDP30(self, configurations, clusterData, services, hosts):
    """
    Toggle the Spark Atlas Connector (SAC) wiring based on
    spark2-atlas-application-properties-override/atlas.spark.enabled:
    when enabled, append the SAC classpath/listener settings to
    spark2-defaults and spark2-thrift-sparkconf; when disabled, strip
    exactly those values again.
    """

    # SAC
    if "spark2-atlas-application-properties-override" in services["configurations"]:
      spark2_atlas_application_properties_override = self.getServicesSiteProperties(services, "spark2-atlas-application-properties-override")
      spark2_defaults_properties = self.getServicesSiteProperties(services, "spark2-defaults")
      spark2_thriftspark_conf_properties = self.getServicesSiteProperties(services, "spark2-thrift-sparkconf")
      putSpark2DefautlsProperty = self.putProperty(configurations, "spark2-defaults", services)
      # NOTE(review): putSpark2DefaultsPropertyAttribute is assigned but never
      # used below — possibly leftover from a removed branch.
      putSpark2DefaultsPropertyAttribute = self.putPropertyAttribute(configurations,"spark2-defaults")
      putSpark2ThriftSparkConfProperty = self.putProperty(configurations, "spark2-thrift-sparkconf", services)
      putSpark2AtlasHookProperty = self.putProperty(configurations, "spark2-atlas-application-properties-override", services)
      putSpark2AtlasHookPropertyAttribute = self.putPropertyAttribute(configurations,"spark2-atlas-application-properties-override")
      spark2_sac_enabled = None
      if self.checkSiteProperties(spark2_atlas_application_properties_override, "atlas.spark.enabled"):
        spark2_sac_enabled = spark2_atlas_application_properties_override["atlas.spark.enabled"]
        # Normalize any truthy spelling ("true"/"True"/True) to a bool.
        spark2_sac_enabled = str(spark2_sac_enabled).upper() == 'TRUE'

      if spark2_sac_enabled:

        # SAC jars on the driver classpath and the Atlas hook properties
        # shipped to YARN containers.
        self.setOrAddValueToProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.driver.extraClassPath", "/usr/hdp/current/spark-atlas-connector/*", ":")
        self.setOrAddValueToProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.yarn.dist.files", "/etc/spark2/conf/atlas-application.properties.yarn#atlas-application.properties", ",")
        self.setOrAddValueToProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.driver.extraClassPath", "/usr/hdp/current/spark-atlas-connector/*", ":")

        # SAC event listeners for both regular Spark and the Thrift Server.
        self.setOrAddValueToProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.extraListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.setOrAddValueToProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.sql.queryExecutionListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.setOrAddValueToProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.extraListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.setOrAddValueToProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.sql.queryExecutionListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")

        self.setOrAddValueToProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.sql.streaming.streamingQueryListeners", "com.hortonworks.spark.atlas.SparkAtlasStreamingQueryEventTracker", ",")
        self.setOrAddValueToProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.sql.streaming.streamingQueryListeners", "com.hortonworks.spark.atlas.SparkAtlasStreamingQueryEventTracker", ",")

        putSpark2AtlasHookProperty("atlas.client.checkModelInStart", "false")

      else:

        # SAC disabled (or flag absent): strip exactly the values added above.
        self.removeValueFromProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.driver.extraClassPath", "/usr/hdp/current/spark-atlas-connector/*", ":")
        self.removeValueFromProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.yarn.dist.files", "/etc/spark2/conf/atlas-application.properties.yarn#atlas-application.properties", ",")
        self.removeValueFromProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.driver.extraClassPath", "/usr/hdp/current/spark-atlas-connector/*", ":")

        self.removeValueFromProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.extraListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.removeValueFromProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.sql.queryExecutionListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.removeValueFromProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.extraListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")
        self.removeValueFromProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.sql.queryExecutionListeners", "com.hortonworks.spark.atlas.SparkAtlasEventTracker", ",")

        self.removeValueFromProperty(putSpark2DefautlsProperty, spark2_defaults_properties, "spark.sql.streaming.streamingQueryListeners", "com.hortonworks.spark.atlas.SparkAtlasStreamingQueryEventTracker", ",")
        self.removeValueFromProperty(putSpark2ThriftSparkConfProperty, spark2_thriftspark_conf_properties, "spark.sql.streaming.streamingQueryListeners", "com.hortonworks.spark.atlas.SparkAtlasStreamingQueryEventTracker", ",")

        putSpark2AtlasHookPropertyAttribute("atlas.client.checkModelInStart", "delete", "true")


  def setOrAddValueToProperty(self, putConfigProperty, config, propertyName, propertyValue, separator):
    # Append propertyValue to an existing non-empty value (joined with
    # separator); otherwise set it outright.
    if self.checkSiteProperties(config, propertyName) and len(str(config[propertyName]).strip()) > 0:
      putConfigProperty(propertyName, str(config[propertyName]).strip() + separator + propertyValue)
    else:
      putConfigProperty(propertyName, propertyValue)

  def removeValueFromProperty(self, putConfigProperty, config, propertyName, propertyValue, separator):
    # Strip "<separator><propertyValue>" from the existing value; if the value
    # was exactly propertyValue, reset the property to a single space
    # (deliberately not empty — presumably so the property is kept; verify).
    if not self.checkSiteProperties(config, propertyName):
      return
    if str(config[propertyName]).strip() == propertyValue:
      putConfigProperty(propertyName, " ")
    else:
      putConfigProperty(propertyName, str(config[propertyName]).replace(separator + propertyValue, ""))

  def __addZeppelinToLivy2SuperUsers(self, configurations, services):
    """
    If Kerberos is enabled AND Zeppelin is installed AND Spark2 Livy Server is installed, then set
    livy2-conf/livy.superusers to contain the Zeppelin principal name from
    zeppelin-site/zeppelin.server.kerberos.principal

    :param configurations:
    :param services:
    """
    if Spark2ServiceAdvisor.isKerberosEnabled(services, configurations):
      zeppelin_site = self.getServicesSiteProperties(services, "zeppelin-site")

      if zeppelin_site and 'zeppelin.server.kerberos.principal' in zeppelin_site:
        zeppelin_principal = zeppelin_site['zeppelin.server.kerberos.principal']
        # Short user name = everything before the '@' of the principal.
        zeppelin_user = zeppelin_principal.split('@')[0] if zeppelin_principal else None

        if zeppelin_user:
          livy2_conf = self.getServicesSiteProperties(services, 'livy2-conf')

          if livy2_conf:
            superusers = livy2_conf['livy.superusers'] if livy2_conf and 'livy.superusers' in livy2_conf else None

            # add the Zeppelin user to the set of users
            if superusers:
              _superusers = superusers.split(',')
              _superusers = [x.strip() for x in _superusers]
              # Python 2: filter() returns a list here, so append() below works.
              _superusers = filter(None, _superusers)  # Removes empty string elements from array
            else:
              _superusers = []

            if zeppelin_user not in _superusers:
              _superusers.append(zeppelin_user)

              putLivy2ConfProperty = self.putProperty(configurations, 'livy2-conf', services)
              putLivy2ConfProperty('livy.superusers', ','.join(_superusers))
+
class Spark2Validator(service_advisor.ServiceAdvisor):
  """
  Spark2 Validator checks the correctness of properties whenever the service is first added or the user attempts to
  change configs via the UI.

  Each entry in self.validators maps a config type to the method that
  validates it; the base class drives them through
  validateListOfConfigUsingMethod(...).
  """

  def __init__(self, *args, **kwargs):
    self.as_super = super(Spark2Validator, self)
    self.as_super.__init__(*args, **kwargs)

    self.validators = [("spark2-defaults", self.validateSpark2DefaultsFromHDP25),
                       ("spark2-thrift-sparkconf", self.validateSpark2ThriftSparkConfFromHDP25),
                       ("spark2-atlas-application-properties-override", self.validateSpark2AtlasApplicationPropertiesFromHDP30)]


  def validateSpark2DefaultsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
    """Validate that spark2-defaults/spark.yarn.queue names an existing YARN queue."""
    validationItems = [
      {
        "config-name": 'spark.yarn.queue',
        "item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
      }
    ]
    return self.toConfigurationValidationProblems(validationItems, "spark2-defaults")


  def validateSpark2ThriftSparkConfFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
    """Validate that spark2-thrift-sparkconf/spark.yarn.queue names an existing YARN queue."""
    validationItems = [
      {
        "config-name": 'spark.yarn.queue',
        "item": self.validatorYarnQueue(properties, recommendedDefaults, 'spark.yarn.queue', services)
      }
    ]
    return self.toConfigurationValidationProblems(validationItems, "spark2-thrift-sparkconf")

  def validateSpark2AtlasApplicationPropertiesFromHDP30(self, properties, recommendedDefaults, configurations, services, hosts):
    """
    Report an error when the Spark Atlas Connector (SAC) is enabled but the
    ATLAS service is not installed on the cluster.

    Fixes vs. the original:
    - reconstructed the corrupted validationItems.append(...) statement;
    - use dict.get() chains so a missing
      spark2-atlas-application-properties-override config section cannot
      raise KeyError (the original indexed services['configurations'][...]
      directly inside the condition).
    """
    validationItems = []
    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
    # Defaults to 'false' when the section or the flag is absent, which keeps
    # the original behavior (no error item) for clusters without SAC config.
    sac_properties = services['configurations'].get('spark2-atlas-application-properties-override', {}).get('properties', {})
    if "ATLAS" not in servicesList and str(sac_properties.get('atlas.spark.enabled', 'false')).upper() == 'TRUE':
      validationItems.append({"config-name": "spark2-atlas-application-properties-override",
                              "item": self.getErrorItem("SAC could be enabled only if ATLAS service is available on cluster!")})
    return self.toConfigurationValidationProblems(validationItems, "spark2-atlas-application-properties-override")
+
+
+
+
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/themes/directories.json
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/themes/directories.json
new file mode 100644
index 00000000..def8df56
--- /dev/null
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/SPARK/themes/directories.json
@@ -0,0 +1,128 @@
+{
+  "name": "default",
+  "description": "Directories theme for SPARK service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "directories",
+        "tabs": [
+          {
+            "name": "directories",
+            "display-name": "Directories",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "4",
+              "sections": [
+                {
+                  "name": "subsection-log-dirs",
+                  "display-name": "LOG DIRS",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-log-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                },
+                {
+                  "name": "subsection-pid-dirs",
+                  "display-name": "PID DIRS",
+                  "row-index": "1",
+                  "column-index": "0",
+                  "row-span": "1",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-pid-dirs",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "spark-defaults/spark.eventLog.dir",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "spark-defaults/spark.history.fs.logDirectory",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "spark-thrift-sparkconf/spark.eventLog.dir",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "spark-thrift-sparkconf/spark.history.fs.logDirectory",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "spark-env/spark_log_dir",
+          "subsection-name": "subsection-log-dirs"
+        },
+        {
+          "config": "spark-env/spark_pid_dir",
+          "subsection-name": "subsection-pid-dirs"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "spark-defaults/spark.eventLog.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "spark-defaults/spark.history.fs.logDirectory",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "spark-thrift-sparkconf/spark.eventLog.dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "spark-thrift-sparkconf/spark.history.fs.logDirectory",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "spark-env/spark_log_dir",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "spark-env/spark_pid_dir",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
diff --git 
a/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
 
b/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
index 486708f5..6a6012fa 100755
--- 
a/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
+++ 
b/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
@@ -41,13 +41,13 @@ echo -e "\033[32mSetting up ambari-server\033[0m"
 docker exec ambari-server bash -c "ambari-server setup 
--java-home=/usr/lib/jvm/java --database=mysql --databasehost=localhost 
--databaseport=3306 --databasename=ambari --databaseusername=root 
--databasepassword=root -s"
 
 echo -e "\033[32mCreating container ambari-agent-01\033[0m"
-docker run -d -p 9995:9995 --name ambari-agent-01 --hostname ambari-agent-01 
--network ambari --privileged -e "container=docker" -v 
/sys/fs/cgroup:/sys/fs/cgroup:ro ambari:2.7.5 /usr/sbin/init
+docker run -d -p 9995:9995 -p 18081:18081 --name ambari-agent-01 --hostname 
ambari-agent-01 --network ambari --privileged -e "container=docker" -v 
/sys/fs/cgroup:/sys/fs/cgroup:ro ambari:2.7.5 /usr/sbin/init
 docker exec ambari-agent-01 bash -c "echo '$SERVER_PUB_KEY' > 
/root/.ssh/authorized_keys"
 docker exec ambari-agent-01 /bin/systemctl enable sshd
 docker exec ambari-agent-01 /bin/systemctl start sshd
 
 echo -e "\033[32mCreating container ambari-agent-02\033[0m"
-docker run -d --name ambari-agent-02 --hostname ambari-agent-02 --network 
ambari --privileged -e "container=docker" -v /sys/fs/cgroup:/sys/fs/cgroup:ro 
ambari:2.7.5 /usr/sbin/init
+docker run -d -p 8088:8088 --name ambari-agent-02 --hostname ambari-agent-02 
--network ambari --privileged -e "container=docker" -v 
/sys/fs/cgroup:/sys/fs/cgroup:ro ambari:2.7.5 /usr/sbin/init
 docker exec ambari-agent-02 bash -c "echo '$SERVER_PUB_KEY' > 
/root/.ssh/authorized_keys"
 docker exec ambari-agent-02 /bin/systemctl enable sshd
 docker exec ambari-agent-02 /bin/systemctl start sshd

Reply via email to