http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
index a52f438..d40679a 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterImplTest.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceGroup;
 import org.apache.ambari.server.state.StackId;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -214,7 +215,7 @@ public class ClusterImplTest {
     String stackVersion = "HDP-2.1.1";
     String repoVersion = "2.1.1-1234";
     StackId stackId = new StackId(stackVersion);
-    ormTestHelper.createStack(stackId);
+    ormTestHelper.createStackWithRepoVersion(stackId, "");
 
     clusters.addCluster(clusterName, stackId);
     Cluster cluster = clusters.getCluster(clusterName);
@@ -233,7 +234,8 @@ public class ClusterImplTest {
 
     clusters.mapAndPublishHostsToCluster(Sets.newHashSet(hostName1, 
hostName2), clusterName);
 
-    Service hdfs = cluster.addService(null, "HDFS", "", repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", 
repositoryVersion);
 
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
     nameNode.addServiceComponentHost(hostName1);
@@ -246,7 +248,7 @@ public class ClusterImplTest {
     hdfsClient.addServiceComponentHost(hostName1);
     hdfsClient.addServiceComponentHost(hostName2);
 
-    Service tez = cluster.addService(null, serviceToDelete, "", 
repositoryVersion);
+    Service tez = cluster.addService(serviceGroup, serviceToDelete, 
serviceToDelete, repositoryVersion);
 
     ServiceComponent tezClient = tez.addServiceComponent("TEZ_CLIENT");
     ServiceComponentHost tezClientHost1 =  
tezClient.addServiceComponentHost(hostName1);
@@ -275,7 +277,7 @@ public class ClusterImplTest {
     String hostToDelete = hostName2;
     StackId stackId = new StackId("HDP-2.1.1");
 
-    ormTestHelper.createStack(stackId);
+    ormTestHelper.createStackWithRepoVersion(stackId, "");
     clusters.addCluster(clusterName, stackId);
 
     Cluster cluster = clusters.getCluster(clusterName);
@@ -315,7 +317,7 @@ public class ClusterImplTest {
     String clusterName = "TEST_CLUSTER_SIZE";
     String hostName1 = "host1", hostName2 = "host2";
     StackId stackId = new StackId("HDP", "2.1.1");
-    ormTestHelper.createStack(stackId);
+    ormTestHelper.createStackWithRepoVersion(stackId, "");
     clusters.addCluster(clusterName, stackId);
 
     Cluster cluster = clusters.getCluster(clusterName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 4ce7387..bbfc838 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -96,6 +96,7 @@ import org.apache.ambari.server.state.ServiceComponentFactory;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.ServiceGroup;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
@@ -1076,9 +1077,10 @@ public class ClusterTest {
 
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
 
-    c1.addService(null, "MAPREDUCE", "", repositoryVersion);
+    ServiceGroup serviceGroup = c1.addServiceGroup("CORE");
+    c1.addService(serviceGroup, "MAPREDUCE", "MAPREDUCE", repositoryVersion);
 
-    Service hdfs = c1.addService(null, "HDFS", "", repositoryVersion);
+    Service hdfs = c1.addService(serviceGroup, "HDFS", "HDFS", 
repositoryVersion);
     ServiceComponent nameNode = hdfs.addServiceComponent("NAMENODE");
 
     assertEquals(2, c1.getServices().size());
@@ -1098,7 +1100,8 @@ public class ClusterTest {
 
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
 
-    c1.addService(null, "HDFS", "", repositoryVersion);
+    ServiceGroup serviceGroup = c1.addServiceGroup("CORE");
+    c1.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion);
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1294,7 +1297,8 @@ public class ClusterTest {
     createDefaultCluster();
 
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
-    c1.addService(null, "HDFS", "", repositoryVersion);
+    ServiceGroup serviceGroup = c1.addServiceGroup("CORE");
+    c1.addService(serviceGroup, "HDFS", "HDFS", repositoryVersion);
 
     Config config1 = configFactory.createNew(c1, "hdfs-site", "version1",
       new HashMap<String, String>() {{ put("a", "b"); }}, new HashMap<>());
@@ -1894,12 +1898,13 @@ public class ClusterTest {
 
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
 
-    Service service = c1.addService(null, "ZOOKEEPER", "", repositoryVersion);
+    ServiceGroup serviceGroup = c1.addServiceGroup("CORE");
+    Service service = c1.addService(serviceGroup, "ZOOKEEPER", "ZOOKEEPER", 
repositoryVersion);
     ServiceComponent sc = service.addServiceComponent("ZOOKEEPER_SERVER");
     sc.addServiceComponentHost("h-1");
     sc.addServiceComponentHost("h-2");
 
-    service = c1.addService(null, "SQOOP", "", repositoryVersion);
+    service = c1.addService(serviceGroup, "SQOOP", "SQOOP", repositoryVersion);
     sc = service.addServiceComponent("SQOOP");
     sc.addServiceComponentHost("h-3");
 
@@ -2005,7 +2010,8 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
-    Service service = cluster.addService(null, serviceName, "", 
repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service service = cluster.addService(serviceGroup, serviceName, 
serviceName, repositoryVersion);
     String configType = "zoo.cfg";
 
     ClusterConfigEntity clusterConfig1 = new ClusterConfigEntity();
@@ -2096,7 +2102,8 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
-    Service service = cluster.addService(null, serviceName, "", 
repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service service = cluster.addService(serviceGroup, serviceName, 
serviceName, repositoryVersion);
     String configType = "zoo.cfg";
 
     // create 5 configurations in the current stack
@@ -2188,7 +2195,8 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
-    Service service = cluster.addService(null, serviceName, "", 
repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service service = cluster.addService(serviceGroup, serviceName, 
serviceName, repositoryVersion);
     String configType = "zoo.cfg";
 
     Map<String, String> properties = new HashMap<>();
@@ -2268,7 +2276,8 @@ public class ClusterTest {
     // add a service
     String serviceName = "ZOOKEEPER";
     RepositoryVersionEntity repositoryVersion = 
helper.getOrCreateRepositoryVersion(c1);
-    Service service = cluster.addService(null, serviceName, "", 
repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service service = cluster.addService(serviceGroup, serviceName, 
serviceName, repositoryVersion);
     String configType = "zoo.cfg";
 
     ClusterConfigEntity clusterConfig = new ClusterConfigEntity();

http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index be4a907..730d51c 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.ambari.server.state.ServiceGroup;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.host.HostRegistrationRequestEvent;
@@ -144,7 +145,7 @@ public class ClustersTest {
   public void testAddAndGetCluster() throws AmbariException {
     StackId stackId = new StackId("HDP-2.1.1");
 
-    helper.createStack(stackId);
+    helper.createStackWithRepoVersion(stackId, "");
 
     String c1 = "foo";
     String c2 = "foo";
@@ -198,7 +199,7 @@ public class ClustersTest {
   public void testAddAndGetClusterWithSecurityType() throws AmbariException {
     StackId stackId = new StackId("HDP-2.1.1");
 
-    helper.createStack(stackId);
+    helper.createStackWithRepoVersion(stackId, "");
 
     String c1 = "foo";
     SecurityType securityType = SecurityType.KERBEROS;
@@ -265,7 +266,7 @@ public class ClustersTest {
 
     StackId stackId = new StackId("HDP-0.1");
 
-    helper.createStack(stackId);
+    helper.createStackWithRepoVersion(stackId, "");
 
     clusters.addCluster(c1, stackId);
     clusters.addCluster(c2, stackId);
@@ -351,7 +352,7 @@ public class ClustersTest {
 
     StackId stackId = new StackId("HDP-0.1");
 
-    helper.createStack(stackId);
+    helper.createStackWithRepoVersion(stackId, "");
 
     clusters.addCluster(c1, stackId);
     clusters.addCluster(c2, stackId);
@@ -384,7 +385,7 @@ public class ClustersTest {
 
     StackId stackId = new StackId("HDP-0.1");
 
-    helper.createStack(stackId);
+    helper.createStackWithRepoVersion(stackId, "");
 
     clusters.addCluster(c1, stackId);
 
@@ -426,7 +427,8 @@ public class ClustersTest {
     // host config override
     host1.addDesiredConfig(cluster.getClusterId(), true, "_test", config2);
 
-    Service hdfs = cluster.addService(null, "HDFS", "", repositoryVersion);
+    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE");
+    Service hdfs = cluster.addService(serviceGroup, "HDFS", "HDFS", 
repositoryVersion);
 
     
//Assert.assertNotNull(injector.getInstance(ClusterServiceDAO.class).findByClusterAndServiceNames(c1,
 "HDFS"));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
index d7214f4..558b334 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ConcurrentServiceConfigVersionTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.state.cluster;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 
@@ -28,6 +29,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.api.services.ServiceGroupKey;
 import org.apache.ambari.server.api.services.ServiceKey;
 import org.apache.ambari.server.controller.ServiceConfigVersionResponse;
 import 
org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
@@ -45,6 +47,8 @@ import org.apache.ambari.server.state.ServiceComponentFactory;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.ServiceGroup;
+import org.apache.ambari.server.state.ServiceGroupFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.easymock.EasyMock;
@@ -78,6 +82,9 @@ public class ConcurrentServiceConfigVersionTest {
   private ServiceFactory serviceFactory;
 
   @Inject
+  private ServiceGroupFactory serviceGroupFactory;
+
+  @Inject
   private ServiceComponentFactory serviceComponentFactory;
 
   @Inject
@@ -95,6 +102,7 @@ public class ConcurrentServiceConfigVersionTest {
    * The cluster.
    */
   private Cluster cluster;
+  private ServiceGroup serviceGroup;
 
   private RepositoryVersionEntity repositoryVersion;
 
@@ -110,10 +118,11 @@ public class ConcurrentServiceConfigVersionTest {
 
     injector.getInstance(GuiceJpaInitializer.class);
     injector.injectMembers(this);
-    helper.createStack(stackId);
+    repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, 
stackId.getStackVersion());
+    helper.createStackWithRepoVersion(stackId, repositoryVersion.getVersion());
     clusters.addCluster("c1", stackId);
     cluster = clusters.getCluster("c1");
-    repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, 
stackId.getStackVersion());
+    serviceGroup = serviceGroupFactory.createNew(cluster, 
"test_service_group", new HashSet<ServiceGroupKey>());
 
     String hostName = "c6401.ambari.apache.org";
     clusters.addHost(hostName);
@@ -220,7 +229,7 @@ public class ConcurrentServiceConfigVersionTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, null, new 
ArrayList<ServiceKey>(), serviceName, "", repositoryVersion);
+      service = serviceFactory.createNew(cluster, serviceGroup, new 
ArrayList<ServiceKey>(), serviceName, serviceName, repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
index 983143c..ca7a7ac 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ServiceComponentHostConcurrentWriteDeadlockTest.java
@@ -30,6 +30,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
+import org.apache.ambari.server.api.services.ServiceGroupKey;
 import org.apache.ambari.server.api.services.ServiceKey;
 import 
org.apache.ambari.server.events.listeners.upgrade.HostVersionOutOfSyncListener;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
@@ -47,6 +48,8 @@ import org.apache.ambari.server.state.ServiceComponentFactory;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.ServiceGroup;
+import org.apache.ambari.server.state.ServiceGroupFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.testing.DeadlockWarningThread;
@@ -86,6 +89,9 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest {
   private ServiceComponentHostFactory serviceComponentHostFactory;
 
   @Inject
+  private ServiceGroupFactory serviceGroupFactory;
+
+  @Inject
   private ConfigFactory configFactory;
 
   @Inject
@@ -99,6 +105,7 @@ public class ServiceComponentHostConcurrentWriteDeadlockTest 
{
    * The cluster.
    */
   private Cluster cluster;
+  private ServiceGroup serviceGroup;
 
   /**
    * Creates 1 host and add it to the cluster.
@@ -114,12 +121,12 @@ public class 
ServiceComponentHostConcurrentWriteDeadlockTest {
     injector.injectMembers(this);
 
     OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
-    helper.createStack(stackId);
 
+    m_repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, 
REPO_VERSION);
+    helper.createStackWithRepoVersion(stackId, 
m_repositoryVersion.getVersion());
     clusters.addCluster("c1", stackId);
     cluster = clusters.getCluster("c1");
-    m_repositoryVersion = helper.getOrCreateRepositoryVersion(stackId, 
REPO_VERSION);
-
+    serviceGroup = serviceGroupFactory.createNew(cluster, 
"test_service_group", new HashSet<ServiceGroupKey>());
     Config config1 = configFactory.createNew(cluster, "test-type1", null, new 
HashMap<>(), new HashMap<>());
 
     Config config2 = configFactory.createNew(cluster, "test-type2", null, new 
HashMap<>(), new HashMap<>());
@@ -243,7 +250,7 @@ public class 
ServiceComponentHostConcurrentWriteDeadlockTest {
     try {
       service = cluster.getService(serviceName);
     } catch (ServiceNotFoundException e) {
-      service = serviceFactory.createNew(cluster, null, new 
ArrayList<ServiceKey>(), serviceName, "", m_repositoryVersion);
+      service = serviceFactory.createNew(cluster, serviceGroup, new 
ArrayList<ServiceKey>(), serviceName, serviceName, m_repositoryVersion);
       cluster.addService(service);
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/bcbacf0c/ambari-server/src/test/resources/cluster-settings.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/cluster-settings.xml 
b/ambari-server/src/test/resources/cluster-settings.xml
new file mode 100644
index 0000000..70b100a
--- /dev/null
+++ b/ambari-server/src/test/resources/cluster-settings.xml
@@ -0,0 +1,322 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+    <property>
+        <name>recovery_enabled</name>
+        <value>true</value>
+        <description>Auto start enabled or not for this cluster.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>recovery_type</name>
+        <value>AUTO_START</value>
+        <description>Auto start type.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>recovery_lifetime_max_count</name>
+        <value>1024</value>
+        <description>Auto start lifetime maximum count of recovery attempt 
allowed per host component. This is reset
+            when agent is restarted.
+        </description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>recovery_max_count</name>
+        <value>6</value>
+        <description>Auto start maximum count of recovery attempt allowed per 
host component in a window. This is reset
+            when agent is restarted.
+        </description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>recovery_window_in_minutes</name>
+        <value>60</value>
+        <description>Auto start recovery window size in minutes.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>recovery_retry_interval</name>
+        <value>5</value>
+        <description>Auto start recovery retry gap between tries per host 
component.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>security_enabled</name>
+        <value>false</value>
+        <description>Hadoop Security</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>kerberos_domain</name>
+        <value>EXAMPLE.COM</value>
+        <description>Kerberos realm.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>ignore_groupsusers_create</name>
+        <display-name>Skip group modifications during install</display-name>
+        <value>false</value>
+        <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+        <description>Whether to ignore failures on users and group 
creation</description>
+        <value-attributes>
+            <overridable>false</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>sysprep_skip_create_users_and_groups</name>
+        <display-name>Whether to skip creating users and groups in a 
sysprepped cluster</display-name>
+        <value>false</value>
+        <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+        <description>Whether to skip creating users and groups in a sysprepped 
cluster</description>
+        <value-attributes>
+            <overridable>true</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>sysprep_skip_copy_fast_jar_hdfs</name>
+        <display-name>Whether to skip copying the tarballs to HDFS on a 
sysprepped cluster</display-name>
+        <value>false</value>
+        <description>Whether to skip copying the tarballs to HDFS on a 
sysprepped cluster, during both fresh install and
+            stack upgrade
+        </description>
+        <value-attributes>
+            <overridable>true</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>sysprep_skip_copy_tarballs_hdfs</name>
+        <display-name>Whether to skip copying the tarballs to HDFS on a 
sysprepped cluster</display-name>
+        <value>false</value>
+        <description>Whether to skip copying the tarballs to HDFS on a 
sysprepped cluster, during both fresh install and
+            stack upgrade
+        </description>
+        <value-attributes>
+            <overridable>true</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>sysprep_skip_copy_oozie_share_lib_to_hdfs</name>
+        <display-name>Whether to skip copying the Oozie share lib to HDFS on 
sysprepped cluster</display-name>
+        <value>false</value>
+        <description>Whether to skip copying the Oozie share lib to HDFS on 
sysprepped cluster, during both fresh
+            install and stack upgrade
+        </description>
+        <value-attributes>
+            <overridable>true</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>sysprep_skip_setup_jce</name>
+        <display-name>Whether to skip setting up the unlimited key JCE policy 
on sysprepped cluster</display-name>
+        <value>false</value>
+        <description>Whether to skip setting up the unlimited key JCE policy 
on sysprepped cluster, during both fresh
+            install and upgrades
+        </description>
+        <value-attributes>
+            <overridable>true</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>smokeuser</name>
+        <display-name>Smoke User</display-name>
+        <value>ambari-qa</value>
+        <property-type>USER</property-type>
+        <description>User executing service checks</description>
+        <value-attributes>
+            <type>user</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>smokeuser_keytab</name>
+        <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
+        <description>Path to smoke test user keytab file</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>user_group</name>
+        <display-name>Hadoop Group</display-name>
+        <value>hadoop</value>
+        <property-type>GROUP</property-type>
+        <description>Hadoop user group.</description>
+        <value-attributes>
+            <type>user</type>
+            <overridable>false</overridable>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>repo_suse_rhel_template</name>
+        <value>[{{repo_id}}]
+            name={{repo_id}}
+            {% if mirror_list %}mirrorlist={{mirror_list}}{% else 
%}baseurl={{base_url}}{% endif %}
+
+            path=/
+            enabled=1
+            gpgcheck=0
+        </value>
+        <description>Template of repositories for rhel and suse.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>repo_ubuntu_template</name>
+        <value>{{package_type}} {{base_url}} {{components}}</value>
+        <description>Template of repositories for ubuntu.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>override_uid</name>
+        <value>true</value>
+        <property-type>ADDITIONAL_USER_PROPERTY</property-type>
+        <display-name>Have Ambari manage UIDs</display-name>
+        <description>Have Ambari manage UIDs</description>
+        <value-attributes>
+            <overridable>false</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>fetch_nonlocal_groups</name>
+        <value>true</value>
+        <display-name>Ambari fetch nonlocal groups</display-name>
+        <description>Ambari requires fetching all the groups. This can be slow
+            on envs with enabled ldap. Setting this option to false will 
enable Ambari
+            to skip user/group management connected with ldap groups.
+        </description>
+        <value-attributes>
+            <overridable>false</overridable>
+            <type>boolean</type>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>managed_hdfs_resource_property_names</name>
+        <value/>
+        <description>Comma separated list of property names with HDFS resource 
paths.
+            Resource from this list will be managed even if it is marked as 
not managed in the stack
+        </description>
+        <value-attributes>
+            <overridable>false</overridable>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>alerts_repeat_tolerance</name>
+        <value>1</value>
+        <description>The number of consecutive alerts required to transition 
an alert from the SOFT to the HARD state.
+        </description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>ignore_bad_mounts</name>
+        <value>false</value>
+        <description>For properties handled by handle_mounted_dirs this will 
make Ambari not to create any
+            directories.
+        </description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>manage_dirs_on_root</name>
+        <value>true</value>
+        <description>For properties handled by handle_mounted_dirs this will 
make Ambari to manage (create and set
+            permissions) unknown directories on / partition
+        </description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>one_dir_per_partition</name>
+        <value>false</value>
+        <description>For properties handled by handle_mounted_dirs this will 
make Ambari</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>hide_yarn_memory_widget</name>
+        <value>false</value>
+        <description>YARN Memory widget should be hidden by default on the 
dashboard.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>agent_mounts_ignore_list</name>
+        <value/>
+        <description>Comma separated list of the mounts which would be ignored 
by Ambari during property values
+            suggestion by Stack Advisor
+        </description>
+        <on-ambari-upgrade add="false"/>
+        <value-attributes>
+            <visible>true</visible>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+    </property>
+    <property>
+        <name>enable_external_ranger</name>
+        <value>false</value>
+        <description>Flag to turn on when external setup of External Ranger is 
done.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>service_check_type</name>
+        <display-name>Service Check Type</display-name>
+        <value>full</value>
+        <description>Indicates the complexity of the service check. Valid 
values are 'minimal' or 'full'.</description>
+        <on-ambari-upgrade add="true"/>
+        <value-attributes>
+            <visible>true</visible>
+            <empty-value-valid>true</empty-value-valid>
+        </value-attributes>
+    </property>
+    <property>
+        <name>namenode_rolling_restart_timeout</name>
+        <value>4200</value>
+        <description>Timeout for namenode rolling restart 
command.</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>namenode_rolling_restart_safemode_exit_timeout</name>
+        <value>3600</value>
+        <description>Timeout for safemode exit, during namenode rolling 
restart</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>manage_hive_fsroot</name>
+        <value>true</value>
+        <description>If flag is set to true, ambari will manage fsroot for 
Hive, by setting it to fs.defaultFS of local
+            HDFS
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>

Reply via email to