Repository: ambari
Updated Branches:
  refs/heads/trunk fa6f80a76 -> c58162fe3


http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
index 9ae78c4..4ea47fa 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/UpgradePackTest.java
@@ -36,13 +36,17 @@ import 
org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent;
 import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping;
 import 
org.apache.ambari.server.state.stack.upgrade.ClusterGrouping.ExecuteStage;
 import org.apache.ambari.server.state.stack.upgrade.ConfigureTask;
-import org.apache.ambari.server.state.stack.upgrade.ConfigureTask.Transfer;
+import 
org.apache.ambari.server.state.stack.upgrade.ConfigUpgradeChangeDefinition.Transfer;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.Grouping;
+import org.apache.ambari.server.state.stack.upgrade.RestartGrouping;
 import org.apache.ambari.server.state.stack.upgrade.RestartTask;
+import org.apache.ambari.server.state.stack.upgrade.StopGrouping;
 import org.apache.ambari.server.state.stack.upgrade.ServiceCheckGrouping;
 import org.apache.ambari.server.state.stack.upgrade.Task;
 import org.apache.ambari.server.state.stack.upgrade.TransferOperation;
+import org.apache.ambari.server.state.stack.upgrade.UpdateStackGrouping;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -88,9 +92,8 @@ public class UpgradePackTest {
     Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", 
"2.1.1");
     assertTrue(upgrades.size() > 0);
     assertTrue(upgrades.containsKey("upgrade_test"));
-
-    UpgradePack up = upgrades.get("upgrade_test");
-    assertEquals("2.2.*", up.getTarget());
+    UpgradePack upgrade = upgrades.get("upgrade_test");
+    assertEquals("2.2.*.*", upgrade.getTarget());
 
     Map<String, List<String>> expectedStages = new LinkedHashMap<String, 
List<String>>() {{
       put("ZOOKEEPER", Arrays.asList("ZOOKEEPER_SERVER"));
@@ -100,24 +103,24 @@ public class UpgradePackTest {
     // !!! test the tasks
     int i = 0;
     for (Entry<String, List<String>> entry : expectedStages.entrySet()) {
-      assertTrue(up.getTasks().containsKey(entry.getKey()));
-      assertEquals(i++, indexOf(up.getTasks(), entry.getKey()));
+      assertTrue(upgrade.getTasks().containsKey(entry.getKey()));
+      assertEquals(i++, indexOf(upgrade.getTasks(), entry.getKey()));
 
       // check that the number of components matches
-      assertEquals(entry.getValue().size(), 
up.getTasks().get(entry.getKey()).size());
+      assertEquals(entry.getValue().size(), 
upgrade.getTasks().get(entry.getKey()).size());
 
       // check component ordering
       int j = 0;
       for (String comp : entry.getValue()) {
-        assertEquals(j++, indexOf(up.getTasks().get(entry.getKey()), comp));
+        assertEquals(j++, indexOf(upgrade.getTasks().get(entry.getKey()), 
comp));
       }
     }
 
     // !!! test specific tasks
-    assertTrue(up.getTasks().containsKey("HDFS"));
-    assertTrue(up.getTasks().get("HDFS").containsKey("NAMENODE"));
+    assertTrue(upgrade.getTasks().containsKey("HDFS"));
+    assertTrue(upgrade.getTasks().get("HDFS").containsKey("NAMENODE"));
 
-    ProcessingComponent pc = up.getTasks().get("HDFS").get("NAMENODE");
+    ProcessingComponent pc = upgrade.getTasks().get("HDFS").get("NAMENODE");
     assertNotNull(pc.preTasks);
     assertNotNull(pc.postTasks);
     assertNotNull(pc.tasks);
@@ -129,17 +132,17 @@ public class UpgradePackTest {
     assertEquals(RestartTask.class, pc.tasks.get(0).getClass());
 
 
-    assertTrue(up.getTasks().containsKey("ZOOKEEPER"));
-    assertTrue(up.getTasks().get("ZOOKEEPER").containsKey("ZOOKEEPER_SERVER"));
+    assertTrue(upgrade.getTasks().containsKey("ZOOKEEPER"));
+    
assertTrue(upgrade.getTasks().get("ZOOKEEPER").containsKey("ZOOKEEPER_SERVER"));
 
-    pc = up.getTasks().get("HDFS").get("DATANODE");
+    pc = upgrade.getTasks().get("HDFS").get("DATANODE");
     assertNotNull(pc.preDowngradeTasks);
     assertEquals(0, pc.preDowngradeTasks.size());
     assertNotNull(pc.postDowngradeTasks);
     assertEquals(1, pc.postDowngradeTasks.size());
 
 
-    pc = up.getTasks().get("ZOOKEEPER").get("ZOOKEEPER_SERVER");
+    pc = upgrade.getTasks().get("ZOOKEEPER").get("ZOOKEEPER_SERVER");
     assertNotNull(pc.preTasks);
     assertEquals(1, pc.preTasks.size());
     assertNotNull(pc.postTasks);
@@ -147,56 +150,22 @@ public class UpgradePackTest {
     assertNotNull(pc.tasks);
     assertEquals(1, pc.tasks.size());
 
-    pc = up.getTasks().get("YARN").get("NODEMANAGER");
+    pc = upgrade.getTasks().get("YARN").get("NODEMANAGER");
     assertNotNull(pc.preTasks);
     assertEquals(2, pc.preTasks.size());
     Task t = pc.preTasks.get(1);
     assertEquals(ConfigureTask.class, t.getClass());
     ConfigureTask ct = (ConfigureTask) t;
-    assertEquals("core-site", ct.getConfigType());
-    assertEquals(4, ct.getTransfers().size());
-
-    /*
-    <transfer operation="COPY" from-key="copy-key" to-key="copy-key-to" />
-    <transfer operation="COPY" from-type="my-site" from-key="my-copy-key" 
to-key="my-copy-key-to" />
-    <transfer operation="MOVE" from-key="move-key" to-key="move-key-to" />
-    <transfer operation="DELETE" delete-key="delete-key">
-      <keep-key>important-key</keep-key>
-    </transfer>
-    */
-    Transfer t1 = ct.getTransfers().get(0);
-    assertEquals(TransferOperation.COPY, t1.operation);
-    assertEquals("copy-key", t1.fromKey);
-    assertEquals("copy-key-to", t1.toKey);
-
-    Transfer t2 = ct.getTransfers().get(1);
-    assertEquals(TransferOperation.COPY, t2.operation);
-    assertEquals("my-site", t2.fromType);
-    assertEquals("my-copy-key", t2.fromKey);
-    assertEquals("my-copy-key-to", t2.toKey);
-    assertTrue(t2.keepKeys.isEmpty());
-
-    Transfer t3 = ct.getTransfers().get(2);
-    assertEquals(TransferOperation.MOVE, t3.operation);
-    assertEquals("move-key", t3.fromKey);
-    assertEquals("move-key-to", t3.toKey);
-
-    Transfer t4 = ct.getTransfers().get(3);
-    assertEquals(TransferOperation.DELETE, t4.operation);
-    assertEquals("delete-key", t4.deleteKey);
-    assertNull(t4.toKey);
-    assertTrue(t4.preserveEdits);
-    assertEquals(1, t4.keepKeys.size());
-    assertEquals("important-key", t4.keepKeys.get(0));
+    // check that the Configure task successfully parsed id
+    assertEquals("hdp_2_1_1_nm_pre_upgrade", ct.getId());
   }
 
   @Test
-  public void testGroupOrders() {
+  public void testGroupOrdersForRolling() {
     Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", 
"2.1.1");
     assertTrue(upgrades.size() > 0);
     assertTrue(upgrades.containsKey("upgrade_test_checks"));
-
-    UpgradePack up = upgrades.get("upgrade_test_checks");
+    UpgradePack upgrade = upgrades.get("upgrade_test_checks");
 
     List<String> expected_up = Arrays.asList(
         "PRE_CLUSTER",
@@ -219,7 +188,7 @@ public class UpgradePackTest {
     Grouping serviceCheckGroup = null;
 
     int i = 0;
-    List<Grouping> groups = up.getGroups(Direction.UPGRADE);
+    List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE);
     for (Grouping g : groups) {
       assertEquals(expected_up.get(i), g.name);
       i++;
@@ -245,7 +214,7 @@ public class UpgradePackTest {
 
 
     i = 0;
-    groups = up.getGroups(Direction.DOWNGRADE);
+    groups = upgrade.getGroups(Direction.DOWNGRADE);
     for (Grouping g : groups) {
       assertEquals(expected_down.get(i), g.name);
       i++;
@@ -253,15 +222,45 @@ public class UpgradePackTest {
 
   }
 
+
+  // TODO AMBARI-12698, add the Downgrade case
   @Test
-  public void testDirection() throws Exception {
+  public void testGroupOrdersForNonRolling() {
     Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", 
"2.1.1");
     assertTrue(upgrades.size() > 0);
-    assertTrue(upgrades.containsKey("upgrade_direction"));
+    assertTrue(upgrades.containsKey("upgrade_test_nonrolling"));
+    UpgradePack upgrade = upgrades.get("upgrade_test_nonrolling");
 
-    UpgradePack up = upgrades.get("upgrade_direction");
+    List<String> expected_up = Arrays.asList(
+        "PRE_CLUSTER",
+        "Stop High-Level Daemons",
+        "Backups",
+        "Stop Low-Level Daemons",
+        "UPDATE_DESIRED_STACK_ID",
+        "ALL_HOST_OPS",
+        "ZOOKEEPER",
+        "HDFS",
+        "MR and YARN",
+        "POST_CLUSTER");
 
-    List<Grouping> groups = up.getGroups(Direction.UPGRADE);
+    int i = 0;
+    List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE);
+    for (Grouping g : groups) {
+      assertEquals(expected_up.get(i), g.name);
+      i++;
+    }
+  }
+
+
+  @Test
+  public void testDirectionForRolling() throws Exception {
+    Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", 
"2.1.1");
+    assertTrue(upgrades.size() > 0);
+    assertTrue(upgrades.containsKey("upgrade_direction"));
+    UpgradePack upgrade = upgrades.get("upgrade_direction");
+    assertTrue(upgrade.getType() == UpgradeType.ROLLING);
+
+    List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE);
     assertEquals(4, groups.size());
     Grouping group = groups.get(2);
     assertEquals(ClusterGrouping.class, group.getClass());
@@ -274,7 +273,7 @@ public class UpgradePackTest {
     assertNotNull(stages.get(0).intendedDirection);
     assertEquals(Direction.DOWNGRADE, stages.get(0).intendedDirection);
 
-    groups = up.getGroups(Direction.DOWNGRADE);
+    groups = upgrade.getGroups(Direction.DOWNGRADE);
     assertEquals(3, groups.size());
     // there are two clustergroupings at the end
     group = groups.get(1);
@@ -301,6 +300,73 @@ public class UpgradePackTest {
     Assert.assertTrue(upgradePack.isServiceCheckFailureAutoSkipped());
   }
 
+  @Test
+  public void testDirectionForNonRolling() throws Exception {
+    Map<String, UpgradePack> upgrades = ambariMetaInfo.getUpgradePacks("HDP", 
"2.1.1");
+    assertTrue(upgrades.size() > 0);
+    assertTrue(upgrades.containsKey("upgrade_test_nonrolling"));
+    UpgradePack upgrade = upgrades.get("upgrade_test_nonrolling");
+    assertTrue(upgrade.getType() == UpgradeType.NON_ROLLING);
+
+    List<Grouping> groups = upgrade.getGroups(Direction.UPGRADE);
+    assertEquals(10, groups.size());
+
+    Grouping group = null;
+    ClusterGrouping clusterGroup = null;
+    UpdateStackGrouping updateStackGroup = null;
+    StopGrouping stopGroup = null;
+    RestartGrouping restartGroup = null;
+
+    group = groups.get(0);
+    assertEquals(ClusterGrouping.class, group.getClass());
+    clusterGroup = (ClusterGrouping) group;
+    assertEquals("Prepare Upgrade", clusterGroup.title);
+
+    group = groups.get(1);
+    assertEquals(StopGrouping.class, group.getClass());
+    stopGroup = (StopGrouping) group;
+    assertEquals("Stop Daemons for High-Level Services", stopGroup.title);
+
+    group = groups.get(2);
+    assertEquals(ClusterGrouping.class, group.getClass());
+    clusterGroup = (ClusterGrouping) group;
+    assertEquals("Take Backups", clusterGroup.title);
+
+    group = groups.get(3);
+    assertEquals(StopGrouping.class, group.getClass());
+    stopGroup = (StopGrouping) group;
+    assertEquals("Stop Daemons for Low-Level Services", stopGroup.title);
+
+    group = groups.get(4);
+    assertEquals(UpdateStackGrouping.class, group.getClass());
+    updateStackGroup = (UpdateStackGrouping) group;
+    assertEquals("Update Desired Stack Id", updateStackGroup.title);
+
+    group = groups.get(5);
+    assertEquals(ClusterGrouping.class, group.getClass());
+    clusterGroup = (ClusterGrouping) group;
+    assertEquals("Set Version On All Hosts", clusterGroup.title);
+
+    group = groups.get(6);
+    assertEquals(RestartGrouping.class, group.getClass());
+    restartGroup = (RestartGrouping) group;
+    assertEquals("Zookeeper", restartGroup.title);
+
+    group = groups.get(7);
+    assertEquals(RestartGrouping.class, group.getClass());
+    restartGroup = (RestartGrouping) group;
+    assertEquals("HDFS", restartGroup.title);
+
+    group = groups.get(8);
+    assertEquals(RestartGrouping.class, group.getClass());
+    restartGroup = (RestartGrouping) group;
+    assertEquals("MR and YARN", restartGroup.title);
+
+    group = groups.get(9);
+    assertEquals(ClusterGrouping.class, group.getClass());
+    clusterGroup = (ClusterGrouping) group;
+    assertEquals("Finalize {{direction.text.proper}}", clusterGroup.title);
+  }
 
   private int indexOf(Map<String, ?> map, String keyToFind) {
     int result = -1;
@@ -315,6 +381,4 @@ public class UpgradePackTest {
 
     return result;
   }
-
-
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
index e2a3995..bac00d4 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilderTest.java
@@ -43,7 +43,7 @@ public class StageWrapperBuilderTest {
    */
   @Test
   public void testBuildOrder() throws Exception {
-    UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, 
Direction.UPGRADE);
+    UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, 
Direction.UPGRADE, UpgradeType.ROLLING);
     MockStageWrapperBuilder builder = new MockStageWrapperBuilder(null);
     List<StageWrapper> stageWrappers = builder.build(upgradeContext);
     List<Integer> invocationOrder = builder.getInvocationOrder();
@@ -64,7 +64,7 @@ public class StageWrapperBuilderTest {
    */
   @Test
   public void testAutoSkipCheckInserted() throws Exception {
-    UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, 
Direction.UPGRADE);
+    UpgradeContext upgradeContext = new UpgradeContext(null, null, null, null, 
Direction.UPGRADE, UpgradeType.ROLLING);
     upgradeContext.setAutoSkipComponentFailures(true);
     upgradeContext.setAutoSkipServiceCheckFailures(true);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py 
b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 4ca74a8..7615e28 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -736,7 +736,7 @@ class TestHBaseMaster(RMFTestCase):
   def test_upgrade_backup(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + 
"/scripts/hbase_upgrade.py",
                    classname = "HbaseMasterUpgrade",
-                   command = "snapshot",
+                   command = "take_snapshot",
                    config_file="hbase-preupgrade.json",
                    hdp_stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
new file mode 100644
index 0000000..1301f9d
--- /dev/null
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/config-upgrade.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+  <services>
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade">
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade">
+            <type>hdfs-site</type>
+            <set key="myproperty" value="mynewvalue"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="YARN">
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_1_1_nm_pre_upgrade">
+            <type>core-site</type>
+            <transfer operation="copy" from-key="copy-key"
+                      to-key="copy-key-to"/>
+            <transfer operation="copy" from-type="my-site"
+                      from-key="my-copy-key"
+                      to-key="my-copy-key-to"/>
+            <transfer operation="move" from-key="move-key"
+                      to-key="move-key-to"/>
+            <transfer operation="delete" delete-key="delete-key"
+                      preserve-edits="true">
+              <keep-key>important-key</keep-key>
+            </transfer>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_1_1_set_transport_mode">
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10010</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10011</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_1_1_hive_server_foo">
+            <type>hive-site</type>
+            <set key="fooKey" value="fooValue"/>
+            <set key="fooKey2" value="fooValue2"/>
+            <set key="fooKey3" value="fooValue3"/>
+            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
+            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
+            <transfer operation="delete" delete-key="delete-key" />
+            <transfer operation="delete" delete-key="delete-http" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" />
+            <transfer operation="delete" delete-key="delete-https-fail" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-prop-fail" 
if-key="non.existent" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-type-fail" 
if-key="non.existent" if-type="non.existent" if-value="" />
+            <transfer operation="delete" delete-key="delete-null-if-value" 
if-key="non.existent" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-key" 
if-key="" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-type" 
if-key="non.existent" if-type="" />
+            <transfer operation="delete" delete-key="delete-thrift" 
if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+  </services>
+
+</upgrade-config-changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml
index 92e8c6a..0e6d914 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_bucket_test.xml
@@ -16,8 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.6</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
   
     <group name="ZOOKEEPER" title="Zookeeper">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml
index 89a9e4f..e12fcd9 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_direction.xml
@@ -16,7 +16,9 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.5</target-stack>
+  <type>ROLLING</type>
   
   <order>
     <group name="ZOOKEEPER" title="Zookeeper">
@@ -75,7 +77,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" />

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
index b7a62f5..827348a 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
@@ -16,7 +16,9 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.1.1</target-stack>
+  <type>ROLLING</type>
   
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
@@ -125,10 +127,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="2.2.0" />
         </post-upgrade>
       </component>
     </service>
@@ -139,16 +141,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade" />
           <task xsi:type="manual">
             <message>{{direction.verb.proper}} your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -159,7 +158,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">
@@ -182,15 +181,7 @@
           <task xsi:type="execute">
             <command>ls</command>
           </task>
-          <task xsi:type="configure">
-            <type>core-site</type>
-            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
-            <transfer operation="copy" from-type="my-site" 
from-key="my-copy-key" to-key="my-copy-key-to" />
-            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
-            <transfer operation="delete" delete-key="delete-key" 
preserve-edits="true">
-            <keep-key>important-key</keep-key>
-            </transfer>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nm_pre_upgrade"/>
         </pre-upgrade>
       </component>
     </service>
@@ -203,36 +194,10 @@
             <message>The HiveServer port will now change to 10010 if hive is 
using a binary transfer mode or 10011 if hive is using an http transport mode. 
You can use "netstat -anp | grep 1001[01]" to determine if the port is 
available on each of following HiveServer host(s): {{hosts.all}}. If the port 
is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10010</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10011</value>
-            </condition>
-          </task>
-          
-          <task xsi:type="configure">
-            <type>hive-site</type>
-            <set key="fooKey" value="fooValue"/>
-            <set key="fooKey2" value="fooValue2"/>
-            <set key="fooKey3" value="fooValue3"/>
-            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
-            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
-            <transfer operation="delete" delete-key="delete-key" />
-            <transfer operation="delete" delete-key="delete-http" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" />
-            <transfer operation="delete" delete-key="delete-https-fail" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" />
-            <transfer operation="delete" delete-key="delete-prop-fail" 
if-key="non.existent" if-type="hive-site" if-value="https" />
-            <transfer operation="delete" delete-key="delete-type-fail" 
if-key="non.existent" if-type="non.existent" if-value="" />
-            <transfer operation="delete" delete-key="delete-null-if-value" 
if-key="non.existent" if-type="non.existent" />
-            <transfer operation="delete" delete-key="delete-blank-if-key" 
if-key="" if-type="non.existent" />
-            <transfer operation="delete" delete-key="delete-blank-if-type" 
if-key="non.existent" if-type="" />
-            <transfer operation="delete" delete-key="delete-thrift" 
if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" />
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_set_transport_mode"/>
+
+          <task xsi:type="configure" id="hdp_2_1_1_hive_server_foo"/>
+
         </pre-upgrade>
        </component>
      </service>    

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
index 7590c5b..05d3db9 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
@@ -16,8 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.0</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre Upgrade" 
stage="pre">
       <execute-stage title="Confirm 1">
@@ -120,10 +133,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade"/>
         </post-upgrade>
       </component>
     </service>
@@ -133,16 +146,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade"/>
           <task xsi:type="manual">
             <message>Update your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -153,7 +163,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
new file mode 100644
index 0000000..c1e03e0
--- /dev/null
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
@@ -0,0 +1,182 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.3</target-stack>
+  <type>NON_ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop 
YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons 
for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons 
for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>ZKFC</component>
+        <component>JOURNALNODE</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- After processing this group, will change the effective Stack of the 
UpgradeContext object. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" 
title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All 
Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>DATANODE</component>
+        <component>HDFS_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>
+        <component>YARN_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize 
{{direction.text.proper}}">
+      <skippable>true</skippable>
+
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
index 02b0ebf..a9ce2b0 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
@@ -16,9 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
-  <target>2.2.*</target>
-  <target-stack>HDP-2.2.0</target-stack>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.4</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
       <execute-stage title="Confirm 1">
@@ -135,7 +147,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" />
@@ -159,7 +171,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -170,7 +182,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
new file mode 100644
index 0000000..90d64b4
--- /dev/null
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
+
+  <services>
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_zk_post_upgrade">
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade">
+            <type>hdfs-site</type>
+            <set key="myproperty" value="mynewvalue"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="YARN">
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_nm_pre_upgrade">
+            <type>core-site</type>
+            <transfer operation="copy" from-key="copy-key"
+                      to-key="copy-key-to"/>
+            <transfer operation="copy" from-type="my-site"
+                      from-key="my-copy-key"
+                      to-key="my-copy-key-to"/>
+            <transfer operation="move" from-key="move-key"
+                      to-key="move-key-to"/>
+            <transfer operation="delete" delete-key="delete-key"
+                      preserve-edits="true">
+              <keep-key>important-key</keep-key>
+            </transfer>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_set_transport_mode">
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10010</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10011</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_2_0_hive_server_foo">
+            <type>hive-site</type>
+            <set key="fooKey" value="fooValue"/>
+            <set key="fooKey2" value="fooValue2"/>
+            <set key="fooKey3" value="fooValue3"/>
+            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
+            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
+            <transfer operation="delete" delete-key="delete-key" />
+            <transfer operation="delete" delete-key="delete-http" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" />
+            <transfer operation="delete" delete-key="delete-https-fail" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-prop-fail" 
if-key="non.existent" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-type-fail" 
if-key="non.existent" if-type="non.existent" if-value="" />
+            <transfer operation="delete" delete-key="delete-null-if-value" 
if-key="non.existent" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-key" 
if-key="" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-type" 
if-key="non.existent" if-type="" />
+            <transfer operation="delete" delete-key="delete-thrift" 
if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+  </services>
+
+</upgrade-config-changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
index 5271ae6..34ebe32 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
@@ -17,7 +17,20 @@
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
   <target>2.2.*</target>
-  
+  <target-stack>HDP-2.2.0</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
       <execute-stage title="Confirm 1">
@@ -126,7 +139,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" />
@@ -149,7 +162,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -160,7 +173,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/c58162fe/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
index 892b9b4..14c68be 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
@@ -16,8 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
-  <target>2.2.*</target>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.1</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre Upgrade" 
stage="pre">
       <execute-stage title="Confirm 1">
@@ -125,10 +138,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="hdp_2_2_0_zk_post_upgrade"/>
         </post-upgrade>
       </component>
     </service>
@@ -138,16 +151,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade"/>
           <task xsi:type="manual">
             <message>Update your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -158,7 +168,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

Reply via email to