Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 1abf2ae9c -> ff8a56af6


http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
index b7a62f5..827348a 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test.xml
@@ -16,7 +16,9 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.1.1</target-stack>
+  <type>ROLLING</type>
   
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
@@ -125,10 +127,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="2.2.0" />
         </post-upgrade>
       </component>
     </service>
@@ -139,16 +141,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade" />
           <task xsi:type="manual">
             <message>{{direction.verb.proper}} your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -159,7 +158,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">
@@ -182,15 +181,7 @@
           <task xsi:type="execute">
             <command>ls</command>
           </task>
-          <task xsi:type="configure">
-            <type>core-site</type>
-            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
-            <transfer operation="copy" from-type="my-site" 
from-key="my-copy-key" to-key="my-copy-key-to" />
-            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
-            <transfer operation="delete" delete-key="delete-key" 
preserve-edits="true">
-            <keep-key>important-key</keep-key>
-            </transfer>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nm_pre_upgrade"/>
         </pre-upgrade>
       </component>
     </service>
@@ -203,36 +194,10 @@
           <message>The HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10010</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10011</value>
-            </condition>
-          </task>
-          
-          <task xsi:type="configure">
-            <type>hive-site</type>
-            <set key="fooKey" value="fooValue"/>
-            <set key="fooKey2" value="fooValue2"/>
-            <set key="fooKey3" value="fooValue3"/>
-            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
-            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
-            <transfer operation="delete" delete-key="delete-key" />
-            <transfer operation="delete" delete-key="delete-http" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" />
-            <transfer operation="delete" delete-key="delete-https-fail" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" />
-            <transfer operation="delete" delete-key="delete-prop-fail" 
if-key="non.existent" if-type="hive-site" if-value="https" />
-            <transfer operation="delete" delete-key="delete-type-fail" 
if-key="non.existent" if-type="non.existent" if-value="" />
-            <transfer operation="delete" delete-key="delete-null-if-value" 
if-key="non.existent" if-type="non.existent" />
-            <transfer operation="delete" delete-key="delete-blank-if-key" 
if-key="" if-type="non.existent" />
-            <transfer operation="delete" delete-key="delete-blank-if-type" 
if-key="non.existent" if-type="" />
-            <transfer operation="delete" delete-key="delete-thrift" 
if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" />
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_set_transport_mode"/>
+
+          <task xsi:type="configure" id="hdp_2_1_1_hive_server_foo"/>
+
         </pre-upgrade>
        </component>
      </service>    
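
Note on the pattern above: the upgrade pack no longer carries configuration changes inline; each <task xsi:type="configure"/> now points at a change definition by id. A minimal sketch of the pairing, assuming the HDP 2.1.1 stack also gets a config-upgrade.xml with matching ids (that file is not part of this excerpt; the definition body below simply restates the inline task that was removed):

    <!-- upgrade pack: reference the change by id -->
    <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade" />

    <!-- config-upgrade.xml (assumed counterpart): the definition the id resolves to -->
    <definition xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade">
      <type>hdfs-site</type>
      <set key="myproperty" value="mynewvalue"/>
    </definition>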

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
index 7590c5b..05d3db9 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_checks.xml
@@ -16,8 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.0</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre Upgrade" 
stage="pre">
       <execute-stage title="Confirm 1">
@@ -120,10 +133,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="hdp_2_1_1_zk_post_upgrade"/>
         </post-upgrade>
       </component>
     </service>
@@ -133,16 +146,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_1_1_nn_pre_upgrade"/>
           <task xsi:type="manual">
             <message>Update your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -153,7 +163,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
new file mode 100644
index 0000000..c1e03e0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_test_nonrolling.xml
@@ -0,0 +1,182 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.3</target-stack>
+  <type>NON_ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop 
YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons 
for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons 
for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>ZKFC</component>
+        <component>JOURNALNODE</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All 
Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>DATANODE</component>
+        <component>HDFS_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>
+        <component>YARN_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize 
{{direction.text.proper}}">
+      <skippable>true</skippable>
+
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
index 02b0ebf..a9ce2b0 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.1.1/upgrades/upgrade_to_new_stack.xml
@@ -16,9 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
-  <target-stack>HDP-2.2.0</target-stack>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.4</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
       <execute-stage title="Confirm 1">
@@ -135,7 +147,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" />
@@ -159,7 +171,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -170,7 +182,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
new file mode 100644
index 0000000..90d64b4
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/config-upgrade.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+  <services>
+    <service name="ZOOKEEPER">
+      <component name="ZOOKEEPER_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_zk_post_upgrade">
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HDFS">
+      <component name="NAMENODE">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade">
+            <type>hdfs-site</type>
+            <set key="myproperty" value="mynewvalue"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="YARN">
+      <component name="NODEMANAGER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_nm_pre_upgrade">
+            <type>core-site</type>
+            <transfer operation="copy" from-key="copy-key"
+                      to-key="copy-key-to"/>
+            <transfer operation="copy" from-type="my-site"
+                      from-key="my-copy-key"
+                      to-key="my-copy-key-to"/>
+            <transfer operation="move" from-key="move-key"
+                      to-key="move-key-to"/>
+            <transfer operation="delete" delete-key="delete-key"
+                      preserve-edits="true">
+              <keep-key>important-key</keep-key>
+            </transfer>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_2_0_set_transport_mode">
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10010</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10011</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_2_0_hive_server_foo">
+            <type>hive-site</type>
+            <set key="fooKey" value="fooValue"/>
+            <set key="fooKey2" value="fooValue2"/>
+            <set key="fooKey3" value="fooValue3"/>
+            <transfer operation="copy" from-key="copy-key" 
to-key="copy-key-to" />
+            <transfer operation="move" from-key="move-key" 
to-key="move-key-to" />
+            <transfer operation="delete" delete-key="delete-key" />
+            <transfer operation="delete" delete-key="delete-http" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="http" />
+            <transfer operation="delete" delete-key="delete-https-fail" 
if-key="hive.server2.transport.mode" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-prop-fail" 
if-key="non.existent" if-type="hive-site" if-value="https" />
+            <transfer operation="delete" delete-key="delete-type-fail" 
if-key="non.existent" if-type="non.existent" if-value="" />
+            <transfer operation="delete" delete-key="delete-null-if-value" 
if-key="non.existent" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-key" 
if-key="" if-type="non.existent" />
+            <transfer operation="delete" delete-key="delete-blank-if-type" 
if-key="non.existent" if-type="" />
+            <transfer operation="delete" delete-key="delete-thrift" 
if-key="hive.server2.thrift.port" if-type="hive-site" if-value="10001" />
+          </definition>
+        </changes>
+      </component>
+    </service>
+  </services>
+
+</upgrade-config-changes>
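
The definitions in this new config-upgrade.xml are consumed by id from the HDP 2.2.0 upgrade packs changed later in this commit, rather than being repeated inline. A minimal sketch of how one reference resolves (both fragments appear in this diff; the <pre-upgrade> wrapper is shown only for context):

    <!-- upgrade_test_checks.xml (upgrade pack): -->
    <pre-upgrade>
      <task xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade"/>
    </pre-upgrade>

    <!-- config-upgrade.xml (this file): -->
    <definition xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade">
      <type>hdfs-site</type>
      <set key="myproperty" value="mynewvalue"/>
    </definition>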

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
index 5271ae6..34ebe32 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test.xml
@@ -17,7 +17,20 @@
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
   <target>2.2.*</target>
-  
+  <target-stack>HDP-2.2.0</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre 
{{direction.text.proper}}">
       <execute-stage title="Confirm 1">
@@ -126,7 +139,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="configure" />
@@ -149,7 +162,7 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -160,7 +173,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
index 892b9b4..14c68be 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.2.0/upgrades/upgrade_test_checks.xml
@@ -16,8 +16,21 @@
    limitations under the License.
 -->
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-  <target>2.2.*</target>
-  
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2.1</target-stack>
+  <type>ROLLING</type>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    <check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    <check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    <check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Pre Upgrade" 
stage="pre">
       <execute-stage title="Confirm 1">
@@ -125,10 +138,10 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
-          <task xsi:type="configure" />
+          <task xsi:type="configure" id="hdp_2_2_0_zk_post_upgrade"/>
         </post-upgrade>
       </component>
     </service>
@@ -138,16 +151,13 @@
           <task xsi:type="execute" hosts="master">
             <command>su - {hdfs-user} -c 'dosomething'</command>
           </task>
-          <task xsi:type="configure">
-            <type>hdfs-site</type>
-            <set key="myproperty" value="mynewvalue"/>
-          </task>
+          <task xsi:type="configure" id="hdp_2_2_0_nn_pre_upgrade"/>
           <task xsi:type="manual">
             <message>Update your database</message>
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="execute">
@@ -158,7 +168,7 @@
       <component name="DATANODE">
         <pre-downgrade />
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-downgrade>
           <task xsi:type="manual">

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/browser/HiveBrowserService.java.orig
----------------------------------------------------------------------
diff --git a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/browser/HiveBrowserService.java.orig b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/browser/HiveBrowserService.java.orig
deleted file mode 100644
index 55919a7..0000000
--- a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/browser/HiveBrowserService.java.orig
+++ /dev/null
@@ -1,282 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.view.hive.resources.browser;
-
-import org.apache.ambari.view.ViewContext;
-import org.apache.ambari.view.ViewResourceHandler;
-import org.apache.ambari.view.hive.client.ColumnDescription;
-import org.apache.ambari.view.hive.client.Cursor;
-import org.apache.ambari.view.hive.client.IConnectionFactory;
-import org.apache.ambari.view.hive.resources.jobs.ResultsPaginationController;
-import org.apache.ambari.view.hive.utils.BadRequestFormattedException;
-import org.apache.ambari.view.hive.utils.ServiceFormattedException;
-import org.apache.ambari.view.hive.utils.SharedObjectsFactory;
-import org.apache.commons.collections4.map.PassiveExpiringMap;
-import org.apache.hive.service.cli.thrift.TSessionHandle;
-import org.json.simple.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.inject.Inject;
-import javax.ws.rs.*;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-
-/**
- * Database access resource
- */
-public class HiveBrowserService {
-  @Inject
-  ViewResourceHandler handler;
-  @Inject
-  protected ViewContext context;
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(HiveBrowserService.class);
-
-  private static final long EXPIRING_TIME = 10*60*1000;  // 10 minutes
-  private static Map<String, Cursor> resultsCache;
-  private IConnectionFactory connectionFactory;
-
-  public static Map<String, Cursor> getResultsCache() {
-    if (resultsCache == null) {
-      PassiveExpiringMap<String, Cursor> resultsCacheExpiringMap =
-          new PassiveExpiringMap<String, Cursor>(EXPIRING_TIME);
-      resultsCache = Collections.synchronizedMap(resultsCacheExpiringMap);
-    }
-    return resultsCache;
-  }
-
-  private IConnectionFactory getConnectionFactory() {
-    if (connectionFactory == null)
-      connectionFactory = new SharedObjectsFactory(context);
-    return new SharedObjectsFactory(context);
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response databases(@QueryParam("like")String like,
-                            @QueryParam("first") String fromBeginning,
-                            @QueryParam("count") Integer count,
-                            @QueryParam("columns") final String 
requestedColumns) {
-    if (like == null)
-      like = "*";
-    else
-      like = "*" + like + "*";
-    String curl = null;
-    try {
-      JSONObject response = new JSONObject();
-      TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-      List<String> tables = 
getConnectionFactory().getHiveConnection().ddl().getDBList(session, like);
-      response.put("databases", tables);
-      return Response.ok(response).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database.page")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response databasesPaginated(@QueryParam("like")String like,
-                            @QueryParam("first") String fromBeginning,
-                            @QueryParam("count") Integer count,
-                            @QueryParam("searchId") String searchId,
-                            @QueryParam("format") String format,
-                            @QueryParam("columns") final String 
requestedColumns) {
-    if (like == null)
-      like = "*";
-    else
-      like = "*" + like + "*";
-    String curl = null;
-    try {
-      final String finalLike = like;
-      return ResultsPaginationController.getInstance(context)
-          .request("databases", searchId, false, fromBeginning, count, format,
-                  new Callable<Cursor>() {
-                    @Override
-                    public Cursor call() throws Exception {
-                      TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-                      return 
getConnectionFactory().getHiveConnection().ddl().getDBListCursor(session, 
finalLike);
-                    }
-                  }).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database/{db}/table")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response tablesInDatabase(@PathParam("db") String db,
-                                   @QueryParam("like")String like,
-                                   @QueryParam("first") String fromBeginning,
-                                   @QueryParam("count") Integer count,
-                                   @QueryParam("columns") final String 
requestedColumns) {
-    if (like == null)
-      like = "*";
-    else
-      like = "*" + like + "*";
-    String curl = null;
-    try {
-      JSONObject response = new JSONObject();
-      TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-      List<String> tables = 
getConnectionFactory().getHiveConnection().ddl().getTableList(session, db, 
like);
-      response.put("tables", tables);
-      response.put("database", db);
-      return Response.ok(response).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database/{db}/table.page")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response tablesInDatabasePaginated(@PathParam("db") final String db,
-                                   @QueryParam("like")String like,
-                                   @QueryParam("first") String fromBeginning,
-                                   @QueryParam("count") Integer count,
-                                   @QueryParam("searchId") String searchId,
-                                   @QueryParam("format") String format,
-                                   @QueryParam("columns") final String 
requestedColumns) {
-    if (like == null)
-      like = "*";
-    else
-      like = "*" + like + "*";
-    String curl = null;
-    try {
-      final String finalLike = like;
-      return ResultsPaginationController.getInstance(context)
-          .request(db + ":tables", searchId, false, fromBeginning, count, 
format,
-                  new Callable<Cursor>() {
-                    @Override
-                    public Cursor call() throws Exception {
-                      TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-                      Cursor cursor = 
getConnectionFactory().getHiveConnection().ddl().getTableListCursor(session, 
db, finalLike);
-                      cursor.selectColumns(requestedColumns);
-                      return cursor;
-                    }
-                  }).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database/{db}/table/{table}")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response describeTable(@PathParam("db") String db,
-                                @PathParam("table") String table,
-                                @QueryParam("like") String like,
-                                @QueryParam("columns") String requestedColumns,
-                                @QueryParam("extended") String extended) {
-    boolean extendedTableDescription = (extended != null && 
extended.equals("true"));
-    String curl = null;
-    try {
-      JSONObject response = new JSONObject();
-      TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-      List<ColumnDescription> columnDescriptions = 
getConnectionFactory().getHiveConnection().ddl()
-          .getTableDescription(session, db, table, like, 
extendedTableDescription);
-      response.put("columns", columnDescriptions);
-      response.put("database", db);
-      response.put("table", table);
-      return Response.ok(response).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-
-  /**
-   * Returns list of databases
-   */
-  @GET
-  @Path("database/{db}/table/{table}.page")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response describeTablePaginated(@PathParam("db") final String db,
-                                         @PathParam("table") final String 
table,
-                                         @QueryParam("like") final String like,
-                                         @QueryParam("first") String 
fromBeginning,
-                                         @QueryParam("searchId") String 
searchId,
-                                         @QueryParam("count") Integer count,
-                                         @QueryParam("format") String format,
-                                         @QueryParam("columns") final String 
requestedColumns) {
-    String curl = null;
-    try {
-      return ResultsPaginationController.getInstance(context)
-          .request(db + ":tables:" + table + ":columns", searchId, false, 
fromBeginning, count, format,
-              new Callable<Cursor>() {
-                @Override
-                public Cursor call() throws Exception {
-                  TSessionHandle session = 
getConnectionFactory().getHiveConnection().getOrCreateSessionByTag("DDL");
-                  Cursor cursor = 
getConnectionFactory().getHiveConnection().ddl().
-                      getTableDescriptionCursor(session, db, table, like);
-                  cursor.selectColumns(requestedColumns);
-                  return cursor;
-                }
-              }).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (IllegalArgumentException ex) {
-      throw new BadRequestFormattedException(ex.getMessage(), ex);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex, curl);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/JobService.java.orig
----------------------------------------------------------------------
diff --git a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/JobService.java.orig b/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/JobService.java.orig
deleted file mode 100644
index ad46e33..0000000
--- a/contrib/views/hive/src/main/java/org/apache/ambari/view/hive/resources/jobs/JobService.java.orig
+++ /dev/null
@@ -1,476 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.view.hive.resources.jobs;
-
-import org.apache.ambari.view.ViewResourceHandler;
-import org.apache.ambari.view.hive.BaseService;
-import org.apache.ambari.view.hive.backgroundjobs.BackgroundJobController;
-import org.apache.ambari.view.hive.client.Connection;
-import org.apache.ambari.view.hive.client.Cursor;
-import org.apache.ambari.view.hive.client.HiveClientException;
-import org.apache.ambari.view.hive.persistence.utils.ItemNotFound;
-import org.apache.ambari.view.hive.resources.jobs.atsJobs.IATSParser;
-import org.apache.ambari.view.hive.resources.jobs.viewJobs.*;
-import org.apache.ambari.view.hive.utils.*;
-import org.apache.commons.beanutils.PropertyUtils;
-import org.apache.commons.csv.CSVFormat;
-import org.apache.commons.csv.CSVPrinter;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.json.simple.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.inject.Inject;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.*;
-import javax.ws.rs.core.*;
-import java.io.*;
-import java.lang.reflect.InvocationTargetException;
-import java.util.*;
-import java.util.concurrent.Callable;
-
-/**
- * Servlet for queries
- * API:
- * GET /:id
- *      read job
- * POST /
- *      create new job
- *      Required: title, queryFile
- * GET /
- *      get all Jobs of current user
- */
-public class JobService extends BaseService {
-  @Inject
-  ViewResourceHandler handler;
-
-  protected JobResourceManager resourceManager;
-  private IOperationHandleResourceManager opHandleResourceManager;
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(JobService.class);
-  private Aggregator aggregator;
-
-  protected synchronized JobResourceManager getResourceManager() {
-    if (resourceManager == null) {
-      SharedObjectsFactory connectionsFactory = getSharedObjectsFactory();
-      resourceManager = new JobResourceManager(connectionsFactory, context);
-    }
-    return resourceManager;
-  }
-
-  protected IOperationHandleResourceManager 
getOperationHandleResourceManager() {
-    if (opHandleResourceManager == null) {
-      opHandleResourceManager = new 
OperationHandleResourceManager(getSharedObjectsFactory());
-    }
-    return opHandleResourceManager;
-  }
-
-  protected Aggregator getAggregator() {
-    if (aggregator == null) {
-      IATSParser atsParser = getSharedObjectsFactory().getATSParser();
-      aggregator = new Aggregator(getResourceManager(), 
getOperationHandleResourceManager(), atsParser);
-    }
-    return aggregator;
-  }
-
-  protected void setAggregator(Aggregator aggregator) {
-    this.aggregator = aggregator;
-  }
-
-  /**
-   * Get single item
-   */
-  @GET
-  @Path("{jobId}")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response getOne(@PathParam("jobId") String jobId) {
-    try {
-      JobController jobController = getResourceManager().readController(jobId);
-
-      JSONObject jsonJob = jsonObjectFromJob(jobController);
-
-      return Response.ok(jsonJob).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  private JSONObject jsonObjectFromJob(JobController jobController) throws 
IllegalAccessException, NoSuchMethodException, InvocationTargetException {
-    Job hiveJob = jobController.getJobPOJO();
-
-    Job mergedJob;
-    try {
-      mergedJob = getAggregator().readATSJob(hiveJob);
-    } catch (ItemNotFound itemNotFound) {
-      throw new ServiceFormattedException("E010 Job not found", itemNotFound);
-    }
-    Map createdJobMap = PropertyUtils.describe(mergedJob);
-    createdJobMap.remove("class"); // no need to show Bean class on client
-
-    JSONObject jobJson = new JSONObject();
-    jobJson.put("job", createdJobMap);
-    return jobJson;
-  }
-
-  /**
-   * Get job results in csv format
-   */
-  @GET
-  @Path("{jobId}/results/csv")
-  @Produces("text/csv")
-  public Response getResultsCSV(@PathParam("jobId") String jobId,
-                                @Context HttpServletResponse response,
-                                @QueryParam("fileName") String fileName,
-                                @QueryParam("columns") final String 
requestedColumns) {
-    try {
-      JobController jobController = getResourceManager().readController(jobId);
-      final Cursor resultSet = jobController.getResults();
-      resultSet.selectColumns(requestedColumns);
-
-      StreamingOutput stream = new StreamingOutput() {
-        @Override
-        public void write(OutputStream os) throws IOException, 
WebApplicationException {
-          Writer writer = new BufferedWriter(new OutputStreamWriter(os));
-          CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.DEFAULT);
-          try {
-
-            try {
-              csvPrinter.printRecord(resultSet.getHeadersRow().getRow());
-            } catch (HiveClientException e) {
-              LOG.error("Error on reading results header", e);
-            }
-
-            while (resultSet.hasNext()) {
-              csvPrinter.printRecord(resultSet.next().getRow());
-              writer.flush();
-            }
-          } finally {
-            writer.close();
-          }
-        }
-      };
-
-      if (fileName == null || fileName.isEmpty()) {
-        fileName = "results.csv";
-      }
-
-      return Response.ok(stream).
-          header("Content-Disposition", String.format("attachment; 
filename=\"%s\"", fileName)).
-          build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Get job results in csv format
-   */
-  @GET
-  @Path("{jobId}/results/csv/saveToHDFS")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response getResultsToHDFS(@PathParam("jobId") String jobId,
-                                   @QueryParam("commence") String commence,
-                                   @QueryParam("file") final String targetFile,
-                                   @QueryParam("stop") final String stop,
-                                   @QueryParam("columns") final String 
requestedColumns,
-                                   @Context HttpServletResponse response) {
-    try {
-      final JobController jobController = 
getResourceManager().readController(jobId);
-
-      String backgroundJobId = "csv" + 
String.valueOf(jobController.getJob().getId());
-      if (commence != null && commence.equals("true")) {
-        if (targetFile == null)
-          throw new MisconfigurationFormattedException("targetFile should not 
be empty");
-        
BackgroundJobController.getInstance(context).startJob(String.valueOf(backgroundJobId),
 new Runnable() {
-          @Override
-          public void run() {
-
-            try {
-              Cursor resultSet = jobController.getResults();
-              resultSet.selectColumns(requestedColumns);
-
-              FSDataOutputStream stream = 
getSharedObjectsFactory().getHdfsApi().create(targetFile, true);
-              Writer writer = new BufferedWriter(new 
OutputStreamWriter(stream));
-              CSVPrinter csvPrinter = new CSVPrinter(writer, 
CSVFormat.DEFAULT);
-              try {
-                while (resultSet.hasNext() && 
!Thread.currentThread().isInterrupted()) {
-                  csvPrinter.printRecord(resultSet.next().getRow());
-                  writer.flush();
-                }
-              } finally {
-                writer.close();
-              }
-              stream.close();
-
-            } catch (IOException e) {
-              throw new ServiceFormattedException("F010 Could not write CSV to 
HDFS for job#" + jobController.getJob().getId(), e);
-            } catch (InterruptedException e) {
-              throw new ServiceFormattedException("F010 Could not write CSV to 
HDFS for job#" + jobController.getJob().getId(), e);
-            } catch (ItemNotFound itemNotFound) {
-              throw new NotFoundFormattedException("E020 Job results are 
expired", itemNotFound);
-            }
-
-          }
-        });
-      }
-
-      if (stop != null && stop.equals("true")) {
-        
BackgroundJobController.getInstance(context).interrupt(backgroundJobId);
-      }
-
-      JSONObject object = new JSONObject();
-      object.put("stopped", 
BackgroundJobController.getInstance(context).isInterrupted(backgroundJobId));
-      object.put("jobId", jobController.getJob().getId());
-      object.put("backgroundJobId", backgroundJobId);
-      object.put("operationType", "CSV2HDFS");
-      object.put("status", 
BackgroundJobController.getInstance(context).state(backgroundJobId).toString());
-
-      return Response.ok(object).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Get next results page
-   */
-  @GET
-  @Path("{jobId}/results")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response getResults(@PathParam("jobId") String jobId,
-                             @QueryParam("first") String fromBeginning,
-                             @QueryParam("count") Integer count,
-                             @QueryParam("searchId") String searchId,
-                             @QueryParam("format") String format,
-                             @QueryParam("columns") final String 
requestedColumns) {
-    try {
-      final JobController jobController = 
getResourceManager().readController(jobId);
-      if (!jobController.hasResults()) {
-        return ResultsPaginationController.emptyResponse().build();
-      }
-
-      return ResultsPaginationController.getInstance(context)
-           .request(jobId, searchId, true, fromBeginning, count, format,
-               new Callable<Cursor>() {
-                 @Override
-                 public Cursor call() throws Exception {
-                   Cursor cursor = jobController.getResults();
-                   cursor.selectColumns(requestedColumns);
-                   return cursor;
-                 }
-               }).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Renew expiration time for results
-   */
-  @GET
-  @Path("{jobId}/results/keepAlive")
-  public Response keepAliveResults(@PathParam("jobId") String jobId,
-                             @QueryParam("first") String fromBeginning,
-                             @QueryParam("count") Integer count) {
-    try {
-      if (!ResultsPaginationController.getInstance(context).keepAlive(jobId, 
ResultsPaginationController.DEFAULT_SEARCH_ID)) {
-        throw new NotFoundFormattedException("Results already expired", null);
-      }
-      return Response.ok().build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Get progress info
-   */
-  @GET
-  @Path("{jobId}/progress")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response getProgress(@PathParam("jobId") String jobId) {
-    try {
-      final JobController jobController = 
getResourceManager().readController(jobId);
-
-      ProgressRetriever.Progress progress = new 
ProgressRetriever(jobController.getJob(), getSharedObjectsFactory()).
-          getProgress();
-
-      return Response.ok(progress).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Delete single item
-   */
-  @DELETE
-  @Path("{id}")
-  public Response delete(@PathParam("id") String id,
-                         @QueryParam("remove") final String remove) {
-    try {
-      JobController jobController;
-      try {
-        jobController = getResourceManager().readController(id);
-      } catch (ItemNotFound itemNotFound) {
-        throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-      }
-      jobController.cancel();
-      if (remove != null && remove.compareTo("true") == 0) {
-        getResourceManager().delete(id);
-      }
-      return Response.status(204).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Get all Jobs
-   */
-  @GET
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response getList() {
-    try {
-      LOG.debug("Getting all job");
-      List<Job> allJobs = getAggregator().readAll(context.getUsername());
-      for(Job job : allJobs) {
-        job.setSessionTag(null);
-      }
-
-      JSONObject object = new JSONObject();
-      object.put("jobs", allJobs);
-      return Response.ok(object).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Create job
-   */
-  @POST
-  @Consumes(MediaType.APPLICATION_JSON)
-  public Response create(JobRequest request, @Context HttpServletResponse 
response,
-                         @Context UriInfo ui) {
-    try {
-      Map jobInfo = PropertyUtils.describe(request.job);
-      Job job = new JobImpl(jobInfo);
-      getResourceManager().create(job);
-
-      JobController createdJobController = 
getResourceManager().readController(job.getId());
-      createdJobController.submit();
-      getResourceManager().saveIfModified(createdJobController);
-
-      response.setHeader("Location",
-          String.format("%s/%s", ui.getAbsolutePath().toString(), 
job.getId()));
-
-      JSONObject jobObject = jsonObjectFromJob(createdJobController);
-
-      return Response.ok(jobObject).status(201).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (ItemNotFound itemNotFound) {
-      throw new NotFoundFormattedException(itemNotFound.getMessage(), 
itemNotFound);
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Invalidate session
-   */
-  @DELETE
-  @Path("sessions/{sessionTag}")
-  public Response invalidateSession(@PathParam("sessionTag") String 
sessionTag) {
-    try {
-      Connection connection = getSharedObjectsFactory().getHiveConnection();
-      connection.invalidateSessionByTag(sessionTag);
-      return Response.ok().build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Session status
-   */
-  @GET
-  @Path("sessions/{sessionTag}")
-  @Produces(MediaType.APPLICATION_JSON)
-  public Response sessionStatus(@PathParam("sessionTag") String sessionTag) {
-    try {
-      Connection connection = getSharedObjectsFactory().getHiveConnection();
-
-      JSONObject session = new JSONObject();
-      session.put("sessionTag", sessionTag);
-      try {
-        connection.getSessionByTag(sessionTag);
-        session.put("actual", true);
-      } catch (HiveClientException ex) {
-        session.put("actual", false);
-      }
-
-      JSONObject status = new JSONObject();
-      status.put("session", session);
-      return Response.ok(status).build();
-    } catch (WebApplicationException ex) {
-      throw ex;
-    } catch (Exception ex) {
-      throw new ServiceFormattedException(ex.getMessage(), ex);
-    }
-  }
-
-  /**
-   * Wrapper object for json mapping
-   */
-  public static class JobRequest {
-    public JobImpl job;
-  }
-}
