http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 2c152e4..3eac5ce 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -22,21 +22,48 @@ import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.commons.lang.StringUtils;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.persistence.EntityManager;
 import javax.persistence.Query;
+import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.MessageFormat;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
@@ -50,6 +77,11 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
 
+  public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
+  public static final String UPGRADE_TYPE_COL = "upgrade_type";
+  public static final String UPGRADE_TABLE = "upgrade";
+  public static final String REPO_VERSION_TABLE = "repo_version";
+
 
   /**
    * Logger.
@@ -59,6 +91,11 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   @Inject
   DaoUtils daoUtils;
 
+  @Inject
+  private RepositoryVersionDAO repositoryVersionDAO;
+
+  @Inject
+  private ClusterDAO clusterDAO;
 
   // ----- Constructors ------------------------------------------------------
 
@@ -70,8 +107,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   @Inject
   public UpgradeCatalog213(Injector injector) {
     super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
+    this.injector = injector;
   }
 
   // ----- UpgradeCatalog ----------------------------------------------------
@@ -110,12 +146,341 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
+    // This one actually performs both DDL and DML, so it needs to be first.
+    executeStackUpgradeDDLUpdates();
+    bootstrapRepoVersionForHDP21();
+
     addMissingConfigs();
     updateAMSConfigs();
     updateAlertDefinitions();
   }
 
   /**
+   * Move the upgrade_package column from the repo_version table to the upgrade table as follows:
+   * add the upgrade_package column to the upgrade table as String 255 and nullable,
+   * populate the column in the upgrade table,
+   * drop the column from the repo_version table,
+   * and make the column in the upgrade table non-nullable.
+   * This has to be called as part of DML and not DDL since the persistence service has to be started.
+   * @throws AmbariException
+   * @throws SQLException
+   */
+  @Transactional
+  private void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
+    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
+
+    // Add columns
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Adding upgrade_package column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
+    }
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
+      LOG.info("Adding upgrade_type column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
+    }
+
+    // Populate values in upgrade table.
+    boolean success = this.populateUpgradeTable();
+
+    if (!success) {
+      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Dropping upgrade_package column from repo_version table.");
+      dbAccessor.dropColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL);
+
+      // Now that all the values have been populated, make the added column non-nullable.
+      LOG.info("Making upgrade_package column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_PACKAGE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, false));
+      }
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_TYPE_COL)) {
+      // Now that all the values have been populated, make the added column non-nullable.
+      LOG.info("Making upgrade_type column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_TYPE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, false));
+      }
+    }
+  }
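For reference, the net effect of executeStackUpgradeDDLUpdates() on the schema can be sketched as PostgreSQL-style DDL. This is only an illustration of what the catalog issues through DBAccessor, not a script shipped with the commit, and the exact syntax differs per database (the Derby branch above is one such variant):

    -- add the new columns as nullable so existing upgrade rows remain valid
    ALTER TABLE upgrade ADD COLUMN upgrade_package VARCHAR(255);
    ALTER TABLE upgrade ADD COLUMN upgrade_type VARCHAR(32);
    -- populate the new columns for existing rows (see populateUpgradeTable),
    -- then move ownership of upgrade_package away from repo_version
    ALTER TABLE repo_version DROP COLUMN upgrade_package;
    -- finally tighten the constraints once every row has a value
    ALTER TABLE upgrade ALTER COLUMN upgrade_package SET NOT NULL;
    ALTER TABLE upgrade ALTER COLUMN upgrade_type SET NOT NULL;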
+
+  /**
+   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
+   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
+   * whereas the upgrade_package will be calculated.
+   * @return {@code true} on success, and {@code false} otherwise.
+   */
+  private boolean populateUpgradeTable() {
+    boolean success = true;
+    Statement statement = null;
+    ResultSet rs = null;
+    try {
+      statement = dbAccessor.getConnection().createStatement();
+      if (statement != null) {
+        // Need to use SQL since the schema is changing and some of the columns have not yet been added.
+        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
+        if (rs != null) {
+          try {
+            while (rs.next()) {
+              final long upgradeId = rs.getLong("upgrade_id");
+              final long clusterId = rs.getLong("cluster_id");
+              final String fromVersion = rs.getString("from_version");
+              final String toVersion = rs.getString("to_version");
+              final Direction direction = Direction.valueOf(rs.getString("direction"));
+              // These two values are likely null.
+              String upgradePackage = rs.getString("upgrade_package");
+              String upgradeType = rs.getString("upgrade_type");
+
+              LOG.info(MessageFormat.format("Populating rows for the upgrade 
table record with " +
+                      "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, 
from_version: {2}, to_version: {3}, direction: {4}",
+                  upgradeId, clusterId, fromVersion, toVersion, direction));
+
+              // Set all upgrades that have been done so far to type "rolling"
+              if (StringUtils.isEmpty(upgradeType)) {
+                LOG.info("Updating the record's upgrade_type to " + 
UpgradeType.ROLLING);
+                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" 
+ UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
+              }
+
+              if (StringUtils.isEmpty(upgradePackage)) {
+                String version = null;
+                StackEntity stack = null;
+
+                if (direction == Direction.UPGRADE) {
+                  version = toVersion;
+                } else if (direction == Direction.DOWNGRADE) {
+                  // TODO AMBARI-12698, this is going to be a problem.
+                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
+                  // doesn't swap. E.g.,
+                  //  upgrade_id | from_version |  to_version  | direction
+                  // ------------+--------------+--------------+----------
+                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
+                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
+                  version = fromVersion;
+                }
+
+                ClusterEntity cluster = clusterDAO.findById(clusterId);
+
+                if (null != cluster) {
+                  stack = cluster.getDesiredStack();
+                  upgradePackage = this.calculateUpgradePackage(stack, version);
+                } else {
+                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
+                }
+
+                if (!StringUtils.isEmpty(upgradePackage)) {
+                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
+                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
+                } else {
+                  success = false;
+                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
+                }
+              }
+            }
+          } catch (Exception e) {
+            success = false;
+            e.printStackTrace();
+            LOG.error("Unable to populate the upgrade_type and upgrade_package 
columns of the upgrade table. " + e);
+          }
+        }
+      }
+    } catch (Exception e) {
+      success = false;
+      e.printStackTrace();
+      LOG.error("Failed to retrieve records from the upgrade table to populate 
the upgrade_type and upgrade_package columns. Exception: " + e);
+    } finally {
+      try {
+        if (rs != null) {
+          rs.close();
+        }
+        if (statement != null) {
+          statement.close();
+        }
+      } catch (SQLException e) {
+        // Ignore failures while closing the result set and statement.
+      }
+    }
+    return success;
+  }
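Because the JPA entities do not yet know about the new columns, populateUpgradeTable() works through plain JDBC. For a hypothetical existing record with upgrade_id 1 that was upgraded with the upgrade pack upgrade-2.3 (example values only, not taken from the commit), the statements it generates would look like:

    UPDATE upgrade SET upgrade_type = 'ROLLING' WHERE upgrade_id = 1;
    UPDATE upgrade SET upgrade_package = 'upgrade-2.3' WHERE upgrade_id = 1;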
+
+  /**
+   * Find the single Repo Version for the given stack and version, and return its upgrade_package column.
+   * Because the upgrade_package column is going to be removed from this entity, raw SQL must be used
+   * instead of the entity class.
+   * @param stack Stack
+   * @param version Stack version
+   * @return The value of the upgrade_package column, or null if not found.
+   */
+
+  private String calculateUpgradePackage(StackEntity stack, String version) {
+    String upgradePackage = null;
+    // Find the corresponding repo_version, and extract its upgrade_package
+    if (null != version && null != stack) {
+      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
+
+      Statement statement = null;
+      ResultSet rs = null;
+      try {
+        statement = dbAccessor.getConnection().createStatement();
+        if (statement != null) {
+          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
+          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
+          if (rs != null && rs.next()) {
+            upgradePackage = rs.getString("upgrade_package");
+          }
+        }
+      } catch (Exception e) {
+        LOG.error("Failed to retrieve upgrade_package for repo_version record 
with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
+      } finally {
+        try {
+          if (rs != null) {
+            rs.close();
+          }
+          if (statement != null) {
+            statement.close();
+          }
+        } catch (SQLException e) {
+          // Ignore failures while closing the result set and statement.
+        }
+      }
+    }
+    return upgradePackage;
+  }
+
+  /**
+   * If the cluster is still on HDP 2.1, no repo versions exist yet, so bootstrap the HDP 2.1 repo version
+   * and mark it as CURRENT in the cluster_version table for the cluster, as well as in the host_version table
+   * for all hosts.
+   */
+  @Transactional
+  public void bootstrapRepoVersionForHDP21() throws AmbariException, SQLException {
+    final String hardcodedInitialVersion = "2.1.0.0-0001";
+    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    RepositoryVersionHelper repositoryVersionHelper = injector.getInstance(RepositoryVersionHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
+    ClusterVersionDAO clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
+    HostVersionDAO hostVersionDAO = injector.getInstance(HostVersionDAO.class);
+
+    Clusters clusters = amc.getClusters();
+    if (clusters == null) {
+      LOG.error("Unable to get Clusters entity.");
+      return;
+    }
+
+    for (Cluster cluster : clusters.getClusters().values()) {
+      ClusterEntity clusterEntity = clusterDAO.findByName(cluster.getClusterName());
+      final StackId stackId = cluster.getCurrentStackVersion();
+      LOG.info(MessageFormat.format("Analyzing cluster {0}, currently at stack 
{1} and version {2}",
+          cluster.getClusterName(), stackId.getStackName(), 
stackId.getStackVersion()));
+
+      if (stackId.getStackName().equalsIgnoreCase("HDP") && 
stackId.getStackVersion().equalsIgnoreCase("2.1")) {
+        final StackInfo stackInfo = 
ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+        StackEntity stackEntity = stackDAO.find(stackId.getStackName(), 
stackId.getStackVersion());
+
+        LOG.info("Bootstrapping the versions since using HDP-2.1");
+
+        // The actual value is not known, so use this.
+        String displayName = stackId.getStackName() + "-" + hardcodedInitialVersion;
+
+        // However, the Repo URLs should be correct.
+        String operatingSystems = repositoryVersionHelper.serializeOperatingSystems(stackInfo.getRepositories());
+
+        // Create the Repo Version if it doesn't already exist.
+        RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByDisplayName(displayName);
+        if (null != repoVersionEntity) {
+          LOG.info(MessageFormat.format("A Repo Version already exists with Display Name: {0}", displayName));
+        } else {
+          final long repoVersionIdSeq = repositoryVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("repo_version_id_seq", repoVersionIdSeq, false);
+
+          repoVersionEntity = repositoryVersionDAO.create(
+              stackEntity, hardcodedInitialVersion, displayName, operatingSystems);
+          LOG.info(MessageFormat.format("Created Repo Version with ID: {0,number,#}\n, Display Name: {1}, Repo URLs: {2}\n",
+              repoVersionEntity.getId(), displayName, operatingSystems));
+        }
+
+        // Create the Cluster Version if it doesn't already exist.
+        ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(cluster.getClusterName(),
+            stackId, hardcodedInitialVersion);
+
+        if (null != clusterVersionEntity) {
+          LOG.info(MessageFormat.format("A Cluster Version version for 
cluster: {0}, version: {1}, already exists; its state is {2}.",
+              cluster.getClusterName(), 
clusterVersionEntity.getRepositoryVersion().getVersion(), 
clusterVersionEntity.getState()));
+
+          // If there are not CURRENT cluster versions, make this one the 
CURRENT one.
+          if (clusterVersionEntity.getState() != 
RepositoryVersionState.CURRENT &&
+              
clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), 
RepositoryVersionState.CURRENT).isEmpty()) {
+            clusterVersionEntity.setState(RepositoryVersionState.CURRENT);
+            clusterVersionDAO.merge(clusterVersionEntity);
+          }
+        } else {
+          final long clusterVersionIdSeq = clusterVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("cluster_version_id_seq", clusterVersionIdSeq, false);
+
+          clusterVersionEntity = clusterVersionDAO.create(clusterEntity, repoVersionEntity, RepositoryVersionState.CURRENT,
+              System.currentTimeMillis(), System.currentTimeMillis(), "admin");
+          LOG.info(MessageFormat.format("Created Cluster Version with ID: {0,number,#}, cluster: {1}, version: {2}, state: {3}.",
+              clusterVersionEntity.getId(), cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(),
+              clusterVersionEntity.getState()));
+        }
+
+        // Create the Host Versions if they don't already exist.
+        Collection<HostEntity> hosts = clusterEntity.getHostEntities();
+        boolean addedAtLeastOneHost = false;
+        if (null != hosts && !hosts.isEmpty()) {
+          for (HostEntity hostEntity : hosts) {
+            HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(cluster.getClusterName(),
+                stackId, hardcodedInitialVersion, hostEntity.getHostName());
+
+            if (null != hostVersionEntity) {
+              LOG.info(MessageFormat.format("A Host Version version for 
cluster: {0}, version: {1}, host: {2}, already exists; its state is {3}.",
+                  cluster.getClusterName(), 
hostVersionEntity.getRepositoryVersion().getVersion(),
+                  hostEntity.getHostName(), hostVersionEntity.getState()));
+
+              if (hostVersionEntity.getState() != 
RepositoryVersionState.CURRENT &&
+                  
hostVersionDAO.findByClusterHostAndState(cluster.getClusterName(), 
hostEntity.getHostName(),
+                      RepositoryVersionState.CURRENT).isEmpty()) {
+                hostVersionEntity.setState(RepositoryVersionState.CURRENT);
+                hostVersionDAO.merge(hostVersionEntity);
+              }
+            } else {
+              // This should only be done the first time.
+              if (!addedAtLeastOneHost) {
+                final long hostVersionIdSeq = hostVersionDAO.findMaxId("id");
+                // Safe to attempt to add the sequence if it doesn't exist already.
+                addSequence("host_version_id_seq", hostVersionIdSeq, false);
+                addedAtLeastOneHost = true;
+              }
+
+              hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, RepositoryVersionState.CURRENT);
+              hostVersionDAO.create(hostVersionEntity);
+              LOG.info(MessageFormat.format("Created Host Version with ID: 
{0,number,#}, cluster: {1}, version: {2}, host: {3}, state: {4}.",
+                  hostVersionEntity.getId(), cluster.getClusterName(), 
hostVersionEntity.getRepositoryVersion().getVersion(),
+                  hostEntity.getHostName(), hostVersionEntity.getState()));
+            }
+          }
+        } else {
+          LOG.info(MessageFormat.format("Not inserting any Host Version 
records since cluster {0} does not have any hosts.",
+              cluster.getClusterName()));
+        }
+      }
+    }
+  }
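After the catalog runs on an HDP 2.1 cluster, the bootstrap can be sanity-checked by looking for the hard-coded repo version; a minimal query, assuming the default schema from the CREATE scripts below:

    SELECT repo_version_id, version, display_name
    FROM repo_version
    WHERE version = '2.1.0.0-0001';

The cluster_version and host_version rows created above reference this repo_version row and should be in state CURRENT.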
+  
+  /**
    * Modifies the JSON of some of the alert definitions which have changed
    * between Ambari versions.
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index b803c37..a4d0c42 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -541,7 +541,6 @@ CREATE TABLE repo_version (
   stack_id BIGINT NOT NULL,
   version VARCHAR(255) NOT NULL,
   display_name VARCHAR(128) NOT NULL,
-  upgrade_package VARCHAR(255) NOT NULL,
   repositories LONGTEXT NOT NULL,
   PRIMARY KEY(repo_version_id)
 );
@@ -883,6 +882,8 @@ CREATE TABLE upgrade (
   from_version VARCHAR(255) DEFAULT '' NOT NULL,
   to_version VARCHAR(255) DEFAULT '' NOT NULL,
   direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+  upgrade_package VARCHAR(255) NOT NULL,
+  upgrade_type VARCHAR(32) NOT NULL,
   PRIMARY KEY (upgrade_id),
   FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
   FOREIGN KEY (request_id) REFERENCES request(request_id)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index c3195e5..016d0c4 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -530,7 +530,6 @@ CREATE TABLE repo_version (
   stack_id NUMBER(19) NOT NULL,
   version VARCHAR2(255) NOT NULL,
   display_name VARCHAR2(128) NOT NULL,
-  upgrade_package VARCHAR2(255) NOT NULL,
   repositories CLOB NOT NULL,
   PRIMARY KEY(repo_version_id)
 );
@@ -872,6 +871,8 @@ CREATE TABLE upgrade (
   from_version VARCHAR2(255) DEFAULT '' NOT NULL,
   to_version VARCHAR2(255) DEFAULT '' NOT NULL,
   direction VARCHAR2(255) DEFAULT 'UPGRADE' NOT NULL,
+  upgrade_package VARCHAR2(255) NOT NULL,
+  upgrade_type VARCHAR2(32) NOT NULL,
   PRIMARY KEY (upgrade_id),
   FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
   FOREIGN KEY (request_id) REFERENCES request(request_id)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index b7bc440..a3caf50 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -535,7 +535,6 @@ CREATE TABLE repo_version (
   stack_id BIGINT NOT NULL,
   version VARCHAR(255) NOT NULL,
   display_name VARCHAR(128) NOT NULL,
-  upgrade_package VARCHAR(255) NOT NULL,
   repositories TEXT NOT NULL,
   PRIMARY KEY(repo_version_id)
 );
@@ -874,6 +873,8 @@ CREATE TABLE upgrade (
   from_version VARCHAR(255) DEFAULT '' NOT NULL,
   to_version VARCHAR(255) DEFAULT '' NOT NULL,
   direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+  upgrade_package VARCHAR(255) NOT NULL,
+  upgrade_type VARCHAR(32) NOT NULL,
   PRIMARY KEY (upgrade_id),
   FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
   FOREIGN KEY (request_id) REFERENCES request(request_id)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
index cd16120..e4a5799 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-EMBEDDED-CREATE.sql
@@ -602,7 +602,6 @@ CREATE TABLE ambari.repo_version (
   stack_id BIGINT NOT NULL,
   version VARCHAR(255) NOT NULL,
   display_name VARCHAR(128) NOT NULL,
-  upgrade_package VARCHAR(255) NOT NULL,
   repositories TEXT NOT NULL,
   PRIMARY KEY(repo_version_id)
 );
@@ -965,6 +964,8 @@ CREATE TABLE ambari.upgrade (
   from_version VARCHAR(255) DEFAULT '' NOT NULL,
   to_version VARCHAR(255) DEFAULT '' NOT NULL,
   direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+  upgrade_package VARCHAR(255) NOT NULL,
+  upgrade_type VARCHAR(32) NOT NULL,
   PRIMARY KEY (upgrade_id),
   FOREIGN KEY (cluster_id) REFERENCES ambari.clusters(cluster_id),
   FOREIGN KEY (request_id) REFERENCES ambari.request(request_id)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 5f98d47..4aaab7e 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -641,7 +641,6 @@ CREATE TABLE repo_version (
   stack_id BIGINT NOT NULL,
   version VARCHAR(255) NOT NULL,
   display_name VARCHAR(128) NOT NULL,
-  upgrade_package VARCHAR(255) NOT NULL,
   repositories VARCHAR(MAX) NOT NULL,
   PRIMARY KEY CLUSTERED (repo_version_id)
   );
@@ -989,6 +988,8 @@ CREATE TABLE upgrade (
   from_version VARCHAR(255) DEFAULT '' NOT NULL,
   to_version VARCHAR(255) DEFAULT '' NOT NULL,
   direction VARCHAR(255) DEFAULT 'UPGRADE' NOT NULL,
+  upgrade_package VARCHAR(255) NOT NULL,
+  upgrade_type VARCHAR(32) NOT NULL,
   PRIMARY KEY CLUSTERED (upgrade_id),
   FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id),
   FOREIGN KEY (request_id) REFERENCES request(request_id)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_upgrade.py
index 610f527..2dc9883 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_upgrade.py
@@ -24,7 +24,7 @@ from resource_management.core.resources.system import Execute
 
 class HbaseMasterUpgrade(Script):
 
-  def snapshot(self, env):
+  def take_snapshot(self, env):
     import params
 
     snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd)
@@ -33,5 +33,9 @@ class HbaseMasterUpgrade(Script):
 
     Execute(exec_cmd, user=params.hbase_user)
 
+  def restore_snapshot(self, env):
+    import params
+    print "TODO AMBARI-12698"
+
 if __name__ == "__main__":
   HbaseMasterUpgrade().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index a3c02a6..23e775a 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -107,9 +107,24 @@ class NameNodeDefault(NameNode):
   def get_stack_to_component(self):
     return {"HDP": "hadoop-hdfs-namenode"}
 
+  def restore_snapshot(self, env):
+    """
+    Restore the snapshot during a Downgrade.
+    """
+    print "TODO AMBARI-12698"
+    pass
+
+  def prepare_non_rolling_upgrade(self, env):
+    print "TODO AMBARI-12698"
+    pass
+
   def prepare_rolling_upgrade(self, env):
     namenode_upgrade.prepare_rolling_upgrade()
 
+  def finalize_non_rolling_upgrade(self, env):
+    print "TODO AMBARI-12698"
+    pass
+
   def finalize_rolling_upgrade(self, env):
     namenode_upgrade.finalize_rolling_upgrade()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
new file mode 100644
index 0000000..1da05c2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/upgrades/nonrolling-upgrade-2.3.xml
@@ -0,0 +1,382 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <target>2.3.*.*</target>
+  <target-stack>HDP-2.3</target-stack>
+  <type>NON_ROLLING</type>
+
+  <upgrade-path>
+    <intermediate-stack version="2.2"/>
+    <intermediate-stack version="2.3"/>
+  </upgrade-path>
+
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop 
YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues. If 
yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to 
true, then you can skip this step since the clients will retry on their 
own.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS" title="Stop Storm 
Topologies">
+        <task xsi:type="manual">
+          <message>Before continuing, please deactivate and kill any currently 
running topologies.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons 
for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+
+      <service name="STORM">
+        <component>DRPC_SERVER</component>
+        <component>STORM_UI_SERVER</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_REST_API</component>
+        <component>NIMBUS</component>
+      </service>
+
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+      </service>
+
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+      </service>
+
+      <service name="HIVE">
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_SERVER</component>
+        <component>HIVE_METASTORE</component>
+      </service>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup 
Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Oozie Server database 
on {{oozie-env/oozie_hostname}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup 
Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Hive Metastore 
database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot 
HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>take_snapshot</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons 
for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HBASE">
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_MASTER</component>
+      </service>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>ZKFC</component>
+        <component>JOURNALNODE</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group name="Marker for Downgrade" title="Marker for Downgrade">
+      <direction>UPGRADE</direction>
+      <!-- TODO, if the user attempts a downgrade before this step, they can simply abort. -->
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <!-- If the user attempts a downgrade after this point, they will need to restore backups
+      before starting any of the services. -->
+
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup 
Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Oozie Server database 
on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup 
Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Hive Metastore 
database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot 
HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- After processing this group, will change the effective Stack of the UpgradeContext object. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" 
title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All 
Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>
+        <component>DATANODE</component>
+        <component>HDFS_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>
+        <component>YARN_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HBASE" title="HBASE">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HBASE">
+        <component>HBASE_MASTER</component>
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="TEZ">
+        <component>TEZ_CLIENT</component>
+      </service>
+
+      <service name="PIG">
+        <component>PIG</component>
+      </service>
+
+      <service name="SQOOP">
+        <component>SQOOP</component>
+      </service>
+    </group>
+
+    <group name="SERVICE_CHECK" title="All Service Checks" 
xsi:type="service-check">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <priority>
+        <service>HBASE</service>
+        <service>MAPREDUCE2</service>
+        <service>YARN</service>
+        <service>HDFS</service>
+      </priority>
+    </group>
+
+    <group xsi:type="restart" name="HIVE" title="Hive">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HIVE">
+        <component>HIVE_METASTORE</component>
+        <component>HIVE_SERVER</component>
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_CLIENT</component>
+        <component>HCAT</component>
+      </service>
+    </group>
+
+    <!-- Upgrade Oozie DB only on Upgrade direction, and always create a new ShareLib. -->
+    <group name="Upgrade Oozie" title="Upgrade Oozie Database">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade 
Oozie Database">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database 
and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>upgrade_oozie_database_and_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Only create the ShareLib folder during a Downgrade. -->
+    <group name="Downgrade Oozie" title="Downgrade Oozie ShareLib">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade 
Oozie ShareLib">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database 
and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>create_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="OOZIE" title="Oozie">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+        <component>OOZIE_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FALCON" title="Falcon">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+        <component>FALCON_CLIENT</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="STORM" title="Storm">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="STORM">
+        <component>NIMBUS</component>
+        <component>STORM_REST_API</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_UI_SERVER</component>
+        <component>DRPC_SERVER</component>
+      </service>
+
+      <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild 
Storm Topology">
+        <task xsi:type="manual">
+          <message>Please rebuild your topology using the new Storm version 
dependencies and resubmit it using the newly created jar.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="FLUME" title="Flume">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize 
{{direction.text.proper}}">
+      <skippable>true</skippable>
+      <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">
+        <task xsi:type="manual">
+          <message>The following hosts were unhealthy and should be resolved before finalizing can be completed: {{hosts.unhealthy}}</message>
+        </task>
+      </execute-stage>
+      
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
new file mode 100644
index 0000000..fbd21a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/config-upgrade.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+
+  <services>
+    <service name="HIVE">
+      <component name="HIVE_SERVER">
+        <changes>
+          <definition xsi:type="configure" 
id="hdp_2_2_0_0_hive_server_set_transport_modes">
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10010</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10011</value>
+            </condition>
+          </definition>
+
+          <definition xsi:type="configure" 
id="hdp_2_2_0_0_hive_server_restore_transport_mode_on_downgrade">
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
+              <type>hive-site</type>
+              <key>hive.server2.thrift.port</key>
+              <value>10000</value>
+            </condition>
+            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
+              <type>hive-site</type>
+              <key>hive.server2.http.port</key>
+              <value>10001</value>
+            </condition>
+          </definition>
+        </changes>
+      </component>
+    </service>
+  </services>
+
+</upgrade-config-changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
new file mode 100644
index 0000000..2f6840f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/nonrolling-upgrade-2.2.xml
@@ -0,0 +1,469 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+
+<upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <target>2.2.*.*</target>
+  <target-stack>HDP-2.2</target-stack>
+  <type>NON_ROLLING</type>
+  <prechecks>
+  </prechecks>
+  <order>
+    <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <execute-stage service="YARN" component="RESOURCEMANAGER" title="Stop 
YARN Queues">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all YARN queues. If 
yarn-site's yarn.resourcemanager.work-preserving-recovery.enabled is set to 
true, then you can skip this step since the clients will retry on their 
own.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="SLIDER" component="SLIDER" title="Stop Long 
Running Applications on Slider">
+        <task xsi:type="manual">
+          <message>Before continuing, please stop all long-running 
applications deployed using Slider. E.g., su - yarn 
"/usr/hdp/current/slider-client/bin/slider stop &lt;app_name&gt;"</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="STORM" component="NIMBUS" title="Stop Storm 
Topologies">
+        <task xsi:type="manual">
+          <message>Before continuing, please deactivate and kill any currently 
running topologies.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop High-Level Daemons" title="Stop Daemons 
for High-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+
+      <service name="STORM">
+        <component>DRPC_SERVER</component>
+        <component>STORM_UI_SERVER</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_REST_API</component>
+        <component>NIMBUS</component>
+      </service>
+
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+      </service>
+
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+      </service>
+
+      <service name="SPARK">
+        <component>SPARK_JOBHISTORYSERVER</component>
+      </service>
+
+      <service name="HIVE">
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_SERVER</component>
+        <component>HIVE_METASTORE</component>
+      </service>
+
+      <service name="YARN">
+        <component>NODEMANAGER</component>        <!-- TODO, parallelize -->
+        <component>RESOURCEMANAGER</component>
+        <component>APP_TIMELINE_SERVER</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="Backups" title="Take Backups">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup 
Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Oozie Server database 
on {{oozie-env/oozie_hostname}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup 
Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Hive Metastore 
database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup 
Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Knox data. E.g., "cp 
-RL /etc/knox/data/security ~/knox_backup" on the following host(s): 
{{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot 
HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>take_snapshot</function>        <!-- TODO, this function 
used to be called just "snapshot" -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">  <!-- TODO, this can be any 
NameNode, not just the active. -->
+          <script>scripts/namenode.py</script>
+          <function>prepare_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup 
Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please backup the Ranger Admin database 
and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="stop" name="Stop Low-Level Daemons" title="Stop Daemons 
for Low-Level Services">
+      <skippable>true</skippable>
+      <service-check>false</service-check>
+
+      <service name="HBASE">
+        <component>HBASE_REGIONSERVER</component>
+        <component>HBASE_MASTER</component>
+      </service>
+
+      <service name="HDFS">
+        <component>DATANODE</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>   <!-- TODO, may not be present. -->
+        <component>ZKFC</component>                 <!-- TODO, may not be present. -->
+        <component>JOURNALNODE</component>          <!-- TODO, may not be present. -->
+      </service>
+
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+      </service>
+
+      <service name="ZOOKEEPER">
+        <component>ZOOKEEPER_SERVER</component>
+      </service>
+    </group>
+
+    <group name="Marker for Downgrade" title="Marker for Downgrade">
+      <direction>UPGRADE</direction>
+      <!-- TODO, if the user attempts a downgrade before this step, they can simply abort. -->
+    </group>
+
+    <group xsi:type="cluster" name="Restore Backups" title="Restore Backups">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+
+      <!-- If the user attempts a downgrade after this point, they will need to restore backups
+      before starting any of the services. -->
+
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Backup 
Oozie Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Oozie Server database 
on {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HIVE" component="HIVE_METASTORE" title="Backup 
Hive Metastore">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Hive Metastore 
database located on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="KNOX" component="KNOX_GATEWAY" title="Backup 
Knox Data">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Knox data. E.g., "cp 
-RL ~/knox_backup/* /etc/knox/data/security/" on the following host(s): 
{{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot 
HBASE">
+        <task xsi:type="execute" hosts="master">
+          <script>scripts/hbase_upgrade.py</script>
+          <function>restore_snapshot</function>   <!-- TODO, this function 
name is new. -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Snapshot HDFS">
+        <task xsi:type="execute" hosts="master">  <!-- TODO, this can be any 
NameNode, not just the active. -->
+          <script>scripts/namenode.py</script>
+          <function>restore_snapshot</function>    <!-- TODO, this function 
doesn't exist yet. -->
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup 
Ranger">
+        <task xsi:type="manual">
+          <message>Before continuing, please restore the Ranger Admin database 
and Ranger Audit database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- After processing this group, the effective Stack of the UpgradeContext object will change. -->
+    <group xsi:type="update-stack" name="UPDATE_DESIRED_STACK_ID" 
title="Update Desired Stack Id">
+      <execute-stage title="Update Desired Stack Id" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.UpdateDesiredStackAction">
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="cluster" name="ALL_HOST_OPS" title="Set Version On All 
Hosts">
+      <skippable>true</skippable>
+      <execute-stage title="Update stack to {{version}}">
+        <task xsi:type="execute">
+          <script>scripts/ru_set_all.py</script>
+          <function>actionexecute</function>      <!-- TODO, parallelize -->
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Now, restart all of the services. -->
+
+    <group xsi:type="restart" name="ZOOKEEPER" title="Zookeeper">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="ZOOKEEPER">
+        <service-check>false</service-check>        <!-- TODO, enable 
service-check once done testing -->
+        <component>ZOOKEEPER_SERVER</component>
+        <component>ZOOKEEPER_CLIENT</component>     <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HDFS" title="HDFS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HDFS">
+        <component>JOURNALNODE</component>
+        <component>ZKFC</component>
+        <component>NAMENODE</component>
+        <component>SECONDARY_NAMENODE</component>   <!-- TODO, may not be 
present -->
+        <component>DATANODE</component>             <!-- TODO, parallelize -->
+        <component>HDFS_CLIENT</component>          <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="MR and YARN" title="MR and YARN">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="MAPREDUCE2">
+        <component>HISTORYSERVER</component>
+        <component>MAPREDUCE2_CLIENT</component>    <!-- TODO, parallelize -->
+      </service>
+      <service name="YARN">
+        <component>APP_TIMELINE_SERVER</component>
+        <component>RESOURCEMANAGER</component>
+        <component>NODEMANAGER</component>          <!-- TODO, parallelize -->
+        <component>YARN_CLIENT</component>          <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="HBASE" title="HBASE">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HBASE">
+        <component>HBASE_MASTER</component>
+        <component>HBASE_REGIONSERVER</component>   <!-- TODO, parallelize -->
+        <component>HBASE_CLIENT</component>         <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="CLIENTS" title="Tez, Pig, Sqoop Clients">  
<!-- TODO, parallelize -->
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="TEZ">
+        <component>TEZ_CLIENT</component>
+      </service>
+
+      <service name="PIG">
+        <component>PIG</component>
+      </service>
+
+      <service name="SQOOP">
+        <component>SQOOP</component>
+      </service>
+    </group>
+
+    <group name="SERVICE_CHECK" title="All Service Checks" 
xsi:type="service-check">
+      <skippable>true</skippable>
+      <direction>UPGRADE</direction>
+      <priority>
+        <!-- TODO, for some reason, it flips the order. -->
+        <service>HBASE</service>
+        <service>MAPREDUCE2</service>
+        <service>YARN</service>
+        <service>HDFS</service>
+      </priority>
+    </group>
+
+    <group xsi:type="restart" name="HIVE" title="Hive">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="HIVE">
+        <component>HIVE_METASTORE</component>
+        <component>HIVE_SERVER</component>
+        <component>WEBHCAT_SERVER</component>
+        <component>HIVE_CLIENT</component>          <!-- TODO, parallelize -->
+        <component>HCAT</component>                 <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="SPARK" title="Spark">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="SPARK">
+        <component>SPARK_JOBHISTORYSERVER</component>
+        <component>SPARK_CLIENT</component>         <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <!-- Upgrade the Oozie DB only in the Upgrade direction, and always create a new ShareLib. -->
+    <group name="Upgrade Oozie" title="Upgrade Oozie Database">
+      <direction>UPGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Upgrade 
Oozie Database">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database 
and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>upgrade_oozie_database_and_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <!-- Only create the ShareLib folder during a Downgrade. -->
+    <group name="Downgrade Oozie" title="Downgrade Oozie ShareLib">
+      <direction>DOWNGRADE</direction>
+      <skippable>true</skippable>
+      <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Downgrade 
Oozie ShareLib">
+        <task xsi:type="execute" hosts="any" summary="Upgrading the database 
and creating a new sharelib">
+          <script>scripts/oozie_server_upgrade.py</script>
+          <function>create_sharelib</function>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="OOZIE" title="Oozie">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="OOZIE">
+        <component>OOZIE_SERVER</component>
+        <component>OOZIE_CLIENT</component>         <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FALCON" title="Falcon">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FALCON">
+        <component>FALCON_SERVER</component>
+        <component>FALCON_CLIENT</component>        <!-- TODO, parallelize -->
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KAFKA" title="Kafka">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KAFKA">
+        <component>KAFKA_BROKER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="KNOX" title="Knox">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="KNOX">
+        <component>KNOX_GATEWAY</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="STORM" title="Storm">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="STORM">
+        <component>NIMBUS</component>
+        <component>STORM_REST_API</component>
+        <component>SUPERVISOR</component>
+        <component>STORM_UI_SERVER</component>
+        <component>DRPC_SERVER</component>
+      </service>
+
+      <!-- TODO, does this work? -->
+      <execute-stage service="STORM" component="DRPC_SERVER" title="Rebuild 
Storm Topology">
+        <task xsi:type="manual">
+          <message>Please rebuild your topology using the new Storm version 
dependencies and resubmit it using the newly created jar.</message>
+        </task>
+      </execute-stage>
+    </group>
+
+    <group xsi:type="restart" name="SLIDER" title="Slider">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="SLIDER">
+        <component>SLIDER</component>
+      </service>
+    </group>
+
+    <group xsi:type="restart" name="FLUME" title="Flume">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <service name="FLUME">
+        <component>FLUME_HANDLER</component>
+      </service>
+    </group>
+
+    <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize 
{{direction.text.proper}}">
+      <skippable>true</skippable>
+      <execute-stage title="Check Unhealthy Hosts" id="unhealthy-hosts">
+        <task xsi:type="manual">
+          <message>The following hosts were unhealthy and should be resolved 
before finalizing can be completed: {{hosts.unhealthy}}</message>
+        </task>
+      </execute-stage>
+      
+      <execute-stage title="Confirm Finalize">
+        <direction>UPGRADE</direction>
+        <task xsi:type="manual">
+          <message>Please confirm you are ready to finalize.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS 
Finalize">
+        <task xsi:type="execute" hosts="master">      <!-- TODO, what happens 
if there's no HA. -->
+          <script>scripts/namenode.py</script>
+          <function>finalize_non_rolling_upgrade</function>
+        </task>
+      </execute-stage>
+
+      <execute-stage title="Save Cluster State" service="" component="">
+        <task xsi:type="server_action" 
class="org.apache.ambari.server.serveraction.upgrades.FinalizeUpgradeAction">
+        </task>
+      </execute-stage>
+    </group>
+  </order>
+</upgrade>
\ No newline at end of file
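
[Editor's note] For orientation between the two files in this patch: the pack above is an express (non-rolling) upgrade pack (note the prepare_non_rolling_upgrade and finalize_non_rolling_upgrade steps), while upgrade-2.2.xml below is the rolling pack that now gains an explicit <type> element. A minimal header sketch for the non-rolling case, assuming NON_ROLLING is the UpgradeType value used for express packs and borrowing the target pattern and skip flags from the rolling pack below purely for illustration:

    <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
      <!-- Target pattern and skip flags copied from the rolling pack below for illustration only. -->
      <target>2.2.*.*</target>
      <!-- Assumption: NON_ROLLING is the UpgradeType value for express upgrade packs. -->
      <type>NON_ROLLING</type>
      <skip-failures>false</skip-failures>
      <skip-service-check-failures>false</skip-service-check-failures>
      <order>
        <!-- PRE_CLUSTER, backup/restore, restart, and POST_CLUSTER groups as shown in the diff above -->
      </order>
    </upgrade>

Presumably the <type> element is what lets the server match a pack to the upgrade type requested through the API.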

http://git-wip-us.apache.org/repos/asf/ambari/blob/ff8a56af/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml 
b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
index 3837e63..b351aae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
@@ -19,8 +19,21 @@
 
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";>
   <target>2.2.*.*</target>
+  <type>ROLLING</type>
   <skip-failures>false</skip-failures>
   <skip-service-check-failures>false</skip-service-check-failures>
+  <prerequisite-checks>
+    <check>org.apache.ambari.server.checks.HiveMultipleMetastoreCheck</check>
+    
<check>org.apache.ambari.server.checks.MapReduce2JobHistoryStatePreservingCheck</check>
+    
<check>org.apache.ambari.server.checks.SecondaryNamenodeDeletedCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesMapReduceDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesNamenodeTruncateCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesTezDistributedCacheCheck</check>
+    
<check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
+    <check>org.apache.ambari.server.checks.YarnRMHighAvailabilityCheck</check>
+    
<check>org.apache.ambari.server.checks.YarnTimelineServerStatePreservingCheck</check>
+  </prerequisite-checks>
 
   <order>
     <group xsi:type="cluster" name="PRE_CLUSTER" title="Prepare Upgrade">
@@ -35,7 +48,7 @@
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Pre 
Upgrade HBase">
         <task xsi:type="execute" hosts="master">
           <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
+          <function>take_snapshot</function>
         </task>
       </execute-stage>
 
@@ -314,13 +327,13 @@
     <service name="ZOOKEEPER">
       <component name="ZOOKEEPER_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="ZOOKEEPER_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -328,13 +341,13 @@
     <service name="RANGER">
       <component name="RANGER_ADMIN">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="RANGER_USERSYNC">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -342,31 +355,31 @@
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="DATANODE">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HDFS_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="JOURNALNODE">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="ZKFC">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -374,13 +387,13 @@
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="MAPREDUCE2_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -388,44 +401,44 @@
     <service name="YARN">
       <component name="APP_TIMELINE_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="RESOURCEMANAGER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="NODEMANAGER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="YARN_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
     <service name="HBASE">
       <component name="HBASE_MASTER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HBASE_REGIONSERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HBASE_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -433,7 +446,7 @@
     <service name="TEZ">
       <component name="TEZ_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -441,7 +454,7 @@
     <service name="PIG">
       <component name="PIG">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -449,7 +462,7 @@
     <service name="SQOOP">
       <component name="SQOOP">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -457,7 +470,7 @@
     <service name="HIVE">
       <component name="HIVE_METASTORE">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
@@ -468,18 +481,7 @@
             <message>Please note that the HiveServer port will now change to 
10010 if hive is using a binary transfer mode or 10011 if hive is using an http 
transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the 
port is available on each of following HiveServer host(s): {{hosts.all}}. If 
the port is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10010</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10011</value>
-            </condition>
-          </task>
+          <task xsi:type="configure" 
id="hdp_2_2_0_0_hive_server_set_transport_modes" />
         </pre-upgrade>
 
         <pre-downgrade>
@@ -488,40 +490,29 @@
             <message>Please note that the HiveServer port will now change to 
10000 if hive is using a binary transfer mode or 10001 if hive is using an http 
transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the 
port is available on each of following HiveServer host(s): {{hosts.all}}. If 
the port is not available, the process using it must be terminated.</message>
           </task>
 
-          <task xsi:type="configure">
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="binary">
-              <type>hive-site</type>
-              <key>hive.server2.thrift.port</key>
-              <value>10000</value>
-            </condition>
-            <condition type="hive-site" key="hive.server2.transport.mode" 
value="http">
-              <type>hive-site</type>
-              <key>hive.server2.http.port</key>
-              <value>10001</value>
-            </condition>
-          </task>
+          <task xsi:type="configure" 
id="hdp_2_2_0_0_hive_server_restore_transport_mode_on_downgrade" />
         </pre-downgrade>
 
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="WEBHCAT_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HIVE_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="HCAT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -529,7 +520,7 @@
     <service name="SLIDER">
       <component name="SLIDER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -537,12 +528,12 @@
     <service name="SPARK">
       <component name="SPARK_JOBHISTORYSERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="SPARK_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -574,13 +565,13 @@
         </pre-downgrade>
 
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
 
       <component name="OOZIE_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -588,12 +579,12 @@
     <service name="FALCON">
       <component name="FALCON_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="FALCON_CLIENT">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -601,7 +592,7 @@
     <service name="KAFKA">
       <component name="KAFKA_BROKER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -609,7 +600,7 @@
     <service name="KNOX">
       <component name="KNOX_GATEWAY">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
@@ -622,27 +613,27 @@
           </task>
         </pre-upgrade>
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="STORM_REST_API">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="SUPERVISOR">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="STORM_UI_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
       <component name="DRPC_SERVER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
         <post-upgrade>
           <task xsi:type="manual">
@@ -655,7 +646,7 @@
     <service name="FLUME">
       <component name="FLUME_HANDLER">
         <upgrade>
-          <task xsi:type="restart" />
+          <task xsi:type="restart-task" />
         </upgrade>
       </component>
     </service>
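
[Editor's note] The HIVE_SERVER pre-upgrade and pre-downgrade steps above now reference configure definitions by id instead of carrying the port-switch conditions inline. A rough sketch of where those definitions would live: the element nesting (services/service/component/changes/definition) is an assumption about the stack's config-upgrade.xml, while the ids, keys, and port values are taken directly from the lines this patch adds and removes.

    <upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
      <services>
        <service name="HIVE">
          <component name="HIVE_SERVER">
            <changes>
              <!-- Referenced by the pre-upgrade task; moves HiveServer2 to the temporary ports. -->
              <definition xsi:type="configure" id="hdp_2_2_0_0_hive_server_set_transport_modes">
                <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
                  <type>hive-site</type>
                  <key>hive.server2.thrift.port</key>
                  <value>10010</value>
                </condition>
                <condition type="hive-site" key="hive.server2.transport.mode" value="http">
                  <type>hive-site</type>
                  <key>hive.server2.http.port</key>
                  <value>10011</value>
                </condition>
              </definition>
              <!-- Referenced by the pre-downgrade task; restores the default ports. -->
              <definition xsi:type="configure" id="hdp_2_2_0_0_hive_server_restore_transport_mode_on_downgrade">
                <condition type="hive-site" key="hive.server2.transport.mode" value="binary">
                  <type>hive-site</type>
                  <key>hive.server2.thrift.port</key>
                  <value>10000</value>
                </condition>
                <condition type="hive-site" key="hive.server2.transport.mode" value="http">
                  <type>hive-site</type>
                  <key>hive.server2.http.port</key>
                  <value>10001</value>
                </condition>
              </definition>
            </changes>
          </component>
        </service>
      </services>
    </upgrade-config-changes>

Resolving the conditions by id keeps the per-stack port logic in one place instead of being duplicated in every upgrade pack that restarts HiveServer2.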
