Repository: ambari
Updated Branches:
  refs/heads/trunk 2d69fc7e0 -> 3155a43e5


AMBARI-21672 - Update Orchestration to support MAINT (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3155a43e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3155a43e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3155a43e

Branch: refs/heads/trunk
Commit: 3155a43e591499e9501ee3b2f6e5f095a9883177
Parents: 2d69fc7
Author: Jonathan Hurley <jhur...@hortonworks.com>
Authored: Mon Aug 7 11:03:09 2017 -0400
Committer: Jonathan Hurley <jhur...@hortonworks.com>
Committed: Tue Aug 8 10:11:53 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/state/UpgradeContext.java     | 140 +++++++----
 .../ambari/server/state/UpgradeContextTest.java | 246 ++++++++++++++++---
 2 files changed, 309 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3155a43e/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
index 0e409a4..a4ed080 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeContext.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
 import org.apache.ambari.server.stack.MasterHostResolver;
 import org.apache.ambari.server.stageplanner.RoleGraphFactory;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
 import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.PrereqCheckStatus;
 import org.apache.ambari.server.state.stack.UpgradePack;
@@ -114,17 +115,17 @@ public class UpgradeContext {
   /*
    * The cluster that the upgrade is for.
    */
-  final private Cluster m_cluster;
+  private final Cluster m_cluster;
 
   /**
    * The direction of the upgrade.
    */
-  final private Direction m_direction;
+  private final Direction m_direction;
 
   /**
    * The type of upgrade.
    */
-  final private UpgradeType m_type;
+  private final UpgradeType m_type;
 
   /**
    * The upgrade pack for this upgrade.
@@ -187,7 +188,7 @@ public class UpgradeContext {
    * A set of services which are included in this upgrade. If this is empty,
    * then all cluster services are included.
    */
-  private Set<String> m_services = new HashSet<>();
+  private final Set<String> m_services = new HashSet<>();
 
   /**
    * A mapping of service to target repository. On an upgrade, this will be the
@@ -252,7 +253,7 @@ public class UpgradeContext {
   /**
    * Used as a quick way to tell if the upgrade is to revert a patch.
    */
-  private boolean m_isRevert = false;
+  private final boolean m_isRevert;
 
   /**
    * Defines orchestration type.  This is not the repository type when 
reverting a patch.
@@ -356,48 +357,8 @@ public class UpgradeContext {
           m_repositoryVersion = 
m_repoVersionDAO.findByPK(Long.valueOf(repositoryVersionId));
           m_orchestration = m_repositoryVersion.getType();
 
-          if (m_orchestration == RepositoryType.STANDARD) {
-            m_services.addAll(cluster.getServices().keySet());
-          } else {
-            try {
-              VersionDefinitionXml vdf = 
m_repositoryVersion.getRepositoryXml();
-              m_services.addAll(vdf.getAvailableServiceNames());
-
-              // if this is every true, then just stop the upgrade attempt and
-              // throw an exception
-              if (m_services.isEmpty()) {
-                String message = String.format(
-                    "When using a VDF of type %s, the available services must 
be defined in the VDF",
-                    m_repositoryVersion.getType());
-
-                throw new AmbariException(message);
-              }
-
-            } catch (Exception e) {
-              String msg = String.format(
-                  "Could not parse version definition for %s.  Upgrade will 
not proceed.",
-                  m_repositoryVersion.getVersion());
-
-              throw new AmbariException(msg);
-            }
-          }
-
-          Set<String> installedServices = new HashSet<>();
-          // populate the target repository map for all services in the upgrade
-          for (String serviceName : m_services) {
-            try {
-              Service service = cluster.getService(serviceName);
-              m_sourceRepositoryMap.put(serviceName, 
service.getDesiredRepositoryVersion());
-              m_targetRepositoryMap.put(serviceName, m_repositoryVersion);
-              installedServices.add(serviceName);
-            } catch (ServiceNotFoundException e) {
-              LOG.warn("Skipping orchestraction for service {}, as it was 
defined to upgrade, but is not installed in cluster {}",
-                  serviceName, cluster.getClusterName());
-            }
-          }
-
-          m_services = installedServices;
-
+          // add all of the services participating in the upgrade
+          m_services.addAll(getServicesForUpgrade(cluster, 
m_repositoryVersion));
           break;
         }
         case DOWNGRADE:{
@@ -417,8 +378,8 @@ public class UpgradeContext {
           break;
         }
         default:
-          m_repositoryVersion = null;
-          break;
+          throw new AmbariException(
+              String.format("%s is not a valid upgrade direction.", 
m_direction));
       }
     }
 
@@ -907,6 +868,87 @@ public class UpgradeContext {
   }
 
   /**
+   * Gets the set of services which will participate in the upgrade. The
+   * services available in the repository are compared against those installed
+   * in the cluster to arrive at the final subset.
+   * <p/>
+   * In some cases, such as with a {@link RepositoryType#MAINT} repository, the
+   * subset can be further trimmed by determining that an installed service is
+   * already at a high enough version and doesn't need to be upgraded.
+   * <p/>
+   * This method will also populate the source ({@link #m_sourceRepositoryMap})
+   * and target ({@link #m_targetRepositoryMap}) repository maps.
+   *
+   * @param cluster
+   *          the cluster (not {@code null}).
+   * @param repositoryVersion
+   *          the repository to use for the upgrade (not {@code null}).
+   * @return the set of services which will participate in the upgrade.
+   * @throws AmbariException
+   */
+  private Set<String> getServicesForUpgrade(Cluster cluster,
+      RepositoryVersionEntity repositoryVersion) throws AmbariException {
+
+    // keep track of the services which will be in this upgrade
+    Set<String> servicesForUpgrade = new HashSet<>();
+
+    // standard repo types use all services of the cluster
+    if (repositoryVersion.getType() == RepositoryType.STANDARD) {
+      servicesForUpgrade = cluster.getServices().keySet();
+    } else {
+      try {
+        // use the VDF and cluster to determine what services should be in this
+        // upgrade - this will take into account the type (such as patch/maint)
+        // and the version of services installed in the cluster
+        VersionDefinitionXml vdf = repositoryVersion.getRepositoryXml();
+        ClusterVersionSummary clusterVersionSummary = 
vdf.getClusterSummary(cluster);
+        servicesForUpgrade = clusterVersionSummary.getAvailableServiceNames();
+
+        // if this is ever true, then just stop the upgrade attempt and
+        // throw an exception
+        if (servicesForUpgrade.isEmpty()) {
+          String message = String.format(
+              "When using a VDF of type %s, the available services must be 
defined in the VDF",
+              repositoryVersion.getType());
+
+          throw new AmbariException(message);
+        }
+      } catch (Exception e) {
+        String msg = String.format(
+            "Could not parse version definition for %s.  Upgrade will not 
proceed.",
+            repositoryVersion.getVersion());
+
+        throw new AmbariException(msg);
+      }
+    }
+
+    // now that we have a list of the services defined by the VDF, only include
+    // services which are actually installed
+    Iterator<String> iterator = servicesForUpgrade.iterator();
+    while (iterator.hasNext()) {
+      String serviceName = null;
+      try {
+        serviceName = iterator.next();
+        Service service = cluster.getService(serviceName);
+
+        m_sourceRepositoryMap.put(serviceName, 
service.getDesiredRepositoryVersion());
+        m_targetRepositoryMap.put(serviceName, repositoryVersion);
+      } catch (ServiceNotFoundException e) {
+        // remove the service which is not part of the cluster - this should
+        // never happen since the summary from the VDF does this already, but
+        // can't hurt to be safe
+        iterator.remove();
+
+        LOG.warn(
+            "Skipping orchestration for service {}, as it was defined to 
upgrade, but is not installed in cluster {}",
+            serviceName, cluster.getClusterName());
+      }
+    }
+
+    return servicesForUpgrade;
+  }
+
+  /**
    * Builds a chain of {@link UpgradeRequestValidator}s to ensure that the
    * incoming request to create a new upgrade is valid.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/3155a43e/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
index e2bb27e..e04be5e 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeContextTest.java
@@ -33,6 +33,8 @@ import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.UpgradeEntity;
 import org.apache.ambari.server.orm.entities.UpgradeHistoryEntity;
+import org.apache.ambari.server.state.repository.ClusterVersionSummary;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.upgrade.Direction;
 import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
@@ -43,39 +45,93 @@ import org.easymock.Mock;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Sets;
+
 /**
  * Tests {@link UpgradeContext}.
  */
 public class UpgradeContextTest extends EasyMockSupport {
 
+  private final static String HDFS_SERVICE_NAME = "HDFS";
+  private final static String ZOOKEEPER_SERVICE_NAME = "ZOOKEEPER";
+
+  /**
+   * An existing upgrade which can be reverted.
+   */
   @Mock
   private UpgradeEntity m_completedRevertableUpgrade;
 
+  /**
+   * The target repository of a completed upgrade.
+   */
   @Mock
-  private RepositoryVersionEntity m_completedUpgradeTargetRepositoryVersion;
+  private RepositoryVersionEntity m_targetRepositoryVersion;
 
+  /**
+   * The source repository of a completed upgrade.
+   */
   @Mock
-  private RepositoryVersionEntity m_completedUpgradeSourceRepositoryVersion;
+  private RepositoryVersionEntity m_sourceRepositoryVersion;
 
+  /**
+   * The cluster performing the upgrade.
+   */
+  @Mock
+  private Cluster m_cluster;
+
+  /**
+   * HDFS
+   */
+  @Mock
+  private Service m_hdfsService;
+
+  /**
+   * ZooKeeper
+   */
+  @Mock
+  private Service m_zookeeperService;
 
   @Mock
   private UpgradeDAO m_upgradeDAO;
 
+  @Mock
+  private RepositoryVersionDAO m_repositoryVersionDAO;
+
+  /**
+   * Used to mock out what services will be provided to us by the VDF/cluster.
+   */
+  @Mock
+  private ClusterVersionSummary m_clusterVersionSummary;
+
+  /**
+   *
+   */
+  @Mock
+  private VersionDefinitionXml m_vdfXml;
+
+  /**
+   * The cluster services.
+   */
+  private Map<String, Service> m_services = new HashMap<>();
+
   @Before
-  public void setup() {
+  public void setup() throws Exception {
     injectMocks(this);
 
-    
expect(m_completedUpgradeSourceRepositoryVersion.getId()).andReturn(1L).anyTimes();
-    
expect(m_completedUpgradeSourceRepositoryVersion.getStackId()).andReturn(new 
StackId("HDP", "2.6")).anyTimes();
-    
expect(m_completedUpgradeTargetRepositoryVersion.getId()).andReturn(1L).anyTimes();
-    
expect(m_completedUpgradeTargetRepositoryVersion.getStackId()).andReturn(new 
StackId("HDP", "2.6")).anyTimes();
+    expect(m_sourceRepositoryVersion.getId()).andReturn(1L).anyTimes();
+    expect(m_sourceRepositoryVersion.getStackId()).andReturn(new 
StackId("HDP", "2.6")).anyTimes();
+    expect(m_targetRepositoryVersion.getId()).andReturn(99L).anyTimes();
+    expect(m_targetRepositoryVersion.getStackId()).andReturn(new 
StackId("HDP", "2.6")).anyTimes();
 
     UpgradeHistoryEntity upgradeHistoryEntity = 
createNiceMock(UpgradeHistoryEntity.class);
-    
expect(upgradeHistoryEntity.getServiceName()).andReturn("HDFS").atLeastOnce();
-    
expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_completedUpgradeSourceRepositoryVersion).anyTimes();
-    
expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_completedUpgradeTargetRepositoryVersion).anyTimes();
+    
expect(upgradeHistoryEntity.getServiceName()).andReturn(HDFS_SERVICE_NAME).anyTimes();
+    
expect(upgradeHistoryEntity.getFromReposistoryVersion()).andReturn(m_sourceRepositoryVersion).anyTimes();
+    
expect(upgradeHistoryEntity.getTargetRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
     List<UpgradeHistoryEntity> upgradeHistory = 
Lists.newArrayList(upgradeHistoryEntity);
 
+    
expect(m_repositoryVersionDAO.findByPK(1L)).andReturn(m_sourceRepositoryVersion).anyTimes();
+    
expect(m_repositoryVersionDAO.findByPK(99L)).andReturn(m_targetRepositoryVersion).anyTimes();
+
     
expect(m_upgradeDAO.findUpgrade(1L)).andReturn(m_completedRevertableUpgrade).anyTimes();
 
     expect(
@@ -83,10 +139,156 @@ public class UpgradeContextTest extends EasyMockSupport {
             
eq(Direction.UPGRADE))).andReturn(m_completedRevertableUpgrade).anyTimes();
 
     
expect(m_completedRevertableUpgrade.getDirection()).andReturn(Direction.UPGRADE).anyTimes();
-    
expect(m_completedRevertableUpgrade.getRepositoryVersion()).andReturn(m_completedUpgradeTargetRepositoryVersion).anyTimes();
+    
expect(m_completedRevertableUpgrade.getRepositoryVersion()).andReturn(m_targetRepositoryVersion).anyTimes();
     
expect(m_completedRevertableUpgrade.getOrchestration()).andReturn(RepositoryType.PATCH).anyTimes();
     
expect(m_completedRevertableUpgrade.getHistory()).andReturn(upgradeHistory).anyTimes();
     
expect(m_completedRevertableUpgrade.getUpgradePackage()).andReturn(null).anyTimes();
+
+    RepositoryVersionEntity hdfsRepositoryVersion = 
createNiceMock(RepositoryVersionEntity.class);
+
+    
expect(m_hdfsService.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).anyTimes();
+    
expect(m_cluster.getService(HDFS_SERVICE_NAME)).andReturn(m_hdfsService).anyTimes();
+    m_services.put(HDFS_SERVICE_NAME, m_hdfsService);
+
+    expect(m_cluster.getServices()).andReturn(m_services).anyTimes();
+    expect(m_cluster.getClusterId()).andReturn(1L).anyTimes();
+    expect(m_cluster.getClusterName()).andReturn("c1").anyTimes();
+    expect(m_cluster.getUpgradeInProgress()).andReturn(null).atLeastOnce();
+
+    // VDF stuff
+    
expect(m_vdfXml.getClusterSummary(EasyMock.anyObject(Cluster.class))).andReturn(
+        m_clusterVersionSummary).anyTimes();
+  }
+
+  /**
+   * Tests the {@link UpgradeContext} for a normal upgrade.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFullUpgrade() throws Exception {
+    UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+    
expect(m_targetRepositoryVersion.getType()).andReturn(RepositoryType.STANDARD).atLeastOnce();
+
+    expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), 
EasyMock.anyObject(StackId.class),
+        EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+        EasyMock.anyObject(UpgradeType.class), 
EasyMock.anyString())).andReturn(upgradePack).once();
+
+    replayAll();
+
+    Map<String, Object> requestMap = new HashMap<>();
+    requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, 
UpgradeType.ROLLING.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_DIRECTION, 
Direction.UPGRADE.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID,
+        m_targetRepositoryVersion.getId().toString());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, 
"true");
+
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, 
upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+    assertEquals(Direction.UPGRADE, context.getDirection());
+    assertEquals(RepositoryType.STANDARD, context.getOrchestrationType());
+    assertEquals(1, context.getSupportedServices().size());
+    assertFalse(context.isPatchRevert());
+
+    verifyAll();
+  }
+
+  /**
+   * Tests the {@link UpgradeContext} for a patch upgrade.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testPatchUpgrade() throws Exception {
+    UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+    expect(m_clusterVersionSummary.getAvailableServiceNames()).andReturn(
+        Sets.newHashSet(HDFS_SERVICE_NAME)).once();
+
+    
expect(m_targetRepositoryVersion.getType()).andReturn(RepositoryType.PATCH).atLeastOnce();
+    
expect(m_targetRepositoryVersion.getRepositoryXml()).andReturn(m_vdfXml).once();
+
+    expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), 
EasyMock.anyObject(StackId.class),
+        EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+        EasyMock.anyObject(UpgradeType.class), 
EasyMock.anyString())).andReturn(upgradePack).once();
+
+    // make the cluster have 2 services just for fun (the VDF only has 1
+    // service)
+    
expect(m_cluster.getService(ZOOKEEPER_SERVICE_NAME)).andReturn(m_zookeeperService).anyTimes();
+    m_services.put(ZOOKEEPER_SERVICE_NAME, m_zookeeperService);
+    assertEquals(2, m_services.size());
+
+    replayAll();
+
+    Map<String, Object> requestMap = new HashMap<>();
+    requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, 
UpgradeType.NON_ROLLING.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_DIRECTION, 
Direction.UPGRADE.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, 
m_targetRepositoryVersion.getId().toString());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, 
"true");
+
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, 
upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+    assertEquals(Direction.UPGRADE, context.getDirection());
+    assertEquals(RepositoryType.PATCH, context.getOrchestrationType());
+    assertEquals(1, context.getSupportedServices().size());
+    assertFalse(context.isPatchRevert());
+
+    verifyAll();
+  }
+
+  /**
+   * Tests the {@link UpgradeContext} for a maintenance upgrade.
+   * Maintenance upgrades will only upgrade services which require it by
+   * examining the versions included in the VDF.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testMaintUpgrade() throws Exception {
+    UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
+    ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
+    UpgradePack upgradePack = createNiceMock(UpgradePack.class);
+
+    expect(m_clusterVersionSummary.getAvailableServiceNames()).andReturn(
+        Sets.newHashSet(HDFS_SERVICE_NAME)).once();
+
+    
expect(m_targetRepositoryVersion.getType()).andReturn(RepositoryType.MAINT).atLeastOnce();
+    
expect(m_targetRepositoryVersion.getRepositoryXml()).andReturn(m_vdfXml).once();
+
+    expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), 
EasyMock.anyObject(StackId.class),
+        EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
+        EasyMock.anyObject(UpgradeType.class), 
EasyMock.anyString())).andReturn(upgradePack).once();
+
+    // make the cluster have 2 services - one is already upgraded to a new
+    // enough version
+    
expect(m_cluster.getService(ZOOKEEPER_SERVICE_NAME)).andReturn(m_zookeeperService).anyTimes();
+    m_services.put(ZOOKEEPER_SERVICE_NAME, m_zookeeperService);
+    assertEquals(2, m_services.size());
+
+    replayAll();
+
+    Map<String, Object> requestMap = new HashMap<>();
+    requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, 
UpgradeType.NON_ROLLING.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_DIRECTION, 
Direction.UPGRADE.name());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_REPO_VERSION_ID, 
m_targetRepositoryVersion.getId().toString());
+    requestMap.put(UpgradeResourceProvider.UPGRADE_SKIP_PREREQUISITE_CHECKS, 
"true");
+
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, 
upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
+
+    assertEquals(Direction.UPGRADE, context.getDirection());
+    assertEquals(RepositoryType.MAINT, context.getOrchestrationType());
+    assertEquals(1, context.getSupportedServices().size());
+    assertFalse(context.isPatchRevert());
+
+    verifyAll();
   }
 
   /**
@@ -97,21 +299,15 @@ public class UpgradeContextTest extends EasyMockSupport {
    */
   @Test
   public void testRevert() throws Exception {
-    Cluster cluster = createNiceMock(Cluster.class);
     UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
     ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
-    RepositoryVersionDAO repositoryVersionDAO = 
createNiceMock(RepositoryVersionDAO.class);
-    RepositoryVersionEntity hdfsRepositoryVersion = 
createNiceMock(RepositoryVersionEntity.class);
 
-    Service service = createNiceMock(Service.class);
     UpgradePack upgradePack = createNiceMock(UpgradePack.class);
 
     expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), 
EasyMock.anyObject(StackId.class),
         EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
         EasyMock.anyObject(UpgradeType.class), 
EasyMock.anyString())).andReturn(upgradePack).once();
 
-    
expect(service.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).once();
-    expect(cluster.getService("HDFS")).andReturn(service).atLeastOnce();
 
     Map<String, Object> requestMap = new HashMap<>();
     requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, 
UpgradeType.ROLLING.name());
@@ -119,8 +315,8 @@ public class UpgradeContextTest extends EasyMockSupport {
 
     replayAll();
 
-    UpgradeContext context = new UpgradeContext(cluster, requestMap, null, 
upgradeHelper,
-        m_upgradeDAO, repositoryVersionDAO, configHelper);
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, 
upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
 
     assertEquals(Direction.DOWNGRADE, context.getDirection());
     assertEquals(RepositoryType.PATCH, context.getOrchestrationType());
@@ -137,21 +333,15 @@ public class UpgradeContextTest extends EasyMockSupport {
    * @throws Exception
    */
   @Test
-  public void testDowngradeScope() throws Exception {
-    Cluster cluster = createNiceMock(Cluster.class);
+  public void testDowngradeForPatch() throws Exception {
     UpgradeHelper upgradeHelper = createNiceMock(UpgradeHelper.class);
     ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
-    RepositoryVersionDAO repositoryVersionDAO = 
createNiceMock(RepositoryVersionDAO.class);
-    RepositoryVersionEntity hdfsRepositoryVersion = 
createNiceMock(RepositoryVersionEntity.class);
-    Service service = createNiceMock(Service.class);
     UpgradePack upgradePack = createNiceMock(UpgradePack.class);
 
     expect(upgradeHelper.suggestUpgradePack(EasyMock.anyString(), 
EasyMock.anyObject(StackId.class),
         EasyMock.anyObject(StackId.class), EasyMock.anyObject(Direction.class),
         EasyMock.anyObject(UpgradeType.class), 
EasyMock.anyString())).andReturn(upgradePack).once();
 
-    
expect(service.getDesiredRepositoryVersion()).andReturn(hdfsRepositoryVersion).once();
-    expect(cluster.getService("HDFS")).andReturn(service).atLeastOnce();
 
     Map<String, Object> requestMap = new HashMap<>();
     requestMap.put(UpgradeResourceProvider.UPGRADE_TYPE, 
UpgradeType.NON_ROLLING.name());
@@ -159,8 +349,8 @@ public class UpgradeContextTest extends EasyMockSupport {
 
     replayAll();
 
-    UpgradeContext context = new UpgradeContext(cluster, requestMap, null, 
upgradeHelper,
-        m_upgradeDAO, repositoryVersionDAO, configHelper);
+    UpgradeContext context = new UpgradeContext(m_cluster, requestMap, null, 
upgradeHelper,
+        m_upgradeDAO, m_repositoryVersionDAO, configHelper);
 
     assertEquals(Direction.DOWNGRADE, context.getDirection());
     assertEquals(RepositoryType.PATCH, context.getOrchestrationType());

Reply via email to