This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-feature-AMBARI-14714 by this push:
     new fce9f21  AMBARI-24194. Fix broken Java UTs in ambari-server code -- Part 1 (#1706)
fce9f21 is described below

commit fce9f21455da39ddc1c50b806a4430345baa4ce5
Author: sduan <sd...@hortonworks.com>
AuthorDate: Tue Jul 10 20:34:57 2018 -0700

    AMBARI-24194. Fix broken Java UTs in ambari-server code -- Part 1 (#1706)
---
 .../apache/ambari/server/orm/dao/ClusterDAO.java   |  2 +-
 .../actionmanager/ExecutionCommandWrapperTest.java |  1 +
 .../server/actionmanager/TestActionScheduler.java  |  6 ++
 .../TestActionSchedulerThreading.java              |  4 +-
 .../server/agent/HeartbeatProcessorTest.java       | 43 ++++++++++++--
 .../ambari/server/agent/HeartbeatTestHelper.java   | 67 +++++++++++++++++++++-
 .../ambari/server/agent/TestHeartbeatHandler.java  | 33 +++++------
 .../ambari/server/api/query/QueryImplTest.java     | 66 ++++++---------------
 .../internal/ClusterControllerImplTest.java        | 35 +++++++++++
 9 files changed, 185 insertions(+), 72 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index b6786fc..cb72583 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -294,7 +294,7 @@ public class ClusterDAO {
     return daoUtils.selectOne(query);
   }
 
-  /**
+  /**8
    * Create Cluster entity in Database
    * @param clusterEntity entity to create
    */
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
index ad31d29..7c7c4c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapperTest.java
@@ -125,6 +125,7 @@ public class ExecutionCommandWrapperTest {
     Cluster cluster1 = clusters.getCluster(CLUSTER1);
 
     ServiceGroup serviceGroup = cluster1.addServiceGroup("CORE", cluster1.getDesiredStackVersion().getStackId());
+    ormTestHelper.createMpack(cluster1.getDesiredStackVersion());
     cluster1.addService(serviceGroup, "HDFS", "HDFS");
 
     SERVICE_SITE_CLUSTER = new HashMap<>();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index 330d651..4e43f64 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -2910,10 +2910,13 @@ public class TestActionScheduler {
 
     HostRoleCommand hrc1 = hostRoleCommandFactory.create("h1", Role.NAMENODE, null, RoleCommand.EXECUTE);
     hrc1.setStatus(HostRoleStatus.COMPLETED);
+    hrc1.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
     HostRoleCommand hrc3 = hostRoleCommandFactory.create("h1", Role.AMBARI_SERVER_ACTION, null, RoleCommand.CUSTOM_COMMAND);
     hrc3.setStatus(HostRoleStatus.HOLDING);
+    hrc3.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
     HostRoleCommand hrc4 = hostRoleCommandFactory.create("h1", Role.FLUME_HANDLER, null, RoleCommand.EXECUTE);
     hrc4.setStatus(HostRoleStatus.PENDING);
+    hrc4.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
 
     List<HostRoleCommand> hostRoleCommands = Arrays.asList(hrc1, hrc3, hrc4);
 
@@ -2944,10 +2947,13 @@ public class TestActionScheduler {
 
     HostRoleCommand hrc1 = hostRoleCommandFactory.create("h1", Role.NAMENODE, null, RoleCommand.EXECUTE);
     hrc1.setStatus(HostRoleStatus.COMPLETED);
+    hrc1.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
     HostRoleCommand hrc3 = hostRoleCommandFactory.create(null, Role.AMBARI_SERVER_ACTION, null, RoleCommand.CUSTOM_COMMAND);
     hrc3.setStatus(HostRoleStatus.IN_PROGRESS);
+    hrc3.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
     HostRoleCommand hrc4 = hostRoleCommandFactory.create("h1", Role.FLUME_HANDLER, null, RoleCommand.EXECUTE);
     hrc4.setStatus(HostRoleStatus.PENDING);
+    hrc4.setExecutionCommandWrapper(new ExecutionCommandWrapper(new ExecutionCommand()));
 
     List<HostRoleCommand> hostRoleCommands = Arrays.asList(hrc1, hrc3, hrc4);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
index 07d7ebd..398815c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionSchedulerThreading.java
@@ -123,7 +123,7 @@ public class TestActionSchedulerThreading {
 
     // zoo-cfg for v1 on current stack
     properties.put("foo-property-1", "foo-value-1");
-    Config c1 = configFactory.createNew(stackId, cluster, configType, "version-1", properties, propertiesAttributes, 1L);
+    Config c1 = configFactory.createNew(stackId, cluster, configType, "version-1", properties, propertiesAttributes, null);
 
     // make v1 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c1), "note-1");
@@ -135,7 +135,7 @@ public class TestActionSchedulerThreading {
     // save v2
     // zoo-cfg for v2 on new stack
     properties.put("foo-property-2", "foo-value-2");
-    Config c2 = configFactory.createNew(newStackId, cluster, configType, "version-2", properties, propertiesAttributes, 1L);
+    Config c2 = configFactory.createNew(newStackId, cluster, configType, "version-2", properties, propertiesAttributes, null);
 
     // make v2 "current"
     cluster.addDesiredConfig("admin", Sets.newHashSet(c2), "note-2");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
index d738a42..ed9623c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatProcessorTest.java
@@ -44,6 +44,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.inject.Binder;
+import com.google.inject.Module;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.H2DatabaseCleaner;
 import org.apache.ambari.server.Role;
@@ -61,6 +63,7 @@ import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.audit.AuditLogger;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.events.listeners.upgrade.MpackInstallStateListener;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
@@ -75,6 +78,7 @@ import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.MpackInstallState;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceGroup;
 import org.apache.ambari.server.state.StackId;
@@ -1082,12 +1086,22 @@ public class HeartbeatProcessorTest {
 
   @Test
   public void testInstallPackagesWithId() throws Exception {
+    Cluster cluster = heartbeatTestHelper.getDummyCluster();
+    Service svc = cluster.getService("HDFS");
+    ServiceComponent svcComp = EasyMock.mock(ServiceComponent.class);
+    expect(svcComp.getName()).andReturn("nodemanager").anyTimes();
+    ServiceComponentHost scHost = EasyMock.mock(ServiceComponentHost.class);
+    expect(svcComp.getServiceComponentHost(EasyMock.anyString())).andReturn(scHost).anyTimes();
+    expect(scHost.getServiceComponentName()).andReturn("nodemanager").anyTimes();
+    replay(svcComp, scHost);
+    svc.addServiceComponent(svcComp);
     // required since this test method checks the DAO result of handling a
     // heartbeat which performs some async tasks
     EventBusSynchronizer.synchronizeCommandReportEventPublisher(injector);
 
     final HostRoleCommand command = hostRoleCommandFactory.create(DummyHostname1,
         Role.DATANODE, null, null);
+    command.setTaskId(1L);
 
     ActionManager am = actionManagerTestHelper.getMockActionManager();
     expect(am.getTasks(EasyMock.<List<Long>>anyObject())).andReturn(
@@ -1111,10 +1125,11 @@ public class HeartbeatProcessorTest {
     cmdReport.setTaskId(1);
     cmdReport.setCustomCommand("install_packages");
     cmdReport.setStructuredOut(json.toString());
-    cmdReport.setRoleCommand(RoleCommand.ACTIONEXECUTE.name());
+    cmdReport.setRoleCommand(RoleCommand.INSTALL.name());
     cmdReport.setStatus(HostRoleStatus.COMPLETED.name());
-    cmdReport.setRole("install_packages");
+    cmdReport.setRole("mpack_packages");
     cmdReport.setClusterId("1");
+    cmdReport.setServiceName("HDFS");
 
     List<CommandReport> reports = new ArrayList<>();
     reports.add(cmdReport);
@@ -1217,7 +1232,27 @@ public class HeartbeatProcessorTest {
    * @throws AmbariException
    */
   private Service addService(Cluster cluster, String serviceName) throws AmbariException {
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", cluster.getDesiredStackVersion().getStackId());
-    return cluster.addService(serviceGroup, serviceName, serviceName);
+    ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
+    Service service = cluster.getService(serviceName);
+    if (service == null) {
+      service = cluster.addService(serviceGroup, serviceName, serviceName);
+    }
+    return service;
+  }
+
+  /**
+   *
+   */
+  private class MockModule implements Module {
+    /**
+     *
+     */
+    @Override
+    public void configure(Binder binder) {
+      // this listener gets in the way of actually testing the concurrency
+      // between the threads; it slows them down too much, so mock it out
+      binder.bind(MpackInstallStateListener.class).toInstance(
+          EasyMock.createNiceMock(MpackInstallStateListener.class));
+    }
   }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
index fd39a0f..9228475 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/HeartbeatTestHelper.java
@@ -29,9 +29,11 @@ import static org.apache.ambari.server.agent.DummyHeartbeatConstants.HBASE;
 
 import java.lang.reflect.Method;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -45,17 +47,24 @@ import org.apache.ambari.server.actionmanager.Request;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.actionmanager.StageFactory;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.api.services.ServiceGroupKey;
 import org.apache.ambari.server.events.publishers.STOMPUpdatePublisher;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.OrmTestHelper;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
 import org.apache.ambari.server.orm.dao.HostDAO;
 import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceGroupDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceGroupEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.security.authorization.ResourceType;
 import org.apache.ambari.server.state.Cluster;
@@ -63,6 +72,11 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceFactory;
+import org.apache.ambari.server.state.ServiceGroup;
+import org.apache.ambari.server.state.ServiceGroupFactory;
+import org.apache.ambari.server.state.ServiceGroupImpl;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.cluster.ClustersImpl;
 import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
@@ -95,6 +109,15 @@ public class HeartbeatTestHelper {
   ClusterDAO clusterDAO;
 
   @Inject
+  ClusterServiceDAO clusterServiceDAO;
+
+  @Inject
+  ServiceGroupDAO serviceGroupDAO;
+
+  @Inject
+  ServiceDesiredStateDAO serviceDesiredStateDAO;
+
+  @Inject
   StackDAO stackDAO;
 
   @Inject
@@ -112,6 +135,12 @@ public class HeartbeatTestHelper {
   @Inject
   private StageFactory stageFactory;
 
+  @Inject
+  private ServiceFactory serviceFactory;
+
+  @Inject
+  private ServiceGroupFactory serviceGroupFactory;
+
   public final static StackId HDP_22_STACK = new StackId("HDP", "2.2.0");
 
   public static InMemoryDefaultTestModule getTestModule() {
@@ -182,7 +211,37 @@ public class HeartbeatTestHelper {
     clusterEntity.setResource(resourceEntity);
     clusterEntity.setDesiredStack(stackEntity);
 
-    clusterDAO.create(clusterEntity);
+    ServiceGroupEntity serviceGroupEntity = new ServiceGroupEntity();
+    serviceGroupEntity.setClusterId(clusterId);
+    serviceGroupEntity.setClusterEntity(clusterEntity);
+    serviceGroupEntity.setStack(stackEntity);
+    serviceGroupEntity.setServiceGroupName("CORE");
+    serviceGroupEntity.setServiceGroupId(1L);
+
+    ServiceDesiredStateEntity serviceDesiredStateEntity = new ServiceDesiredStateEntity();
+    serviceDesiredStateEntity.setClusterId(clusterId);
+    serviceDesiredStateEntity.setServiceGroupId(1L);
+    serviceDesiredStateEntity.setServiceId(100L);
+
+    ClusterServiceEntity clusterServiceEntity = new ClusterServiceEntity();
+    clusterServiceEntity.setServiceType("HDFS");
+    clusterServiceEntity.setServiceName("HDFS");
+    clusterServiceEntity.setServiceGroupEntity(serviceGroupEntity);
+    clusterServiceEntity.setClusterId(clusterId);
+    clusterServiceEntity.setServiceId(100L);
+    clusterServiceEntity.setServiceGroupId(1L);
+    clusterServiceEntity.setServiceDesiredStateEntity(serviceDesiredStateEntity);
+    clusterServiceEntity.setClusterEntity(clusterEntity);
+
+    List<ClusterServiceEntity> clusterServiceEntities = new ArrayList<>();
+    clusterServiceEntities.add(clusterServiceEntity);
+    clusterEntity.setClusterServiceEntities(clusterServiceEntities);
+
+    serviceDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
+    serviceDesiredStateDAO.merge(serviceDesiredStateEntity);
+    serviceGroupDAO.merge(serviceGroupEntity);
+    clusterServiceDAO.merge(clusterServiceEntity);
+    clusterDAO.merge(clusterEntity);
 
     // because this test method goes around the Clusters business object, we
     // forcefully will refresh the internal state so that any tests which
@@ -197,11 +256,15 @@ public class HeartbeatTestHelper {
     cluster.setDesiredStackVersion(stackId);
     cluster.setCurrentStackVersion(stackId);
 
+    ServiceGroup serviceGroup = serviceGroupFactory.createExisting(cluster, serviceGroupEntity);
+    Service service = serviceFactory.createExisting(cluster, serviceGroup, clusterServiceEntity);
+    cluster.addService(service);
+    cluster.addServiceGroup(serviceGroup);
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config = cf.createNew(cluster, "cluster-env", "version1", configProperties, new HashMap<>());
     cluster.addDesiredConfig("user", Collections.singleton(config));
 
-
     Map<String, String> hostAttributes = new HashMap<>();
     hostAttributes.put("os_family", "redhat");
     hostAttributes.put("os_release_version", "6.3");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
index cfb786c..88c48c5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
@@ -1317,25 +1317,17 @@ public class TestHeartbeatHandler {
     expected.setComponents(dummyComponents);
 
     Cluster cluster = heartbeatTestHelper.getDummyCluster();
-    Service service = EasyMock.createNiceMock(Service.class);
-    expect(service.getName()).andReturn("HDFS").atLeastOnce();
-
+    Service service = cluster.getService("HDFS");
     Map<String, ServiceComponent> componentMap = new HashMap<>();
     ServiceComponent nnComponent = EasyMock.createNiceMock(ServiceComponent.class);
-    expect(nnComponent.getName()).andReturn("NAMENODE").atLeastOnce();
-    expect(nnComponent.getStackId()).andReturn(dummyStackId).atLeastOnce();
+    expect(nnComponent.getName()).andReturn("NAMENODE").anyTimes();
+    expect(nnComponent.getStackId()).andReturn(service.getStackId()).anyTimes();
+    replay(nnComponent);
     componentMap.put("NAMENODE", nnComponent);
-
-    expect(service.getServiceComponents()).andReturn(componentMap).atLeastOnce();
-    expect(service.getServiceId()).andReturn(1L).atLeastOnce();
-    expect(service.getServiceType()).andReturn("HDFS").atLeastOnce();
-    expect(service.getStackId()).andReturn(dummyStackId).atLeastOnce();
+    service.addServiceComponent(nnComponent);
 
     ActionManager am = actionManagerTestHelper.getMockActionManager();
-
-    replay(service, nnComponent, am);
-
-    cluster.addService(service);
+    replay(am);
 
     HeartBeatHandler handler = heartbeatTestHelper.getHeartBeatHandler(am);
     // Make sure handler is not null, this has possibly been an intermittent problem in the past
@@ -1601,8 +1593,17 @@ public class TestHeartbeatHandler {
    * @throws AmbariException
    */
   private Service addService(Cluster cluster, String serviceName) throws AmbariException {
-    ServiceGroup serviceGroup = cluster.addServiceGroup("CORE", DummyStackId);
-    return cluster.addService(serviceGroup, serviceName, serviceName);
+    ServiceGroup serviceGroup = cluster.getServiceGroup("CORE");
+    if (serviceGroup == null) {
+      serviceGroup = cluster.addServiceGroup("CORE", DummyStackId);
+    }
+    Service service = null;
+    try {
+      service = cluster.getService(serviceName);
+    } catch (Exception e) {
+      service = cluster.addService(serviceGroup, serviceName, serviceName);
+    }
+    return service;
   }
 
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java
index 96d54c9..341f7e4 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/QueryImplTest.java
@@ -46,6 +46,8 @@ import org.apache.ambari.server.api.query.render.DefaultRenderer;
 import org.apache.ambari.server.api.query.render.Renderer;
 import org.apache.ambari.server.api.resources.ClusterResourceDefinition;
 import org.apache.ambari.server.api.resources.HostResourceDefinition;
+import org.apache.ambari.server.api.resources.MpackResourceDefinition;
+import org.apache.ambari.server.api.resources.OperatingSystemResourceDefinition;
 import org.apache.ambari.server.api.resources.ResourceDefinition;
 import org.apache.ambari.server.api.resources.ResourceInstance;
 import org.apache.ambari.server.api.resources.StackResourceDefinition;
@@ -195,14 +197,12 @@ public class QueryImplTest {
     QueryImpl instance = new TestQuery(mapIds, resourceDefinition);
 
     instance.addProperty("versions/*", null);
-    instance.addProperty("versions/operating_systems/*", null);
-    instance.addProperty("versions/operating_systems/repositories/*", null);
+    instance.addProperty("versions/mpack/*", null);
 
     instance.execute();
 
     Set<String> propertyIds = new HashSet<>();
-    propertyIds.add("versions/operating_systems/repositories/Repositories/repo_id");
-    propertyIds.add("versions/operating_systems/OperatingSystems/os_type");
+    propertyIds.add("versions/mpack/MpackInfo/mpack_id");
 
     Map<Resource, Set<Map<String, Object>>> resourcePropertiesMap = instance.getJoinedResourceProperties(propertyIds, null, null);
 
@@ -216,12 +216,11 @@ public class QueryImplTest {
       fail("No property maps found!");
     }
 
-    Assert.assertEquals(6, propertyMaps.size());
+    Assert.assertEquals(2, propertyMaps.size());
 
     for (Map<String, Object> map : propertyMaps) {
-      Assert.assertEquals(2, map.size());
-      Assert.assertTrue(map.containsKey("versions/operating_systems/OperatingSystems/os_type"));
-      Assert.assertTrue(map.containsKey("versions/operating_systems/repositories/Repositories/repo_id"));
+      Assert.assertEquals(1, map.size());
+      Assert.assertTrue(map.containsKey("versions/mpack/MpackInfo/mpack_id"));
     }
   }
 
@@ -236,7 +235,7 @@ public class QueryImplTest {
     QueryImpl instance = new TestQuery(mapIds, resourceDefinition);
 
     PredicateBuilder pb = new PredicateBuilder();
-    Predicate predicate = pb.property("versions/operating_systems/OperatingSystems/os_type").equals("centos5").toPredicate();
+    Predicate predicate = pb.property("versions/mpack/MpackInfo/mpack_id").equals("12345").toPredicate();
 
     instance.setUserPredicate(predicate);
 
@@ -454,10 +453,7 @@ public class QueryImplTest {
     Assert.assertEquals("StackVersion:1", versionNode.getName());
     Assert.assertEquals(Resource.Type.StackVersion, versionNode.getObject().getType());
 
-    Assert.assertEquals(7, versionNode.getChildren().size());
-
-    TreeNode<Resource> opSystemsNode = versionNode.getChild("operating_systems");
-    Assert.assertEquals(3, opSystemsNode.getChildren().size());
+    Assert.assertEquals(4, versionNode.getChildren().size());
 
     TreeNode<Resource> artifactsNode = versionNode.getChild("artifacts");
     Assert.assertEquals(1, artifactsNode.getChildren().size());
@@ -515,9 +511,10 @@ public class QueryImplTest {
       throws NoSuchParentResourceException, UnsupportedPropertyException,
     NoSuchResourceException, SystemException {
 
-    ResourceDefinition resourceDefinition = new StackVersionResourceDefinition();
+    ResourceDefinition resourceDefinition = new MpackResourceDefinition();
 
     Map<Resource.Type, String> mapIds = new HashMap<>();
+    mapIds.put(Resource.Type.OperatingSystem, "centos5");
 
     QueryImpl instance = new TestQuery(mapIds, resourceDefinition);
     instance.addProperty("operating_systems/*", null);
@@ -532,11 +529,11 @@ public class QueryImplTest {
 
     TreeNode<Resource> tree = result.getResultTree();
     Assert.assertEquals(1, tree.getChildren().size());
-    TreeNode<Resource> stackVersionNode = tree.getChild("StackVersion:1");
-    Assert.assertEquals("StackVersion:1", stackVersionNode.getName());
+    TreeNode<Resource> mpackNode = tree.getChild("Mpack:1");
+    Assert.assertEquals("Mpack:1", mpackNode.getName());
 
-    Assert.assertEquals(Resource.Type.StackVersion, stackVersionNode.getObject().getType());
-    Assert.assertEquals("1.2.1", stackVersionNode.getObject().getPropertyValue("Versions/stack_version"));
+    Assert.assertEquals(Resource.Type.Mpack, mpackNode.getObject().getType());
+    Assert.assertEquals("12345", mpackNode.getObject().getPropertyValue("MpackInfo/mpack_id"));
 
     QueryImpl instance2 = new TestQuery(mapIds, resourceDefinition);
     instance2.addProperty("operating_systems/*", null);
@@ -547,36 +544,11 @@ public class QueryImplTest {
 
     TreeNode<Resource> tree2 = result2.getResultTree();
     Assert.assertEquals(1, tree2.getChildren().size());
-    TreeNode<Resource> stackVersionNode2 = tree2.getChild("StackVersion:1");
-    Assert.assertEquals("StackVersion:1", stackVersionNode2.getName());
-
-    Assert.assertEquals(Resource.Type.StackVersion, stackVersionNode2.getObject().getType());
-    Assert.assertEquals("1.2.2", stackVersionNode2.getObject().getPropertyValue("Versions/stack_version"));
-
-    QueryImpl instance3 = new TestQuery(mapIds, resourceDefinition);
-
-    instance3.addProperty("operating_systems/*", null);
-
-    instance3.setUserPredicate(predicate);
-    //page_size = 2, offset = 1
-    instance3.setPageRequest(new PageRequestImpl(PageRequest.StartingPoint.OffsetStart, 2, 1, null, null));
-
-    Result result3 = instance3.execute();
-
-    TreeNode<Resource> tree3 = result3.getResultTree();
-    Assert.assertEquals(2, tree3.getChildren().size());
-    TreeNode<Resource> stackVersionNode3 = tree3.getChild("StackVersion:1");
-    Assert.assertEquals("StackVersion:1", stackVersionNode3.getName());
-
-    Assert.assertEquals(Resource.Type.StackVersion, stackVersionNode3.getObject().getType());
-    Assert.assertEquals("1.2.2", stackVersionNode3.getObject().getPropertyValue("Versions/stack_version"));
-
-    stackVersionNode3 = tree3.getChild("StackVersion:2");
-    Assert.assertEquals("StackVersion:2", stackVersionNode3.getName());
-
-    Assert.assertEquals(Resource.Type.StackVersion, stackVersionNode3.getObject().getType());
-    Assert.assertEquals("2.0.1", stackVersionNode3.getObject().getPropertyValue("Versions/stack_version"));
+    TreeNode<Resource> mpackNode2 = tree2.getChild("Mpack:1");
+    Assert.assertEquals("Mpack:1", mpackNode2.getName());
 
+    Assert.assertEquals(Resource.Type.Mpack, mpackNode2.getObject().getType());
+    Assert.assertEquals("67890", mpackNode2.getObject().getPropertyValue("MpackInfo/mpack_id"));
   }
 
   @Test
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java
index 7b26ebc..0c8bc49 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterControllerImplTest.java
@@ -943,6 +943,7 @@ public class ClusterControllerImplTest {
       providers.put(Resource.Type.StackVersion, new TestStackVersionResourceProvider());
       providers.put(Type.StackArtifact, new TestStackArtifactResourceProvider());
       providers.put(Type.Mpack, new TestMpackResourceProvider());
+      providers.put(Type.OperatingSystem, new TestOperatingSystemResourceProvider());
     }
 
     @Override
@@ -1155,6 +1156,11 @@ public class ClusterControllerImplTest {
 
 
   private static class TestStackVersionResourceProvider extends TestResourceProvider {
+
+    private TestStackVersionResourceProvider() {
+      super(StackVersionResourceProvider.PROPERTY_IDS, StackVersionResourceProvider.KEY_PROPERTY_IDS);
+    }
+
     @Override
     public Set<Resource> getResources(Request request, Predicate predicate)
         throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
@@ -1168,10 +1174,39 @@ public class ClusterControllerImplTest {
     }
   }
 
+  private static class TestOperatingSystemResourceProvider extends TestResourceProvider {
+    private TestOperatingSystemResourceProvider() {
+      super(OperatingSystemResourceProvider.propertyIds, OperatingSystemResourceProvider.keyPropertyIds);
+    }
+
+    @Override
+    public Set<Resource> getResources(Request request, Predicate predicate)
+        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+      Set<String> keyPropertyValues = new LinkedHashSet<>();
+
+      keyPropertyValues.add("centos5");
+      keyPropertyValues.add("centos6");
+      keyPropertyValues.add("oraclelinux5");
+
+      return getResources(Resource.Type.OperatingSystem, predicate, "OperatingSystems/os_type", keyPropertyValues);
+    }
+  }
+
   private static class TestMpackResourceProvider extends TestResourceProvider {
     private TestMpackResourceProvider() {
       super(MpackResourceProvider.PROPERTY_IDS, MpackResourceProvider.KEY_PROPERTY_IDS);
     }
+
+    @Override
+    public Set<Resource> getResources(Request request, Predicate predicate)
+        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+      Set<String> keyPropertyValues = new LinkedHashSet<>();
+
+      keyPropertyValues.add("12345");
+      keyPropertyValues.add("67890");
+
+      return getResources(Type.Mpack, predicate, "MpackInfo/mpack_id", keyPropertyValues);
+    }
   }
 
   private static class TestStackArtifactResourceProvider extends TestResourceProvider {
