Repository: ambari
Updated Branches:
  refs/heads/branch-2.2 4e50772cf -> cc8d1ebe2


AMBARI-14674 - Cannot Finalize Downgrade Due To Detached ClusterEntity 
(jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cc8d1ebe
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cc8d1ebe
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cc8d1ebe

Branch: refs/heads/branch-2.2
Commit: cc8d1ebe25acbcd24888b7ecfa52ebc982286d48
Parents: 4e50772
Author: Jonathan Hurley <[email protected]>
Authored: Thu Jan 14 15:25:32 2016 -0500
Committer: Jonathan Hurley <[email protected]>
Committed: Fri Jan 15 15:38:47 2016 -0500

----------------------------------------------------------------------
 .../ambari/server/orm/dao/ClusterDAO.java       |  32 +-
 .../orm/entities/ClusterConfigEntity.java       |   7 +-
 .../server/state/cluster/ClusterImpl.java       | 132 ++++---
 .../server/upgrade/UpgradeCatalog170.java       |  58 +--
 .../server/orm/dao/ServiceConfigDAOTest.java    | 350 ++++++++++++++++++-
 .../server/state/cluster/ClusterTest.java       |  85 +++++
 6 files changed, 584 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
index 508a41c..4c757f6 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ClusterDAO.java
@@ -19,12 +19,10 @@
 package org.apache.ambari.server.orm.dao;
 
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 
 import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
-import javax.persistence.Query;
 import javax.persistence.TypedQuery;
 import javax.persistence.criteria.CriteriaBuilder;
 import javax.persistence.criteria.CriteriaQuery;
@@ -204,6 +202,32 @@ public class ClusterDAO {
 
     return daoUtils.selectList(query);
   }
+  
+  /**
+   * Gets the cluster configuration mappings for all configurations of the
+   * specified cluster which belong to the given stack.
+   *
+   * @param clusterId
+   *          the cluster to get the configuration mappings for.
+   * @param stackId
+   *          the stack to get the configuration mappings for (not {@code 
null}).
+   * @return the configuration mappings for the specified cluster and stack.
+   */
+  @RequiresSession
+  public List<ClusterConfigMappingEntity> getClusterConfigMappingsByStack(long 
clusterId,
+      StackId stackId) {
+    StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
+        stackId.getStackVersion());
+
+    TypedQuery<ClusterConfigMappingEntity> query = 
entityManagerProvider.get().createNamedQuery(
+        "ClusterConfigEntity.findClusterConfigMappingsByStack",
+        ClusterConfigMappingEntity.class);
+
+    query.setParameter("clusterId", clusterId);
+    query.setParameter("stack", stackEntity);
+
+    return daoUtils.selectList(query);
+  }  
 
   @RequiresSession
   public List<ClusterConfigMappingEntity> 
getClusterConfigMappingEntitiesByCluster(long clusterId) {
@@ -255,8 +279,8 @@ public class ClusterDAO {
    * Update config mapping in DB
    */
   @Transactional
-  public void mergeConfigMapping(ClusterConfigMappingEntity mappingEntity) {
-    entityManagerProvider.get().merge(mappingEntity);
+  public ClusterConfigMappingEntity 
mergeConfigMapping(ClusterConfigMappingEntity mappingEntity) {
+    return entityManagerProvider.get().merge(mappingEntity);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
index 7da77e3..c2b98b0 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ClusterConfigEntity.java
@@ -51,7 +51,12 @@ import javax.persistence.UniqueConstraint;
 @NamedQueries({
     @NamedQuery(name = "ClusterConfigEntity.findNextConfigVersion", query = 
"SELECT COALESCE(MAX(clusterConfig.version),0) + 1 as nextVersion FROM 
ClusterConfigEntity clusterConfig WHERE clusterConfig.type=:configType AND 
clusterConfig.clusterId=:clusterId"),
     @NamedQuery(name = "ClusterConfigEntity.findAllConfigsByStack", query = 
"SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE 
clusterConfig.clusterId=:clusterId AND clusterConfig.stack=:stack"),
-    @NamedQuery(name = "ClusterConfigEntity.findLatestConfigsByStack", query = 
"SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE 
clusterConfig.clusterId=:clusterId AND clusterConfig.timestamp = (SELECT 
MAX(clusterConfig2.timestamp) FROM ClusterConfigEntity clusterConfig2 WHERE 
clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND 
clusterConfig2.type = clusterConfig.type)") })
+    @NamedQuery(name = "ClusterConfigEntity.findLatestConfigsByStack", query = 
"SELECT clusterConfig FROM ClusterConfigEntity clusterConfig WHERE 
clusterConfig.clusterId=:clusterId AND clusterConfig.timestamp = (SELECT 
MAX(clusterConfig2.timestamp) FROM ClusterConfigEntity clusterConfig2 WHERE 
clusterConfig2.clusterId=:clusterId AND clusterConfig2.stack=:stack AND 
clusterConfig2.type = clusterConfig.type)"),
+    @NamedQuery(name = "ClusterConfigEntity.findClusterConfigMappingsByStack",
+      query = "SELECT mapping FROM ClusterConfigMappingEntity mapping " +
+        "JOIN ClusterConfigEntity config ON mapping.typeName = config.type AND 
mapping.tag = config.tag " +
+        "WHERE mapping.clusterId = :clusterId AND config.stack = :stack")
+})
 public class ClusterConfigEntity {
 
   @Id

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index 9c66d99..19f5863 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -641,7 +641,7 @@ public class ClusterImpl implements Cluster {
         clusterEntity.setClusterName(clusterName);
 
         // RollbackException possibility if UNIQUE constraint violated
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
         clusters.updateClusterName(oldName, clusterName);
         this.clusterName = clusterName;
       }
@@ -976,7 +976,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setDesiredStack(stackEntity);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
 
         if (cascade) {
           for (Service service : getServices().values()) {
@@ -1044,7 +1044,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setProvisioningState(provisioningState);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
       }
     } finally {
       clusterGlobalLock.writeLock().unlock();
@@ -1078,7 +1078,7 @@ public class ClusterImpl implements Cluster {
       ClusterEntity clusterEntity = getClusterEntity();
       if (clusterEntity != null) {
         clusterEntity.setSecurityType(securityType);
-        clusterDAO.merge(clusterEntity);
+        clusterEntity = clusterDAO.merge(clusterEntity);
       }
     } finally {
       clusterGlobalLock.writeLock().unlock();
@@ -1168,7 +1168,7 @@ public class ClusterImpl implements Cluster {
             HostVersionEntity hostVersionEntity = 
existingHostToHostVersionEntity.get(hostname);
             if (hostVersionEntity.getState() != desiredState) {
               hostVersionEntity.setState(desiredState);
-              hostVersionDAO.merge(hostVersionEntity);
+            hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
             }
 
           // Maintain the invariant that only one HostVersionEntity is allowed
@@ -1179,7 +1179,7 @@ public class ClusterImpl implements Cluster {
               && desiredState == RepositoryVersionState.CURRENT
               && currentHostVersionEntity.getState() == 
RepositoryVersionState.CURRENT) {
             
currentHostVersionEntity.setState(RepositoryVersionState.INSTALLED);
-            hostVersionDAO.merge(currentHostVersionEntity);
+            currentHostVersionEntity = 
hostVersionDAO.merge(currentHostVersionEntity);
           }
         }
       }
@@ -1278,7 +1278,7 @@ public class ClusterImpl implements Cluster {
         // Update existing host stack version
         HostVersionEntity hostVersionEntity = 
existingHostStackVersions.get(hostname);
         hostVersionEntity.setState(repositoryVersionState);
-        hostVersionDAO.merge(hostVersionEntity);
+        hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
       }
     }
   }
@@ -1543,7 +1543,7 @@ public class ClusterImpl implements Cluster {
         // Alternatively, transition to CURRENT during initial bootstrap if at 
least one host component advertised a version
         if (hostSummary.isUpgradeFinished() && 
hostVersionEntity.getState().equals(RepositoryVersionState.UPGRADING) || 
performingInitialBootstrap) {
           hostVersionEntity.setState(RepositoryVersionState.CURRENT);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
       } else {
         // Handle transitions during a Stack Upgrade
@@ -1552,12 +1552,12 @@ public class ClusterImpl implements Cluster {
         // INSTALLED->UPGRADING->UPGRADED in one shot.
         if 
(hostSummary.isUpgradeInProgress(currentVersionEntity.getRepositoryVersion().getVersion())
 && hostVersionEntity.getState().equals(RepositoryVersionState.INSTALLED)) {
           hostVersionEntity.setState(RepositoryVersionState.UPGRADING);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
 
         if (hostSummary.isUpgradeFinished() && 
hostVersionEntity.getState().equals(RepositoryVersionState.UPGRADING)) {
           hostVersionEntity.setState(RepositoryVersionState.UPGRADED);
-          hostVersionDAO.merge(hostVersionEntity);
+          hostVersionEntity = hostVersionDAO.merge(hostVersionEntity);
         }
       }
     } finally {
@@ -1639,7 +1639,7 @@ public class ClusterImpl implements Cluster {
       System.currentTimeMillis(), System.currentTimeMillis(), userName);
     clusterVersionDAO.create(clusterVersionEntity);
     clusterEntity.getClusterVersionEntities().add(clusterVersionEntity);
-    clusterDAO.merge(clusterEntity);
+    clusterEntity = clusterDAO.merge(clusterEntity);
   }
 
   /**
@@ -1735,13 +1735,13 @@ public class ClusterImpl implements Cluster {
           ClusterVersionEntity currentVersion = 
clusterVersionDAO.findByClusterAndStateCurrent(getClusterName());
           if (currentVersion != null) {
             currentVersion.setState(RepositoryVersionState.INSTALLED);
-            clusterVersionDAO.merge(currentVersion);
+            currentVersion = clusterVersionDAO.merge(currentVersion);
           }
         }
 
         existingClusterVersion.setState(state);
         existingClusterVersion.setEndTime(System.currentTimeMillis());
-        clusterVersionDAO.merge(existingClusterVersion);
+        existingClusterVersion = 
clusterVersionDAO.merge(existingClusterVersion);
 
         if (state == RepositoryVersionState.CURRENT) {
           for (HostEntity hostEntity : clusterEntity.getHostEntities()) {
@@ -1760,10 +1760,10 @@ public class ClusterImpl implements Cluster {
                   existingClusterVersion.getRepositoryVersion().getId())) {
                   target = entity;
                   target.setState(state);
-                  hostVersionDAO.merge(target);
+                  target = hostVersionDAO.merge(target);
                 } else if (entity.getState() == 
RepositoryVersionState.CURRENT) {
                   entity.setState(RepositoryVersionState.INSTALLED);
-                  hostVersionDAO.merge(entity);
+                  entity = hostVersionDAO.merge(entity);
                 }
               }
             }
@@ -1853,11 +1853,11 @@ public class ClusterImpl implements Cluster {
           clusterStateDAO.create(clusterStateEntity);
           clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
           clusterEntity.setClusterStateEntity(clusterStateEntity);
-          clusterDAO.merge(clusterEntity);
+          clusterEntity = clusterDAO.merge(clusterEntity);
         } else {
           clusterStateEntity.setCurrentStack(stackEntity);
-          clusterStateDAO.merge(clusterStateEntity);
-          clusterDAO.merge(clusterEntity);
+          clusterStateEntity = clusterStateDAO.merge(clusterStateEntity);
+          clusterEntity = clusterDAO.merge(clusterEntity);
         }
       }
     } catch (RollbackException e) {
@@ -2235,8 +2235,9 @@ public class ClusterImpl implements Cluster {
           
c.setVersion(allConfigs.get(e.getType()).get(e.getTag()).getVersion());
 
           Set<DesiredConfig> configs = map.get(e.getType());
-          if (configs == null)
+          if (configs == null) {
             configs = new HashSet<>();
+          }
 
           configs.add(c);
 
@@ -2265,8 +2266,9 @@ public class ClusterImpl implements Cluster {
                 hostIdToName.get(mappingEntity.getHostId()), 
mappingEntity.getVersion()));
           }
 
-          for (DesiredConfig c: entry.getValue())
+          for (DesiredConfig c: entry.getValue()) {
             c.setHostOverrides(hostOverrides);
+          }
         }
       }
 
@@ -2317,7 +2319,7 @@ public class ClusterImpl implements Cluster {
         serviceConfigDAO.create(serviceConfigEntity);
         if (configGroup != null) {
           serviceConfigEntity.setHostIds(new 
ArrayList<Long>(configGroup.getHosts().keySet()));
-          serviceConfigDAO.merge(serviceConfigEntity);
+          serviceConfigEntity = serviceConfigDAO.merge(serviceConfigEntity);
         }
       }
     } finally {
@@ -2530,7 +2532,7 @@ public class ClusterImpl implements Cluster {
       for (ClusterConfigMappingEntity entity : mappingEntities) {
         if (configTypes.contains(entity.getType()) && entity.isSelected() > 0) 
{
           entity.setSelected(0);
-          clusterDAO.mergeConfigMapping(entity);
+          entity = clusterDAO.mergeConfigMapping(entity);
         }
       }
 
@@ -2597,7 +2599,7 @@ public class ClusterImpl implements Cluster {
     for (ClusterConfigMappingEntity e : entities) {
       if (e.isSelected() > 0 && e.getType().equals(type)) {
         e.setSelected(0);
-        clusterDAO.mergeConfigMapping(e);
+        e = clusterDAO.mergeConfigMapping(e);
       }
     }
 
@@ -3090,30 +3092,39 @@ public class ClusterImpl implements Cluster {
   public void applyLatestConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();
     try {
+      ClusterEntity clusterEntity = getClusterEntity();
+      Collection<ClusterConfigMappingEntity> configMappingEntities = 
clusterEntity.getConfigMappingEntities();
 
-      Collection<ClusterConfigMappingEntity> configMappingEntities =
-        clusterDAO.getClusterConfigMappingEntitiesByCluster(clusterId);
-
-      // disable previous config
+      // disable all configs
       for (ClusterConfigMappingEntity e : configMappingEntities) {
+        LOG.debug("{} with tag {} is unselected", e.getType(), e.getTag());
         e.setSelected(0);
       }
 
-      List<ClusterConfigEntity> clusterConfigsToMakeSelected =
-        clusterDAO.getLatestConfigurations(clusterId, stackId);
+      List<ClusterConfigMappingEntity> clusterConfigMappingsForStack = 
clusterDAO.getClusterConfigMappingsByStack(
+          clusterEntity.getClusterId(), stackId);
+
+      Collection<ClusterConfigMappingEntity> latestConfigMappingByStack = 
getLatestConfigMapping(
+          clusterConfigMappingsForStack);
 
-      for( ClusterConfigEntity clusterConfigToMakeSelected : 
clusterConfigsToMakeSelected ){
-        for (ClusterConfigMappingEntity configMappingEntity : 
configMappingEntities) {
-          String tag = configMappingEntity.getTag();
-          String type = configMappingEntity.getType();
+      // loop through all configs and set the latest to enabled for the
+      // specified stack
+      for(ClusterConfigMappingEntity e: configMappingEntities){
+        String type = e.getType();
+        String tag =  e.getTag();
 
-          if (clusterConfigToMakeSelected.getTag().equals(tag)
-              && clusterConfigToMakeSelected.getType().equals(type)) {
-            configMappingEntity.setSelected(1);
+        for (ClusterConfigMappingEntity latest : latestConfigMappingByStack) {
+          String t = latest.getType();
+          String tagLatest = latest.getTag();
+          if(type.equals(t) && tag.equals(tagLatest) ){//find the latest 
config of a given mapping entity
+            LOG.info("{} with version tag {} is selected for stack {}", type, 
tag, stackId.toString());
+            e.setSelected(1);
           }
         }
       }
 
+      clusterEntity.setConfigMappingEntities(configMappingEntities);
+      clusterEntity = clusterDAO.merge(clusterEntity);
       clusterDAO.mergeConfigMappings(configMappingEntities);
 
       cacheConfigurations();
@@ -3121,6 +3132,25 @@ public class ClusterImpl implements Cluster {
       clusterGlobalLock.writeLock().unlock();
     }
   }
+  
+  public Collection<ClusterConfigMappingEntity> 
getLatestConfigMapping(List<ClusterConfigMappingEntity> 
clusterConfigMappingEntities){
+    Map<String, ClusterConfigMappingEntity> temp = new HashMap<String, 
ClusterConfigMappingEntity>();
+    for (ClusterConfigMappingEntity e : clusterConfigMappingEntities) {
+      String type = e.getType();
+      if(temp.containsKey(type)){
+        ClusterConfigMappingEntity entityStored = temp.get(type);
+        Long timestampStored = entityStored.getCreateTimestamp();
+        Long timestamp = e.getCreateTimestamp();
+        if(timestamp > timestampStored){
+          temp.put(type, e); //find a newer config for the given type
+        }
+      } else {
+        temp.put(type, e); // first time encountering this type; add it
+      }
+    }
+
+    return temp.values();
+  }  
 
   /**
    * {@inheritDoc}
@@ -3137,10 +3167,21 @@ public class ClusterImpl implements Cluster {
     return new HashMap<>();
   }
 
-  // The caller should make sure global write lock is acquired.
+
+  /**
+   * Removes all configurations associated with the specified stack. The caller
+   * should make sure the cluster global write lock is acquired.
+   *
+   * @param stackId
+   *          the stack whose configurations should be removed (not {@code null}).
+   * @see Cluster#getClusterGlobalLock()
+   */
   @Transactional
   void removeAllConfigsForStack(StackId stackId) {
     ClusterEntity clusterEntity = getClusterEntity();
+
+    // make sure the entity isn't stale in the current unit of work.
+    clusterDAO.refresh(clusterEntity);
+
     long clusterId = clusterEntity.getClusterId();
 
     // this will keep track of cluster config mappings that need removal
@@ -3161,12 +3202,13 @@ public class ClusterImpl implements Cluster {
         clusterDAO.removeConfig(configEntity);
         removedClusterConfigs.add(configEntity);
       }
+
       serviceConfig.getClusterConfigEntities().clear();
       serviceConfigDAO.remove(serviceConfig);
       serviceConfigEntities.remove(serviceConfig);
     }
 
-    // remove any lefover cluster configurations that don't have a service
+    // remove any leftover cluster configurations that don't have a service
     // configuration (like cluster-env)
     List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
       clusterId, stackId);
@@ -3177,7 +3219,8 @@ public class ClusterImpl implements Cluster {
       removedClusterConfigs.add(clusterConfig);
     }
 
-    clusterDAO.merge(clusterEntity);
+    clusterEntity.setClusterConfigEntities(clusterConfigEntities);
+    clusterEntity = clusterDAO.merge(clusterEntity);
 
     // remove config mappings
     Collection<ClusterConfigMappingEntity> configMappingEntities =
@@ -3201,9 +3244,11 @@ public class ClusterImpl implements Cluster {
       }
     }
 
-    clusterDAO.merge(clusterEntity);
+    clusterEntity.setConfigMappingEntities(configMappingEntities);
+    clusterEntity = clusterDAO.merge(clusterEntity);
   }
 
+
   /**
    * {@inheritDoc}
    */
@@ -3211,10 +3256,6 @@ public class ClusterImpl implements Cluster {
   public void removeConfigurations(StackId stackId) {
     clusterGlobalLock.writeLock().lock();
     try {
-      // make sure the entity isn't stale in the current unit of work.
-      ClusterEntity clusterEntity = getClusterEntity();
-      clusterDAO.refresh(clusterEntity);
-
       removeAllConfigsForStack(stackId);
       cacheConfigurations();
     } finally {
@@ -3336,8 +3377,9 @@ public class ClusterImpl implements Cluster {
     // Iterate through the topology requests associated with this cluster and 
look for PROVISION request
     for (TopologyRequestEntity topologyRequest: topologyRequests) {
       TopologyRequest.Type requestAction = 
TopologyRequest.Type.valueOf(topologyRequest.getAction());
-      if (requestAction == TopologyRequest.Type.PROVISION)
+      if (requestAction == TopologyRequest.Type.PROVISION) {
         return true;
+      }
     }
 
     return false;

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
index 01b69db..61700e8 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
@@ -18,9 +18,32 @@
 
 package org.apache.ambari.server.upgrade;
 
-import com.google.common.reflect.TypeToken;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.lang.reflect.Type;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.configuration.Configuration.DatabaseType;
@@ -82,30 +105,9 @@ import org.apache.ambari.server.view.ViewRegistry;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import javax.persistence.criteria.CriteriaBuilder;
-import javax.persistence.criteria.CriteriaQuery;
-import javax.persistence.criteria.Expression;
-import javax.persistence.criteria.Predicate;
-import javax.persistence.criteria.Root;
-import java.lang.reflect.Type;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
+import com.google.common.reflect.TypeToken;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
 
 /**
  * Upgrade catalog for version 1.7.0.
@@ -714,7 +716,7 @@ public class UpgradeCatalog170 extends 
AbstractUpgradeCatalog {
       for (ClusterConfigMappingEntity configMapping : 
cluster.getConfigMappingEntities()) {
         if 
(configMapping.getType().equals(Configuration.MAPREDUCE2_LOG4J_CONFIG_TAG)) {
           configMapping.setSelected(0);
-          clusterDAO.mergeConfigMapping(configMapping);
+          configMapping = clusterDAO.mergeConfigMapping(configMapping);
         }
       }
       clusterDAO.merge(cluster);

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
index 4c186b5..4a0509e 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/ServiceConfigDAOTest.java
@@ -17,21 +17,28 @@
  */
 package org.apache.ambari.server.orm.dao;
 
-import java.util.List;
+import static org.easymock.EasyMock.createMockBuilder;
 
-import junit.framework.Assert;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
+import org.apache.ambari.server.orm.entities.ConfigGroupEntity;
 import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
 import org.apache.ambari.server.orm.entities.ServiceConfigEntity;
 import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,6 +46,9 @@ import org.junit.Test;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.persist.PersistService;
+import com.sun.research.ws.wadl.ResourceType;
+
+import junit.framework.Assert;
 
 public class ServiceConfigDAOTest {
   private static final StackId HDP_01 = new StackId("HDP", "0.1");
@@ -49,6 +59,8 @@ public class ServiceConfigDAOTest {
   private ClusterDAO clusterDAO;
   private ResourceTypeDAO resourceTypeDAO;
   private StackDAO stackDAO;
+  private ConfigGroupDAO configGroupDAO;
+  private ConfigGroupConfigMappingDAO configGroupConfigMappingDAO;
 
   @Before
   public void setup() throws Exception {
@@ -62,6 +74,8 @@ public class ServiceConfigDAOTest {
     stackDAO = injector.getInstance(StackDAO.class);
     serviceConfigDAO = injector.getInstance(ServiceConfigDAO.class);
     resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
+    configGroupDAO = injector.getInstance(ConfigGroupDAO.class);
+    configGroupConfigMappingDAO = 
injector.getInstance(ConfigGroupConfigMappingDAO.class);
   }
 
   @After
@@ -333,4 +347,336 @@ public class ServiceConfigDAOTest {
     serviceConfigs = serviceConfigDAO.getLatestServiceConfigs(clusterId, 
HDP_02);
     Assert.assertEquals(2, serviceConfigs.size());
   }
+
+  @Test
+  public void testConfiguration() throws Exception{
+    initClusterEntities();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+
+    Assert.assertTrue(!clusterEntity.getClusterConfigEntities().isEmpty());
+    Assert.assertTrue(!clusterEntity.getConfigMappingEntities().isEmpty());
+
+    Assert.assertEquals(5, clusterEntity.getClusterConfigEntities().size());
+    Assert.assertEquals(3, clusterEntity.getConfigMappingEntities().size());
+  }
+
+  @Test
+  public void testGetClusterConfigMappingByStack() throws Exception{
+    initClusterEntities();
+
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+
+    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = 
clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), 
HDP_01);
+    Assert.assertEquals(2, clusterConfigMappingEntities .size());
+
+    ClusterConfigMappingEntity e1 = clusterConfigMappingEntities.get(0);
+    String tag1 = e1.getTag();
+    Assert.assertEquals("version1", tag1);
+    String type1 = e1.getType();
+    Assert.assertEquals("oozie-site", type1);
+
+    ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
+    String tag2 = e2.getTag();
+    Assert.assertEquals("version2", tag2);
+    String type2 = e2.getType();
+    Assert.assertEquals("oozie-site", type2);
+  }
+
+  /**
+   * Test the get latest configuration query against clusterconfig table with 
configuration groups inserted
+   * */
+  @Test
+  public void testGetClusterConfigMappingByStackCG() throws Exception{
+    initClusterEntitiesWithConfigGroups();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+
+    List<ConfigGroupEntity> configGroupEntities = 
configGroupDAO.findAllByTag("OOZIE");
+
+    Assert.assertNotNull(configGroupEntities);
+    ConfigGroupEntity configGroupEntity = configGroupEntities.get(0);
+    Assert.assertNotNull(configGroupEntity);
+    Assert.assertEquals("c1", 
configGroupEntity.getClusterEntity().getClusterName());
+    Assert.assertEquals(Long.valueOf(1), configGroupEntity.getClusterEntity()
+      .getClusterId());
+    Assert.assertEquals("oozie_server", configGroupEntity.getGroupName());
+    Assert.assertEquals("OOZIE", configGroupEntity.getTag());
+    Assert.assertEquals("oozie server", configGroupEntity.getDescription());
+
+    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = 
clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), 
HDP_01);
+    Assert.assertEquals(2, clusterConfigMappingEntities .size());
+
+    ClusterConfigMappingEntity e1 = clusterConfigMappingEntities.get(0);
+    String tag1 = e1.getTag();
+    Assert.assertEquals("version1", tag1);
+    String type1 = e1.getType();
+    Assert.assertEquals("oozie-site", type1);
+
+    ClusterConfigMappingEntity e2 = clusterConfigMappingEntities.get(1);
+    String tag2 = e2.getTag();
+    Assert.assertEquals("version2", tag2);
+    String type2 = e2.getType();
+    Assert.assertEquals("oozie-site", type2);
+  }
+
+  /**
+   * Test
+   *
+   * When the last configuration of a given configuration type to be stored 
into the clusterconfig table is
+   * for a configuration group, there is no corresponding entry generated in 
the clusterconfigmapping.
+   *
+   * Therefore, the getlatestconfiguration query should skip configuration 
groups stored in the clusterconfig table.
+   *
+   * Test to determine the latest configuration of a given type whose 
version_tag
+   * exists in the clusterconfigmapping table.
+   *
+   * */
+  @Test
+  public void testGetLatestClusterConfigMappingByStack() throws Exception{
+    ClusterImpl cluster =
+        createMockBuilder(ClusterImpl.class).
+          addMockedMethod("getSessionManager").
+          addMockedMethod("getClusterName").
+          addMockedMethod("getSessionAttributes").
+          createMock();
+
+    initClusterEntities();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = 
clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), 
HDP_01);
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = 
cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+    Assert.assertEquals(1, latestMapingEntities.size());
+    for(ClusterConfigMappingEntity e: latestMapingEntities){
+      Assert.assertEquals("version2", e.getTag());
+      Assert.assertEquals("oozie-site", e.getType());
+    }
+  }
+
+  /**
+   * Test
+   *
+   * When the last configuration of a given configuration type to be stored 
into the clusterconfig table is
+   * for a configuration group, there is no corresponding entry generated in 
the clusterconfigmapping.
+   *
+   * Therefore, the getlatestconfiguration query should skip configuration 
groups stored in the clusterconfig table.
+   *
+   * Test to determine the latest configuration of a given type whose 
version_tag
+   * exists in the clusterconfigmapping table.
+   *
+   * */
+  @Test
+  public void testGetLatestClusterConfigMappingByStackCG() throws Exception{
+    ClusterImpl cluster =
+        createMockBuilder(ClusterImpl.class).
+          addMockedMethod("getSessionManager").
+          addMockedMethod("getClusterName").
+          addMockedMethod("getSessionAttributes").
+          createMock();
+
+    initClusterEntitiesWithConfigGroups();
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    List<ClusterConfigMappingEntity> clusterConfigMappingEntities = 
clusterDAO.getClusterConfigMappingsByStack(clusterEntity.getClusterId(), 
HDP_01);
+    Collection<ClusterConfigMappingEntity> latestMapingEntities = 
cluster.getLatestConfigMapping(clusterConfigMappingEntities);
+    Assert.assertEquals(1, latestMapingEntities.size());
+    for(ClusterConfigMappingEntity e: latestMapingEntities){
+      Assert.assertEquals("version2", e.getTag());
+      Assert.assertEquals("oozie-site", e.getType());
+    }
+  }
+
+  private void initClusterEntities() throws Exception{
+    String userName = "admin";
+
+    ServiceConfigEntity oozieServiceConfigEntity = 
createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), 
null);
+    ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
+
+    Long clusterId = clusterEntity.getClusterId();
+
+    if(null == clusterId){
+      clusterId = 1L;
+      clusterEntity.setClusterId(clusterId);
+      clusterEntity = clusterDAO.merge(clusterEntity);
+    }
+
+    StackEntity stackEntityHDP01 = 
stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
+    StackEntity stackEntityHDP02 = 
stackDAO.find(HDP_02.getStackName(),HDP_02.getStackVersion());
+
+    String oozieSite = "oozie-site";
+
+    for (int i = 1; i < 6; i++){
+      ClusterConfigEntity entity = new ClusterConfigEntity();
+      entity.setClusterEntity(clusterEntity);
+      entity.setClusterId(clusterEntity.getClusterId());
+      entity.setType(oozieSite);
+      entity.setVersion(Long.valueOf(i));
+      entity.setTag("version"+i);
+      entity.setTimestamp(new Date().getTime());
+      if(i < 4) {
+        entity.setStack(stackEntityHDP01);
+      } else {
+        entity.setStack(stackEntityHDP02);
+      }
+      entity.setData("");
+      clusterDAO.createConfig(entity);
+      clusterEntity.getClusterConfigEntities().add(entity);
+      clusterDAO.merge(clusterEntity);
+    }
+
+    Collection<ClusterConfigMappingEntity> entities = 
clusterEntity.getConfigMappingEntities();
+    if(null == entities){
+      entities = new ArrayList<ClusterConfigMappingEntity>();
+      clusterEntity.setConfigMappingEntities(entities);
+    }
+
+    ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
+    e1.setClusterEntity(clusterEntity);
+    e1.setClusterId(clusterEntity.getClusterId());
+    e1.setCreateTimestamp(System.currentTimeMillis());
+    e1.setSelected(0);
+    e1.setUser(userName);
+    e1.setType(oozieSite);
+    e1.setTag("version1");
+    entities.add(e1);
+    clusterDAO.merge(clusterEntity);
+
+    ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
+    e2.setClusterEntity(clusterEntity);
+    e2.setClusterId(clusterEntity.getClusterId());
+    e2.setCreateTimestamp(System.currentTimeMillis());
+    e2.setSelected(0);
+    e2.setUser(userName);
+    e2.setType(oozieSite);
+    e2.setTag("version2");
+    entities.add(e2);
+    clusterDAO.merge(clusterEntity);
+
+    ClusterConfigMappingEntity e3 = new ClusterConfigMappingEntity();
+    e3.setClusterEntity(clusterEntity);
+    e3.setClusterId(clusterEntity.getClusterId());
+    e3.setCreateTimestamp(System.currentTimeMillis());
+    e3.setSelected(1);
+    e3.setUser(userName);
+    e3.setType(oozieSite);
+    e3.setTag("version4");
+    entities.add(e3);
+    clusterDAO.merge(clusterEntity);
+  }
+
+  private void initClusterEntitiesWithConfigGroups() throws Exception{
+    String userName = "admin";
+
+    ServiceConfigEntity oozieServiceConfigEntity = 
createServiceConfig("OOZIE", userName, 1L, 1L, System.currentTimeMillis(), 
null);
+    ClusterEntity  clusterEntity = oozieServiceConfigEntity.getClusterEntity();
+
+    Long clusterId = clusterEntity.getClusterId();
+
+    if(null == clusterId){
+      clusterId = 1L;
+      clusterEntity.setClusterId(clusterId);
+      clusterEntity = clusterDAO.merge(clusterEntity);
+    }
+
+    StackEntity stackEntityHDP01 = 
stackDAO.find(HDP_01.getStackName(),HDP_01.getStackVersion());
+    String oozieSite = "oozie-site";
+
+    int count = 3;
+    for (int i = 1; i < count; i++){
+      ClusterConfigEntity entity = new ClusterConfigEntity();
+      entity.setClusterEntity(clusterEntity);
+      entity.setClusterId(clusterEntity.getClusterId());
+      entity.setType(oozieSite);
+      entity.setVersion(Long.valueOf(i));
+      entity.setTag("version"+i);
+      entity.setTimestamp(new Date().getTime());
+      entity.setStack(stackEntityHDP01);
+      entity.setData("");
+      clusterDAO.createConfig(entity);
+      clusterEntity.getClusterConfigEntities().add(entity);
+      clusterDAO.merge(clusterEntity);
+    }
+
+    Collection<ClusterConfigMappingEntity> entities = 
clusterEntity.getConfigMappingEntities();
+    if(null == entities){
+      entities = new ArrayList<ClusterConfigMappingEntity>();
+      clusterEntity.setConfigMappingEntities(entities);
+    }
+
+    ClusterConfigMappingEntity e1 = new ClusterConfigMappingEntity();
+    e1.setClusterEntity(clusterEntity);
+    e1.setClusterId(clusterEntity.getClusterId());
+    e1.setCreateTimestamp(System.currentTimeMillis());
+    e1.setSelected(0);
+    e1.setUser(userName);
+    e1.setType(oozieSite);
+    e1.setTag("version1");
+    entities.add(e1);
+    clusterDAO.merge(clusterEntity);
+
+    ClusterConfigMappingEntity e2 = new ClusterConfigMappingEntity();
+    e2.setClusterEntity(clusterEntity);
+    e2.setClusterId(clusterEntity.getClusterId());
+    e2.setCreateTimestamp(System.currentTimeMillis());
+    e2.setSelected(1);
+    e2.setUser(userName);
+    e2.setType(oozieSite);
+    e2.setTag("version2");
+    entities.add(e2);
+    clusterDAO.merge(clusterEntity);
+
+    ConfigGroupEntity configGroupEntity = new ConfigGroupEntity();
+
+    ResourceTypeEntity resourceTypeEntity = 
resourceTypeDAO.findById(ResourceTypeEntity.CLUSTER_RESOURCE_TYPE);
+    if (resourceTypeEntity == null) {
+      resourceTypeEntity = new ResourceTypeEntity();
+      resourceTypeEntity.setId(ResourceTypeEntity.CLUSTER_RESOURCE_TYPE);
+      
resourceTypeEntity.setName(ResourceTypeEntity.CLUSTER_RESOURCE_TYPE_NAME);
+      resourceTypeEntity = resourceTypeDAO.merge(resourceTypeEntity);
+    }
+
+    ResourceEntity resourceEntity = new ResourceEntity();
+    resourceEntity.setResourceType(resourceTypeEntity);
+
+    configGroupEntity.setClusterEntity(clusterEntity);
+    configGroupEntity.setClusterId(clusterEntity.getClusterId());
+    configGroupEntity.setGroupName("oozie_server");
+    configGroupEntity.setDescription("oozie server");
+    configGroupEntity.setTag("OOZIE");
+
+    ClusterConfigEntity configEntity = new ClusterConfigEntity();
+    configEntity.setType("oozie-site");
+    configEntity.setTag("version3");
+    configEntity.setData("someData");
+    configEntity.setAttributes("someAttributes");
+    configEntity.setStack(stackEntityHDP01);
+
+    List<ClusterConfigEntity> configEntities = new
+      ArrayList<ClusterConfigEntity>();
+    configEntities.add(configEntity);
+
+    configGroupDAO.create(configGroupEntity);
+
+    if (configEntities != null && !configEntities.isEmpty()) {
+      List<ConfigGroupConfigMappingEntity> configMappingEntities = new
+        ArrayList<ConfigGroupConfigMappingEntity>();
+
+      for (ClusterConfigEntity config : configEntities) {
+        config.setClusterEntity(clusterEntity);
+        config.setClusterId(clusterEntity.getClusterId());
+        clusterDAO.createConfig(config);
+
+        ConfigGroupConfigMappingEntity configMappingEntity = new
+          ConfigGroupConfigMappingEntity();
+        configMappingEntity.setClusterId(clusterEntity.getClusterId());
+        configMappingEntity.setClusterConfigEntity(config);
+        configMappingEntity.setConfigGroupEntity(configGroupEntity);
+        configMappingEntity.setConfigGroupId(configGroupEntity.getGroupId());
+        configMappingEntity.setVersionTag(config.getTag());
+        configMappingEntity.setConfigType(config.getType());
+        configMappingEntity.setTimestamp(System.currentTimeMillis());
+        configMappingEntities.add(configMappingEntity);
+        configGroupConfigMappingDAO.create(configMappingEntity);
+      }
+
+      
configGroupEntity.setConfigGroupConfigMappingEntities(configMappingEntities);
+      configGroupDAO.merge(configGroupEntity);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/cc8d1ebe/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index b3f4781..6687909 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -62,6 +62,7 @@ import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
 import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.ClusterConfigMappingEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
 import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
 import org.apache.ambari.server.orm.entities.ClusterStateEntity;
@@ -2269,6 +2270,90 @@ public class ClusterTest {
     assertTrue(clusterConfigEntity.getData().contains("two"));
     assertTrue(clusterConfigEntity.getData().contains("three"));
     assertTrue(clusterConfigEntity.getData().contains("four"));
+  }
 
+  /**
+   * Tests removing configurations and configuration mappings by stack.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRemoveConfigurations() throws Exception {
+    createDefaultCluster();
+    Cluster cluster = clusters.getCluster("c1");
+    ClusterEntity clusterEntity = clusterDAO.findByName("c1");
+    StackId stackId = cluster.getCurrentStackVersion();
+    StackId newStackId = new StackId("HDP-2.0.6");
+
+    StackEntity currentStack = stackDAO.find(stackId.getStackName(), 
stackId.getStackVersion());
+    StackEntity newStack = stackDAO.find(newStackId.getStackName(), 
newStackId.getStackVersion());
+
+    Assert.assertFalse(stackId.equals(newStackId));
+
+    String configType = "foo-type";
+
+    ClusterConfigEntity clusterConfig = new ClusterConfigEntity();
+    clusterConfig.setClusterEntity(clusterEntity);
+    clusterConfig.setConfigId(1L);
+    clusterConfig.setStack(currentStack);
+    clusterConfig.setTag("version-1");
+    clusterConfig.setData("{}");
+    clusterConfig.setType(configType);
+    clusterConfig.setTimestamp(1L);
+    clusterConfig.setVersion(1L);
+
+    clusterDAO.createConfig(clusterConfig);
+    clusterEntity.getClusterConfigEntities().add(clusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    ClusterConfigEntity newClusterConfig = new ClusterConfigEntity();
+    newClusterConfig.setClusterEntity(clusterEntity);
+    newClusterConfig.setConfigId(2L);
+    newClusterConfig.setStack(newStack);
+    newClusterConfig.setTag("version-2");
+    newClusterConfig.setData("{}");
+    newClusterConfig.setType(configType);
+    newClusterConfig.setTimestamp(2L);
+    newClusterConfig.setVersion(2L);
+
+    clusterDAO.createConfig(newClusterConfig);
+    clusterEntity.getClusterConfigEntities().add(newClusterConfig);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // config mapping set to 1
+    ClusterConfigMappingEntity configMapping = new 
ClusterConfigMappingEntity();
+    configMapping.setClusterEntity(clusterEntity);
+    configMapping.setCreateTimestamp(1L);
+    configMapping.setSelected(1);
+    configMapping.setTag("version-1");
+    configMapping.setType(configType);
+    configMapping.setUser("admin");
+
+    // new config mapping set to 0
+    ClusterConfigMappingEntity newConfigMapping = new 
ClusterConfigMappingEntity();
+    newConfigMapping.setClusterEntity(clusterEntity);
+    newConfigMapping.setCreateTimestamp(2L);
+    newConfigMapping.setSelected(0);
+    newConfigMapping.setTag("version-2");
+    newConfigMapping.setType(configType);
+    newConfigMapping.setUser("admin");
+
+    clusterDAO.persistConfigMapping(configMapping);
+    clusterDAO.persistConfigMapping(newConfigMapping);
+    clusterEntity.getConfigMappingEntities().add(configMapping);
+    clusterEntity.getConfigMappingEntities().add(newConfigMapping);
+    clusterEntity = clusterDAO.merge(clusterEntity);
+
+    // get back the cluster configs for the new stack
+    List<ClusterConfigEntity> clusterConfigs = clusterDAO.getAllConfigurations(
+        cluster.getClusterId(), newStackId);
+
+    Assert.assertEquals(1, clusterConfigs.size());
+
+    // remove the configs
+    cluster.removeConfigurations(newStackId);
+
+    clusterConfigs = clusterDAO.getAllConfigurations(cluster.getClusterId(), 
newStackId);
+    Assert.assertEquals(0, clusterConfigs.size());
   }
 }

Reply via email to