AMBARI-7245. Cannot get HDFS configs tab to load (just spinners) (vbrodetskyi)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6722aba8 Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6722aba8 Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6722aba8 Branch: refs/heads/branch-alerts-dev Commit: 6722aba8299d1d50a067928388102653b954f907 Parents: 9588d2a Author: Vitaly Brodetskyi <vbrodets...@hortonworks.com> Authored: Wed Sep 10 20:45:57 2014 +0300 Committer: Vitaly Brodetskyi <vbrodets...@hortonworks.com> Committed: Wed Sep 10 20:45:57 2014 +0300 ---------------------------------------------------------------------- .../server/upgrade/UpgradeCatalog170.java | 83 ++++++++++++++++++++ .../server/upgrade/UpgradeCatalog170Test.java | 27 ++++++- 2 files changed, 108 insertions(+), 2 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/6722aba8/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java index ee67330..17750f9 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java @@ -18,6 +18,8 @@ package org.apache.ambari.server.upgrade; +import java.lang.reflect.Type; + import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -30,6 +32,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.Date; import javax.persistence.EntityManager; import javax.persistence.TypedQuery; @@ -39,6 +42,7 @@ import javax.persistence.criteria.Expression; import javax.persistence.criteria.Predicate; 
import javax.persistence.criteria.Root; +import com.google.common.reflect.TypeToken; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.controller.AmbariManagementController; @@ -53,11 +57,14 @@ import org.apache.ambari.server.orm.dao.PrincipalTypeDAO; import org.apache.ambari.server.orm.dao.PrivilegeDAO; import org.apache.ambari.server.orm.dao.ResourceDAO; import org.apache.ambari.server.orm.dao.ResourceTypeDAO; +import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO; import org.apache.ambari.server.orm.dao.UserDAO; import org.apache.ambari.server.orm.dao.ViewDAO; import org.apache.ambari.server.orm.dao.ViewInstanceDAO; import org.apache.ambari.server.orm.entities.ClusterEntity; import org.apache.ambari.server.orm.entities.HostRoleCommandEntity; +import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity; +import org.apache.ambari.server.orm.entities.ClusterConfigEntity; import org.apache.ambari.server.orm.entities.HostRoleCommandEntity_; import org.apache.ambari.server.orm.entities.KeyValueEntity; import org.apache.ambari.server.orm.entities.PermissionEntity; @@ -73,6 +80,7 @@ import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Config; import org.apache.ambari.server.state.ConfigHelper; +import org.apache.ambari.server.utils.StageUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -586,6 +594,81 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog { renamePigProperties(); upgradePermissionModel(); addJobsViewPermissions(); + moveConfigGroupsGlobalToEnv(); + } + + private void moveConfigGroupsGlobalToEnv() throws AmbariException { + final ConfigGroupConfigMappingDAO confGroupConfMappingDAO = injector.getInstance(ConfigGroupConfigMappingDAO.class); + ConfigHelper configHelper = injector.getInstance(ConfigHelper.class); + final ClusterDAO 
clusterDAO = injector.getInstance(ClusterDAO.class); + AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class); + List<ConfigGroupConfigMappingEntity> configGroupConfigMappingEntities = confGroupConfMappingDAO.findAll(); + List<ConfigGroupConfigMappingEntity> configGroupsWithGlobalConfigs = new ArrayList<ConfigGroupConfigMappingEntity>(); + Type type = new TypeToken<Map<String, String>>() {}.getType(); + + for (ConfigGroupConfigMappingEntity entity : configGroupConfigMappingEntities) { + if (entity.getConfigType().equals(Configuration.GLOBAL_CONFIG_TAG)) { + configGroupsWithGlobalConfigs.add(entity); + } + } + + for (ConfigGroupConfigMappingEntity entity : configGroupsWithGlobalConfigs) { + String configData = entity.getClusterConfigEntity().getData(); + Map<String, String> properties = StageUtils.getGson().fromJson(configData, type); + Cluster cluster = ambariManagementController.getClusters().getClusterById(entity.getClusterId()); + HashMap<String, HashMap<String, String>> configs = new HashMap<String, HashMap<String, String>>(); + + for (Entry<String, String> property : properties.entrySet()) { + Set<String> configTypes = configHelper.findConfigTypesByPropertyName(cluster.getCurrentStackVersion(), + property.getKey(), cluster.getClusterName()); + // i'm not sure, but i hope that every service property is unique + String configType = configTypes.iterator().next(); + + if (configs.containsKey(configType)) { + HashMap<String, String> config = configs.get(configType); + config.put(property.getKey(), property.getValue()); + } else { + HashMap<String, String> config = new HashMap<String, String>(); + config.put(property.getKey(), property.getValue()); + configs.put(configType, config); + } + } + + for (Entry<String, HashMap<String, String>> config : configs.entrySet()) { + + String tag; + if(cluster.getConfigsByType(config.getKey()) == null) { + tag = "version1"; + } else { + tag = "version" + 
System.currentTimeMillis(); + } + + ClusterConfigEntity clusterConfigEntity = new ClusterConfigEntity(); + clusterConfigEntity.setClusterEntity(entity.getClusterConfigEntity().getClusterEntity()); + clusterConfigEntity.setClusterId(cluster.getClusterId()); + clusterConfigEntity.setType(config.getKey()); + clusterConfigEntity.setVersion(cluster.getNextConfigVersion(config.getKey())); + clusterConfigEntity.setTag(tag); + clusterConfigEntity.setTimestamp(new Date().getTime()); + clusterConfigEntity.setData(StageUtils.getGson().toJson(config.getValue())); + clusterDAO.createConfig(clusterConfigEntity); + + + ConfigGroupConfigMappingEntity configGroupConfigMappingEntity = new ConfigGroupConfigMappingEntity(); + configGroupConfigMappingEntity.setTimestamp(System.currentTimeMillis()); + configGroupConfigMappingEntity.setClusterId(entity.getClusterId()); + configGroupConfigMappingEntity.setClusterConfigEntity(clusterConfigEntity); + configGroupConfigMappingEntity.setConfigGroupEntity(entity.getConfigGroupEntity()); + configGroupConfigMappingEntity.setConfigGroupId(entity.getConfigGroupId()); + configGroupConfigMappingEntity.setConfigType(config.getKey()); + configGroupConfigMappingEntity.setVersionTag(clusterConfigEntity.getTag()); + confGroupConfMappingDAO.create(configGroupConfigMappingEntity); + } + } + + for (ConfigGroupConfigMappingEntity entity : configGroupsWithGlobalConfigs) { + confGroupConfMappingDAO.remove(entity); + } } /** http://git-wip-us.apache.org/repos/asf/ambari/blob/6722aba8/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java index abd97d6..d3d7f7c 100644 --- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java +++ 
b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog170Test.java @@ -73,6 +73,7 @@ import org.apache.ambari.server.orm.dao.ResourceTypeDAO; import org.apache.ambari.server.orm.dao.UserDAO; import org.apache.ambari.server.orm.dao.ViewDAO; import org.apache.ambari.server.orm.dao.ViewInstanceDAO; +import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO; import org.apache.ambari.server.orm.entities.ClusterEntity; import org.apache.ambari.server.orm.entities.HostRoleCommandEntity; import org.apache.ambari.server.orm.entities.KeyValueEntity; @@ -81,6 +82,8 @@ import org.apache.ambari.server.orm.entities.ResourceEntity; import org.apache.ambari.server.orm.entities.UserEntity; import org.apache.ambari.server.orm.entities.ViewEntity; import org.apache.ambari.server.orm.entities.ViewInstanceEntity; +import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity; +import org.apache.ambari.server.orm.entities.ClusterConfigEntity; import org.apache.ambari.server.state.Cluster; import org.apache.ambari.server.state.Clusters; import org.apache.ambari.server.state.Config; @@ -230,6 +233,8 @@ public class UpgradeCatalog170Test { Config config = createStrictMock(Config.class); Config pigConfig = createStrictMock(Config.class); + ClusterConfigEntity clusterConfigEntity = createNiceMock(ClusterConfigEntity.class); + ConfigGroupConfigMappingDAO configGroupConfigMappingDAO = createNiceMock(ConfigGroupConfigMappingDAO.class); UserDAO userDAO = createNiceMock(UserDAO.class); PrincipalDAO principalDAO = createNiceMock(PrincipalDAO.class); PrincipalTypeDAO principalTypeDAO = createNiceMock(PrincipalTypeDAO.class); @@ -261,6 +266,14 @@ public class UpgradeCatalog170Test { UpgradeCatalog170 upgradeCatalog = createMockBuilder(UpgradeCatalog170.class) .addMockedMethod(m).addMockedMethod(n).createMock(); + List<ConfigGroupConfigMappingEntity> configGroupConfigMappingEntities = + new ArrayList<ConfigGroupConfigMappingEntity>(); + 
ConfigGroupConfigMappingEntity configGroupConfigMappingEntity = new ConfigGroupConfigMappingEntity(); + configGroupConfigMappingEntity.setConfigType(Configuration.GLOBAL_CONFIG_TAG); + configGroupConfigMappingEntity.setClusterConfigEntity(clusterConfigEntity); + configGroupConfigMappingEntity.setClusterId(1L); + configGroupConfigMappingEntities.add(configGroupConfigMappingEntity); + Map<String, Cluster> clustersMap = new HashMap<String, Cluster>(); clustersMap.put("c1", cluster); @@ -275,6 +288,9 @@ public class UpgradeCatalog170Test { envDicts.add("hadoop-env"); envDicts.add("global"); + Set<String> configTypes = new HashSet<String>(); + configTypes.add("hadoop-env"); + Map<String, String> contentOfHadoopEnv = new HashMap<String, String>(); contentOfHadoopEnv.put("content", "env file contents"); @@ -322,21 +338,27 @@ public class UpgradeCatalog170Test { expect(hrc.get(isA(SingularAttribute.class))).andReturn(errorLog).once(); expect(q.setMaxResults(1000)).andReturn(q).anyTimes(); expect(q.getResultList()).andReturn(r).anyTimes(); + expect(clusterConfigEntity.getData()).andReturn("{\"dtnode_heapsize\":\"1028m\"}"); expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes(); expect(injector.getInstance(ConfigHelper.class)).andReturn(configHelper).anyTimes(); expect(injector.getInstance(AmbariManagementController.class)).andReturn(amc).anyTimes(); expect(amc.getClusters()).andReturn(clusters).anyTimes(); expect(clusters.getClusters()).andReturn(clustersMap).anyTimes(); + expect(clusters.getClusterById(1L)).andReturn(clustersMap.values().iterator().next()).anyTimes(); expect(cluster.getDesiredConfigByType("global")).andReturn(config).anyTimes(); + expect(cluster.getClusterId()).andReturn(1L); + expect(cluster.getNextConfigVersion("hadoop-env")).andReturn(3L); expect(config.getProperties()).andReturn(globalConfigs).anyTimes(); expect(cluster.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.1")).anyTimes(); 
expect(cluster.getClusterName()).andReturn("c1").anyTimes(); expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "prop1", "c1")).andReturn(envDicts).once(); expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "smokeuser_keytab", "c1")).andReturn(new HashSet<String>()).once(); expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "content", "c1")).andReturn(envDicts).once(); + expect(configHelper.findConfigTypesByPropertyName(new StackId("HDP", "2.1"), "dtnode_heapsize", "c1")).andReturn(configTypes).once(); expect(configHelper.getPropertyValueFromStackDefenitions(cluster, "hadoop-env", "content")).andReturn("env file contents").once(); + expect(injector.getInstance(ConfigGroupConfigMappingDAO.class)).andReturn(configGroupConfigMappingDAO).anyTimes(); expect(injector.getInstance(UserDAO.class)).andReturn(userDAO).anyTimes(); expect(injector.getInstance(PrincipalDAO.class)).andReturn(principalDAO).anyTimes(); expect(injector.getInstance(PrincipalTypeDAO.class)).andReturn(principalTypeDAO).anyTimes(); @@ -349,6 +371,7 @@ public class UpgradeCatalog170Test { expect(injector.getInstance(PrivilegeDAO.class)).andReturn(privilegeDAO).anyTimes(); expect(injector.getInstance(KeyValueDAO.class)).andReturn(keyValueDAO).anyTimes(); + expect(configGroupConfigMappingDAO.findAll()).andReturn(configGroupConfigMappingEntities).once(); expect(userDAO.findAll()).andReturn(Collections.<UserEntity> emptyList()).times(2); expect(clusterDAO.findAll()).andReturn(Collections.<ClusterEntity> emptyList()).anyTimes(); expect(viewDAO.findAll()).andReturn(Collections.<ViewEntity> emptyList()).anyTimes(); @@ -381,8 +404,8 @@ public class UpgradeCatalog170Test { replay(entityManager, trans, upgradeCatalog, cb, cq, hrc, q, userRolesResultSet); replay(dbAccessor, configuration, injector, cluster, clusters, amc, config, configHelper, pigConfig); - replay(userDAO, clusterDAO, viewDAO, viewInstanceDAO, permissionDAO); - 
replay(resourceTypeDAO, resourceDAO, keyValueDAO, privilegeDAO); + replay(userDAO, clusterDAO, viewDAO, viewInstanceDAO, permissionDAO, configGroupConfigMappingDAO); + replay(resourceTypeDAO, resourceDAO, keyValueDAO, privilegeDAO, clusterConfigEntity); replay(jobsView, showJobsKeyValue, user); Class<?> c = AbstractUpgradeCatalog.class;