AMBARI-6762. Include configuration in exported blueprints.
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8d464c2b Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8d464c2b Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8d464c2b Branch: refs/heads/trunk Commit: 8d464c2b286c522a47e2eb12f55d67e59ee61d37 Parents: 3e1ad2a Author: John Speidel <jspei...@hortonworks.com> Authored: Wed Aug 6 17:56:02 2014 -0400 Committer: John Speidel <jspei...@hortonworks.com> Committed: Wed Aug 6 22:32:48 2014 -0400 ---------------------------------------------------------------------- .../query/render/ClusterBlueprintRenderer.java | 400 +++++++- .../AmbariManagementControllerImpl.java | 12 +- .../server/controller/ConfigurationRequest.java | 20 + .../internal/BaseBlueprintProcessor.java | 82 +- .../BlueprintConfigurationProcessor.java | 700 ++++++++++++++ .../internal/ClusterResourceProvider.java | 406 +------- .../internal/ConfigurationResourceProvider.java | 13 +- .../server/controller/internal/HostGroup.java | 56 ++ .../render/ClusterBlueprintRendererTest.java | 32 +- .../BlueprintConfigurationProcessorTest.java | 932 +++++++++++++++++++ .../internal/ClusterResourceProviderTest.java | 103 -- 11 files changed, 2182 insertions(+), 574 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java index db3dff8..1afae39 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java +++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java @@ -19,6 +19,7 @@ package org.apache.ambari.server.api.query.render; import org.apache.ambari.server.api.query.QueryInfo; +import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.api.services.Request; import org.apache.ambari.server.api.services.Result; import org.apache.ambari.server.api.services.ResultImpl; @@ -26,15 +27,25 @@ import org.apache.ambari.server.api.services.ResultPostProcessor; import org.apache.ambari.server.api.services.ResultPostProcessorImpl; import org.apache.ambari.server.api.util.TreeNode; import org.apache.ambari.server.api.util.TreeNodeImpl; +import org.apache.ambari.server.controller.AmbariManagementController; +import org.apache.ambari.server.controller.AmbariServer; +import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor; +import org.apache.ambari.server.controller.internal.HostGroup; import org.apache.ambari.server.controller.internal.ResourceImpl; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.utilities.PropertyHelper; +import org.apache.ambari.server.state.DesiredConfig; +import org.apache.ambari.server.state.HostConfig; +import org.apache.ambari.server.state.PropertyInfo; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -44,6 +55,18 @@ import java.util.Set; */ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { + /** + * Management Controller used to get stack information. 
+ */ + private AmbariManagementController controller = AmbariServer.getController(); + + /** + * Map of configuration type to configuration properties which are required that a user + * input. These properties will be stripped from the exported blueprint. + */ + private Map<String, Collection<String>> propertiesToStrip = new HashMap<String, Collection<String>>(); + + // ----- Renderer ---------------------------------------------------------- @Override @@ -55,6 +78,17 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { null, properties, queryProperties.getName()); copyPropertiesToResult(queryProperties, resultTree); + + String configType = Resource.Type.Configuration.name(); + if (resultTree.getChild(configType) == null) { + resultTree.addChild(new HashSet<String>(), configType); + } + + String serviceType = Resource.Type.Service.name(); + if (resultTree.getChild(serviceType) == null) { + resultTree.addChild(new HashSet<String>(), serviceType); + } + String hostType = Resource.Type.Host.name(); String hostComponentType = Resource.Type.HostComponent.name(); TreeNode<Set<String>> hostComponentNode = resultTree.getChild( @@ -67,6 +101,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { } hostComponentNode = hostNode.addChild(new HashSet<String>(), hostComponentType); } + resultTree.getChild(configType).getObject().add("properties"); hostComponentNode.getObject().add("HostRoles/component_name"); return resultTree; @@ -106,50 +141,141 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { private Resource createBlueprintResource(TreeNode<Resource> clusterNode) { Resource clusterResource = clusterNode.getObject(); Resource blueprintResource = new ResourceImpl(Resource.Type.Cluster); - String clusterName = (String) clusterResource.getPropertyValue( - PropertyHelper.getPropertyId("Clusters", "cluster_name")); - //todo: deal with name collision? 
- String blueprintName = "blueprint-" + clusterName; + String[] stackTokens = ((String) clusterResource.getPropertyValue( PropertyHelper.getPropertyId("Clusters", "version"))).split("-"); - blueprintResource.setProperty("Blueprints/blueprint_name", blueprintName); blueprintResource.setProperty("Blueprints/stack_name", stackTokens[0]); blueprintResource.setProperty("Blueprints/stack_version", stackTokens[1]); - blueprintResource.setProperty( - "host_groups", processHostGroups(clusterNode.getChild("hosts"))); + + Collection<HostGroupImpl> hostGroups = processHostGroups(clusterNode.getChild("hosts")); + + List<Map<String, Object>> groupList = formatGroupsAsList(hostGroups); + blueprintResource.setProperty("host_groups", groupList); + + determinePropertiesToStrip(clusterNode.getChild("services"), stackTokens[0], stackTokens[1]); + + blueprintResource.setProperty("configurations", processConfigurations(clusterNode, hostGroups)); return blueprintResource; } /** - * Process host group information for all hosts. + * Determine which configuration properties need to be stripped from the configuration prior to exporting. + * Stripped properties are any property which are marked as required in the stack definition. For example, + * all passwords are required properties and are therefore not exported. 
+ * + * @param servicesNode services node + * @param stackName stack name + * @param stackVersion stack version + */ + private void determinePropertiesToStrip(TreeNode<Resource> servicesNode, String stackName, String stackVersion) { + AmbariMetaInfo stackInfo = getController().getAmbariMetaInfo(); + for (TreeNode<Resource> service : servicesNode.getChildren()) { + String name = (String) service.getObject().getPropertyValue("ServiceInfo/service_name"); + Map<String, PropertyInfo> requiredProperties = stackInfo.getRequiredProperties(stackName, stackVersion, name); + for (Map.Entry<String, PropertyInfo> entry : requiredProperties.entrySet()) { + String propertyName = entry.getKey(); + PropertyInfo propertyInfo = entry.getValue(); + String configCategory = propertyInfo.getFilename(); + if (configCategory.endsWith(".xml")) { + configCategory = configCategory.substring(0, configCategory.indexOf(".xml")); + } + Collection<String> categoryProperties = propertiesToStrip.get(configCategory); + if (categoryProperties == null) { + categoryProperties = new ArrayList<String>(); + propertiesToStrip.put(configCategory, categoryProperties); + } + categoryProperties.add(propertyName); + } + } + } + + /** + * Process cluster scoped configurations. 
* - * @param hostNode a host node + * @param clusterNode cluster node + * @param hostGroups all host groups * - * @return list of host group property maps, one element for each host group + * @return cluster configuration */ - private List<Map<String, Object>> processHostGroups(TreeNode<Resource> hostNode) { - Map<HostGroup, HostGroup> mapHostGroups = new HashMap<HostGroup, HostGroup>(); + private List<Map<String, Map<String, String>>> processConfigurations(TreeNode<Resource> clusterNode, + Collection<HostGroupImpl> hostGroups) { + + List<Map<String, Map<String, String>>> configList = new ArrayList<Map<String, Map<String, String>>>(); + + Map<String, Object> desiredConfigMap = clusterNode.getObject().getPropertiesMap().get("Clusters/desired_configs"); + TreeNode<Resource> configNode = clusterNode.getChild("configurations"); + for (TreeNode<Resource> config : configNode.getChildren()) { + Configuration configuration = new Configuration(config); + DesiredConfig desiredConfig = (DesiredConfig) desiredConfigMap.get(configuration.getType()); + if (desiredConfig != null && desiredConfig.getTag().equals(configuration.getTag())) { + Map<String, Map<String, String>> properties = Collections.singletonMap( + configuration.getType(), configuration.getProperties()); + + BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties); + properties = updater.doUpdateForBlueprintExport(hostGroups); + configList.add(properties); + } + } + return configList; + } + + /** + * Process cluster host groups. 
+ * + * @param hostNode host node + * + * @return collection of host groups + */ + private Collection<HostGroupImpl> processHostGroups(TreeNode<Resource> hostNode) { + Map<HostGroupImpl, HostGroupImpl> mapHostGroups = new HashMap<HostGroupImpl, HostGroupImpl>(); + int count = 1; for (TreeNode<Resource> host : hostNode.getChildren()) { - HostGroup group = HostGroup.parse(host); + HostGroupImpl group = new HostGroupImpl(host); + String hostName = (String) host.getObject().getPropertyValue( + PropertyHelper.getPropertyId("Hosts", "host_name")); + if (mapHostGroups.containsKey(group)) { - mapHostGroups.get(group).incrementCardinality(); + HostGroupImpl hostGroup = mapHostGroups.get(group); + hostGroup.incrementCardinality(); + hostGroup.addHost(hostName); } else { mapHostGroups.put(group, group); + group.setName("host_group_" + count++); + group.addHost(hostName); } } + return mapHostGroups.values(); + } - int count = 1; + + /** + * Process host group information for all hosts. + * + * @param hostGroups all host groups + * + * @return list of host group property maps, one element for each host group + */ + private List<Map<String, Object>> formatGroupsAsList(Collection<HostGroupImpl> hostGroups) { List<Map<String, Object>> listHostGroups = new ArrayList<Map<String, Object>>(); - for (HostGroup group : mapHostGroups.values()) { - String groupName = "host_group_" + count++; + for (HostGroupImpl group : hostGroups) { Map<String, Object> mapGroupProperties = new HashMap<String, Object>(); listHostGroups.add(mapGroupProperties); - mapGroupProperties.put("name", groupName); + mapGroupProperties.put("name", group.getName()); mapGroupProperties.put("cardinality", String.valueOf(group.getCardinality())); mapGroupProperties.put("components", processHostGroupComponents(group)); + List<Map<String, Map<String, String>>> hostConfigurations = new ArrayList<Map<String, Map<String, String>>>(); + for (Configuration configuration : group.getConfigurations()) { + Map<String, Map<String, 
String>> propertyMap = Collections.singletonMap( + configuration.getType(), configuration.properties); + BlueprintConfigurationProcessor configurationProcessor = new BlueprintConfigurationProcessor(propertyMap); + Map<String, Map<String, String>> updatedProps = configurationProcessor.doUpdateForBlueprintExport(hostGroups); + hostConfigurations.add(updatedProps); + + } + mapGroupProperties.put("configurations", hostConfigurations); } return listHostGroups; } @@ -161,7 +287,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { * * @return list of component names for the host */ - private List<Map<String, String>> processHostGroupComponents(HostGroup group) { + private List<Map<String, String>> processHostGroupComponents(HostGroupImpl group) { List<Map<String, String>> listHostGroupComponents = new ArrayList<Map<String, String>>(); for (String component : group.getComponents()) { Map<String, String> mapComponentProperties = new HashMap<String, String>(); @@ -183,16 +309,37 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { return isCollection != null && isCollection.equals("true"); } + /** + * Get management controller instance. + * + * @return management controller + */ + protected AmbariManagementController getController() { + return controller; + } + // ----- Host Group inner class -------------------------------------------- /** * Host Group representation. */ - private static class HostGroup { + private class HostGroupImpl implements HostGroup { + + /** + * Host Group name. + * + */ + private String name; + /** * Associated components. */ - private Set<String> m_components = new HashSet<String>(); + private Set<String> components = new HashSet<String>(); + + /** + * Host group scoped configurations. + */ + private Collection<Configuration> configurations = new HashSet<Configuration>(); /** * Number of instances. 
@@ -200,35 +347,107 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { private int m_cardinality = 1; /** - * Factory method for obtaining a host group instance. - * Parses a host tree node for host related information. - * - * @param host host tree node - * - * @return a new HostGroup instance + * Collection of associated hosts. */ - public static HostGroup parse(TreeNode<Resource> host) { - HostGroup group = new HostGroup(); + private Collection<String> hosts = new HashSet<String>(); + /** + * Constructor. + * + * @param host host node + */ + public HostGroupImpl(TreeNode<Resource> host) { TreeNode<Resource> components = host.getChild("host_components"); for (TreeNode<Resource> component : components.getChildren()) { - group.getComponents().add((String) component.getObject().getPropertyValue( + getComponents().add((String) component.getObject().getPropertyValue( "HostRoles/component_name")); } - - group.addAmbariComponentIfLocalhost((String) host.getObject().getPropertyValue( + addAmbariComponentIfLocalhost((String) host.getObject().getPropertyValue( PropertyHelper.getPropertyId("Hosts", "host_name"))); - return group; + processGroupConfiguration(host); } - /** ` - * Obtain associated components. + /** + * Process host group configuration. * - * @return set of associated components + * @param host host node */ + private void processGroupConfiguration(TreeNode<Resource> host) { Map<String, Object> desiredConfigMap = host.getObject().getPropertiesMap().get("Hosts/desired_configs"); if (desiredConfigMap != null) { for (Map.Entry<String, Object> entry : desiredConfigMap.entrySet()) { String type = entry.getKey(); HostConfig hostConfig = (HostConfig) entry.getValue(); Map<Long, String> overrides = hostConfig.getConfigGroupOverrides(); + + if (overrides != null && !
overrides.isEmpty()) { + Long version = Collections.max(overrides.keySet()); + String tag = overrides.get(version); + TreeNode<Resource> clusterNode = host.getParent().getParent(); + TreeNode<Resource> configNode = clusterNode.getChild("configurations"); + for (TreeNode<Resource> config : configNode.getChildren()) { + Configuration configuration = new Configuration(config); + if (type.equals(configuration.getType()) && tag.equals(configuration.getTag())) { + getConfigurations().add(configuration); + break; + } + } + } + } + } + } + + @Override + public String getName() { + return name; + } + + @Override public Set<String> getComponents() { - return m_components; + return components; + } + + @Override + public Collection<String> getHostInfo() { + return hosts; + } + + @Override + public Map<String, Map<String, String>> getConfigurationProperties() { + Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>(); + for (Configuration configuration : configurations) { + properties.put(configuration.getType(), configuration.getProperties()); + } + + return properties; + } + + /** + * Set the name. + * + * @param name name of host group + */ + public void setName(String name) { + this.name = name; + } + + /** + * Add a host. + * + * @param host host to add + */ + public void addHost(String host) { + hosts.add(host); + } + + /** + * Obtain associated host group scoped configurations. 
+ * + * @return collection of host group scoped configurations + */ + public Collection<Configuration> getConfigurations() { + return configurations; } /** @@ -273,14 +492,115 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - HostGroup hostGroup = (HostGroup) o; + HostGroupImpl hostGroup = (HostGroupImpl) o; - return m_components.equals(hostGroup.m_components); + return components.equals(hostGroup.components) && + configurations.equals(hostGroup.configurations); } @Override public int hashCode() { - return m_components.hashCode(); + int result = components.hashCode(); + result = 31 * result + configurations.hashCode(); + return result; + } + } + + /** + * Encapsulates a configuration. + */ + private class Configuration { + /** + * Configuration type such as hdfs-site. + */ + private String type; + + /** + * Configuration tag. + */ + private String tag; + + /** + * Properties of the configuration. + */ + private Map<String, String> properties = new HashMap<String, String>(); + + /** + * Constructor. + * + * @param configNode configuration node + */ + @SuppressWarnings("unchecked") + public Configuration(TreeNode<Resource> configNode) { + Resource configResource = configNode.getObject(); + type = (String) configResource.getPropertyValue("type"); + tag = (String) configResource.getPropertyValue("tag"); + + // property map type is currently <String, Object> + properties = (Map) configNode.getObject().getPropertiesMap().get("properties"); + stripRequiredProperties(properties); + } + + /** + * Get configuration type. + * + * @return configuration type + */ + public String getType() { + return type; + } + + /** + * Get configuration tag. + * + * @return configuration tag + */ + public String getTag() { + return tag; + } + + /** + * Get configuration properties. 
+ * + * @return map of properties and values + */ + public Map<String, String> getProperties() { + return properties; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Configuration that = (Configuration) o; + return tag.equals(that.tag) && type.equals(that.type) && properties.equals(that.properties); + } + + @Override + public int hashCode() { + int result = type.hashCode(); + result = 31 * result + tag.hashCode(); + result = 31 * result + properties.hashCode(); + return result; + } + + /** + * Strip required properties from configuration. + * + * @param properties property map + */ + private void stripRequiredProperties(Map<String, String> properties) { + Iterator<Map.Entry<String, String>> iter = properties.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry<String, String> entry = iter.next(); + String property = entry.getKey(); + String category = getType(); + Collection<String> categoryProperties = propertiesToStrip.get(category); + if (categoryProperties != null && categoryProperties.contains(property)) { + iter.remove(); + } + } } } http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java index eff3854..97c155a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java @@ -1080,16 +1080,19 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle } } else { + boolean includeProps = 
request.includeProperties(); if (null != request.getType()) { Map<String, Config> configs = cluster.getConfigsByType( request.getType()); if (null != configs) { for (Entry<String, Config> entry : configs.entrySet()) { + Config config = entry.getValue(); ConfigurationResponse response = new ConfigurationResponse( cluster.getClusterName(), request.getType(), - entry.getValue().getTag(), entry.getValue().getVersion(), new HashMap<String, String>(), - new HashMap<String, Map<String,String>>()); + config.getTag(), entry.getValue().getVersion(), + includeProps ? config.getProperties() : new HashMap<String, String>(), + includeProps ? config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>()); responses.add(response); } } @@ -1099,8 +1102,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle for (Config config : all) { ConfigurationResponse response = new ConfigurationResponse( - cluster.getClusterName(), config.getType(), config.getTag(), config.getVersion(), - new HashMap<String, String>(), new HashMap<String, Map<String,String>>()); + cluster.getClusterName(), config.getType(), config.getTag(), config.getVersion(), + includeProps ? config.getProperties() : new HashMap<String, String>(), + includeProps ? 
config.getPropertiesAttributes() : new HashMap<String, Map<String,String>>()); responses.add(response); } http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java index 0130de4..7c56eb5 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigurationRequest.java @@ -34,6 +34,7 @@ public class ConfigurationRequest { private Map<String, String> configs; private boolean selected = true; private Map<String, Map<String, String>> configsAttributes; + private boolean includeProperties; public ConfigurationRequest() { configs = new HashMap<String, String>(); @@ -52,6 +53,7 @@ public class ConfigurationRequest { this.tag = tag; this.configs = configs; this.configsAttributes = configsAttributes; + this.includeProperties = (type != null && tag != null); } /** @@ -128,6 +130,24 @@ public class ConfigurationRequest { } /** + * Set whether properties should be included. + * + * @param includeProperties whether properties should be included + */ + public void setIncludeProperties(boolean includeProperties) { + this.includeProperties = includeProperties; + } + + /** + * Determine whether properties should be included. 
+ * + * @return true if properties should be included; false otherwise + */ + public boolean includeProperties() { + return this.includeProperties; + } + + /** * @return Attributes of configs */ public Map<String, Map<String, String>> getPropertiesAttributes() { http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java index d723512..a4165d7 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java @@ -84,9 +84,9 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * * @return collection of host groups which contain the specified component */ - protected Collection<HostGroup> getHostGroupsForComponent(String component, Collection<HostGroup> hostGroups) { - Collection<HostGroup> resultGroups = new HashSet<HostGroup>(); - for (HostGroup group : hostGroups ) { + protected Collection<HostGroupImpl> getHostGroupsForComponent(String component, Collection<HostGroupImpl> hostGroups) { + Collection<HostGroupImpl> resultGroups = new HashSet<HostGroupImpl>(); + for (HostGroupImpl group : hostGroups ) { if (group.getComponents().contains(component)) { resultGroups.add(group); } @@ -102,11 +102,11 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * * @return map of host group name to host group */ - protected Map<String, HostGroup> parseBlueprintHostGroups(BlueprintEntity blueprint, Stack stack) { - Map<String, HostGroup> mapHostGroups = new HashMap<String, HostGroup>(); 
+ protected Map<String, HostGroupImpl> parseBlueprintHostGroups(BlueprintEntity blueprint, Stack stack) { + Map<String, HostGroupImpl> mapHostGroups = new HashMap<String, HostGroupImpl>(); for (HostGroupEntity hostGroup : blueprint.getHostGroups()) { - mapHostGroups.put(hostGroup.getName(), new HostGroup(hostGroup, stack)); + mapHostGroups.put(hostGroup.getName(), new HostGroupImpl(hostGroup, stack)); } return mapHostGroups; } @@ -149,14 +149,14 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP */ protected BlueprintEntity validateTopology(BlueprintEntity blueprint) throws AmbariException { Stack stack = new Stack(blueprint.getStackName(), blueprint.getStackVersion()); - Map<String, HostGroup> hostGroupMap = parseBlueprintHostGroups(blueprint, stack); - Collection<HostGroup> hostGroups = hostGroupMap.values(); + Map<String, HostGroupImpl> hostGroupMap = parseBlueprintHostGroups(blueprint, stack); + Collection<HostGroupImpl> hostGroups = hostGroupMap.values(); Map<String, Map<String, String>> clusterConfig = processBlueprintConfigurations(blueprint, null); Map<String, Map<String, Collection<DependencyInfo>>> missingDependencies = new HashMap<String, Map<String, Collection<DependencyInfo>>>(); Collection<String> services = getTopologyServices(hostGroups); - for (HostGroup group : hostGroups) { + for (HostGroupImpl group : hostGroups) { Map<String, Collection<DependencyInfo>> missingGroupDependencies = group.validateTopology(hostGroups, services, clusterConfig); if (! 
missingGroupDependencies.isEmpty()) { @@ -311,9 +311,9 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * * @return collections of all services provided by topology */ - protected Collection<String> getTopologyServices(Collection<HostGroup> hostGroups) { + protected Collection<String> getTopologyServices(Collection<HostGroupImpl> hostGroups) { Collection<String> services = new HashSet<String>(); - for (HostGroup group : hostGroups) { + for (HostGroupImpl group : hostGroups) { services.addAll(group.getServices()); } return services; @@ -359,7 +359,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * @return collection of missing component information */ private Collection<String> verifyComponentCardinalityCount(BlueprintEntity blueprint, - Collection<HostGroup> hostGroups, + Collection<HostGroupImpl> hostGroups, String component, Cardinality cardinality, AutoDeployInfo autoDeploy, @@ -374,11 +374,11 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP if (! validated && autoDeploy != null && autoDeploy.isEnabled() && cardinality.supportsAutoDeploy()) { String coLocateName = autoDeploy.getCoLocate(); if (coLocateName != null && ! coLocateName.isEmpty()) { - Collection<HostGroup> coLocateHostGroups = getHostGroupsForComponent( + Collection<HostGroupImpl> coLocateHostGroups = getHostGroupsForComponent( coLocateName.split("/")[1], hostGroups); if (! 
coLocateHostGroups.isEmpty()) { validated = true; - HostGroup group = coLocateHostGroups.iterator().next(); + HostGroupImpl group = coLocateHostGroups.iterator().next(); if (group.addComponent(component)) { addComponentToBlueprint(blueprint, group.getEntity().getName(), component); } @@ -405,7 +405,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * @return collection of missing component information */ private Collection<String> verifyComponentInAllHostGroups(BlueprintEntity blueprint, - Collection<HostGroup> hostGroups, + Collection<HostGroupImpl> hostGroups, String component, AutoDeployInfo autoDeploy) { @@ -413,7 +413,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP int actualCount = getHostGroupsForComponent(component, hostGroups).size(); if (actualCount != hostGroups.size()) { if (autoDeploy != null && autoDeploy.isEnabled()) { - for (HostGroup group : hostGroups) { + for (HostGroupImpl group : hostGroups) { if (group.addComponent(component)) { addComponentToBlueprint(blueprint, group.getEntity().getName(), component); } @@ -846,7 +846,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP /** * Host group representation. 
*/ - protected class HostGroup { + protected class HostGroupImpl implements HostGroup { /** * Host group entity */ @@ -885,13 +885,28 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * @param hostGroup host group * @param stack stack */ - public HostGroup(HostGroupEntity hostGroup, Stack stack) { + public HostGroupImpl(HostGroupEntity hostGroup, Stack stack) { this.hostGroup = hostGroup; this.stack = stack; parseComponents(); parseConfigurations(); } + @Override + public String getName() { + return hostGroup.getName(); + } + + @Override + public Collection<String> getComponents() { + return this.components; + } + + @Override + public Collection<String> getHostInfo() { + return this.hosts; + } + /** * Associate a host with the host group. * @@ -902,15 +917,6 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP } /** - * Get associated host information. - * - * @return collection of hosts associated with the host group - */ - public Collection<String> getHostInfo() { - return this.hosts; - } - - /** * Get the services which are deployed to this host group. * * @return collection of services which have components in this host group @@ -920,16 +926,6 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP } /** - * Get the components associated with the host group. - * - * @return collection of component names for the host group - */ - public Collection<String> getComponents() { - return this.components; - } - - - /** * Add a component to the host group. 
* * @param component component to add @@ -969,7 +965,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * * @return map of configuration type to a map of properties */ - public Map<String, Map<String, String>> getConfigurations() { + public Map<String, Map<String, String>> getConfigurationProperties() { return configurations; } @@ -991,7 +987,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP * * @return map of component to missing dependencies */ - public Map<String, Collection<DependencyInfo>> validateTopology(Collection<HostGroup> hostGroups, + public Map<String, Collection<DependencyInfo>> validateTopology(Collection<HostGroupImpl> hostGroups, Collection<String> services, Map<String, Map<String, String>> clusterConfig) { @@ -1059,8 +1055,12 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP typeProperties = new HashMap<String, String>(); configurations.put(type, typeProperties); } - configurations.put(type, jsonSerializer.<Map<String, String>>fromJson( - configEntity.getConfigData(), Map.class)); + Map<String, String> propertyMap = jsonSerializer.<Map<String, String>>fromJson( + configEntity.getConfigData(), Map.class); + + if (propertyMap != null) { + typeProperties.putAll(propertyMap); + } } } } http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java new file mode 100644 index 0000000..c7eef57 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java @@ -0,0 +1,700 @@ +/** + 
* Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ambari.server.controller.internal; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Updates configuration properties based on cluster topology. This is done when exporting + * a blueprint and when a cluster is provisioned via a blueprint. 
+ */ +public class BlueprintConfigurationProcessor { + + /** + * Single host topology updaters + */ + private static Map<String, Map<String, PropertyUpdater>> singleHostTopologyUpdaters = + new HashMap<String, Map<String, PropertyUpdater>>(); + + /** + * Multi host topology updaters + */ + private static Map<String, Map<String, PropertyUpdater>> multiHostTopologyUpdaters = + new HashMap<String, Map<String, PropertyUpdater>>(); + + /** + * Database host topology updaters + */ + private static Map<String, Map<String, PropertyUpdater>> dbHostTopologyUpdaters = + new HashMap<String, Map<String, PropertyUpdater>>(); + + /** + * Updaters for properties which need 'm' appended + */ + private static Map<String, Map<String, PropertyUpdater>> mPropertyUpdaters = + new HashMap<String, Map<String, PropertyUpdater>>(); + + /** + * Collection of all updaters + */ + private static Collection<Map<String, Map<String, PropertyUpdater>>> allUpdaters = + new ArrayList<Map<String, Map<String, PropertyUpdater>>>(); + + /** + * Compiled regex for hostgroup token. + */ + private static Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+)%"); + + /** + * Compiled regex for hostgroup token with port information. + */ + private static Pattern HOSTGROUP_PORT_REGEX = Pattern.compile("%HOSTGROUP::(\\w+|\\d+)%:?(\\d+)?"); + + /** + * Configuration properties to be updated + */ + private Map<String, Map<String, String>> properties; + + + /** + * Constructor. + * + * @param properties properties to update + */ + public BlueprintConfigurationProcessor(Map<String, Map<String, String>> properties) { + this.properties = properties; + } + + /** + * Update properties for cluster creation. This involves updating topology related properties with + * concrete topology information. + * + * @param hostGroups host groups of cluster to be deployed + * + * @return updated properties + */ + public Map<String, Map<String, String>> doUpdateForClusterCreate(Map<String, ? 
extends HostGroup> hostGroups) { + for (Map<String, Map<String, PropertyUpdater>> updaterMap : allUpdaters) { + for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) { + String type = entry.getKey(); + for (Map.Entry<String, PropertyUpdater> updaterEntry : entry.getValue().entrySet()) { + String propertyName = updaterEntry.getKey(); + PropertyUpdater updater = updaterEntry.getValue(); + + Map<String, String> typeMap = properties.get(type); + if (typeMap != null && typeMap.containsKey(propertyName)) { + typeMap.put(propertyName, updater.updateForClusterCreate( + hostGroups, typeMap.get(propertyName), properties)); + } + } + } + } + return properties; + } + + /** + * Update properties for blueprint export. + * This involves converting concrete topology information to host groups. + * + * @param hostGroups cluster host groups + * + * @return updated properties + */ + public Map<String, Map<String, String>> doUpdateForBlueprintExport(Collection<? extends HostGroup> hostGroups) { + doSingleHostExportUpdate(hostGroups, singleHostTopologyUpdaters); + doSingleHostExportUpdate(hostGroups, dbHostTopologyUpdaters); + doMultiHostExportUpdate(hostGroups, multiHostTopologyUpdaters); + + return properties; + } + + /** + * Update single host topology configuration properties for blueprint export. + * + * @param hostGroups cluster export + * @param updaters registered updaters + */ + private void doSingleHostExportUpdate(Collection<? 
extends HostGroup> hostGroups, + Map<String, Map<String, PropertyUpdater>> updaters) { + + for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaters.entrySet()) { + String type = entry.getKey(); + for (String propertyName : entry.getValue().keySet()) { + boolean matchedHost = false; + + Map<String, String> typeProperties = properties.get(type); + if (typeProperties != null && typeProperties.containsKey(propertyName)) { + String propValue = typeProperties.get(propertyName); + for (HostGroup group : hostGroups) { + Collection<String> hosts = group.getHostInfo(); + for (String host : hosts) { + if (propValue.contains(host)) { //todo: need to use regular expression to avoid matching a host which is a superset. Can this be fixed??? + matchedHost = true; + typeProperties.put(propertyName, propValue.replace( + host, "%HOSTGROUP::" + group.getName() + "%")); + break; + } + } + if (matchedHost) { + break; + } + } + if (! matchedHost) { + typeProperties.remove(propertyName); + } + } + } + } + } + + /** + * Update multi host topology configuration properties for blueprint export. + * + * @param hostGroups cluster host groups + * @param updaters registered updaters + */ + private void doMultiHostExportUpdate(Collection<? 
extends HostGroup> hostGroups, + Map<String, Map<String, PropertyUpdater>> updaters) { + + for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaters.entrySet()) { + String type = entry.getKey(); + for (String propertyName : entry.getValue().keySet()) { + Map<String, String> typeProperties = properties.get(type); + if (typeProperties != null && typeProperties.containsKey(propertyName)) { + String propValue = typeProperties.get(propertyName); + for (HostGroup group : hostGroups) { + Collection<String> hosts = group.getHostInfo(); + for (String host : hosts) { + propValue = propValue.replaceAll(host + "\\b", "%HOSTGROUP::" + group.getName() + "%"); + } + } + Collection<String> addedGroups = new HashSet<String>(); + String[] toks = propValue.split(","); + boolean inBrackets = propValue.startsWith("["); + + StringBuilder sb = new StringBuilder(); + if (inBrackets) { + sb.append('['); + } + boolean firstTok = true; + for (String tok : toks) { + tok = tok.replaceAll("[\\[\\]]", ""); + + if (addedGroups.add(tok)) { + if (! firstTok) { + sb.append(','); + } + sb.append(tok); + } + firstTok = false; + } + + if (inBrackets) { + sb.append(']'); + } + typeProperties.put(propertyName, sb.toString()); + } + } + } + } + + /** + * Get host groups which contain a component. + * + * @param component component name + * @param hostGroups collection of host groups to check + * + * @return collection of host groups which contain the specified component + */ + private static Collection<HostGroup> getHostGroupsForComponent(String component, + Collection<? extends HostGroup> hostGroups) { + + Collection<HostGroup> resultGroups = new HashSet<HostGroup>(); + for (HostGroup group : hostGroups ) { + if (group.getComponents().contains(component)) { + resultGroups.add(group); + } + } + return resultGroups; + } + + /** + * Convert a property value which includes a host group topology token to a physical host. 
+ * + * @param hostGroups cluster host groups + * @param val value to be converted + * + * @return updated value with physical host name + */ + private static Collection<String> getHostStrings(Map<String, ? extends HostGroup> hostGroups, + String val) { + + Collection<String> hosts = new HashSet<String>(); + Matcher m = HOSTGROUP_PORT_REGEX.matcher(val); + while (m.find()) { + String groupName = m.group(1); + String port = m.group(2); + + + HostGroup hostGroup = hostGroups.get(groupName); + if (hostGroup == null) { + throw new IllegalArgumentException( + "Unable to match blueprint host group token to a host group: " + groupName); + } + for (String host : hostGroup.getHostInfo()) { + if (port != null) { + host += ":" + port; + } + hosts.add(host); + } + } + return hosts; + } + + /** + * Provides functionality to update a property value. + */ + public interface PropertyUpdater { + /** + * Update a property value. + * + * + * @param hostGroups host groups + * @param origValue original value of property + * @param properties all properties + * + * @return new property value + */ + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups, + String origValue, Map<String, Map<String, String>> properties); + } + + /** + * Topology based updater which replaces the original host name of a property with the host name + * which runs the associated (master) component in the new cluster. + */ + private static class SingleHostTopologyUpdater implements PropertyUpdater { + /** + * Component name + */ + private String component; + + /** + * Constructor. + * + * @param component component name associated with the property + */ + public SingleHostTopologyUpdater(String component) { + this.component = component; + } + + /** + * Update the property with the new host name which runs the associated component. 
+ * + * + * @param hostGroups host groups + * @param origValue original value of property + * @param properties all properties + * + * @return updated property value with old host name replaced by new host name + */ + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups, + String origValue, + Map<String, Map<String, String>> properties) { + + Matcher m = HOSTGROUP_REGEX.matcher(origValue); + if (m.find()) { + String hostGroupName = m.group(1); + HostGroup hostGroup = hostGroups.get(hostGroupName); + //todo: ensure > 0 hosts (is this necessary) + return origValue.replace(m.group(0), hostGroup.getHostInfo().iterator().next()); + } else { + Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values()); + if (matchingGroups.size() == 1) { + return origValue.replace("localhost", matchingGroups.iterator().next().getHostInfo().iterator().next()); + } else { + throw new IllegalArgumentException("Unable to update configuration property with topology information. " + + "Component '" + this.component + "' is not mapped to any host group or is mapped to multiple groups."); + } + } + } + } + + /** + * Topology based updater which replaces the original host name of a database property with the host name + * where the DB is deployed in the new cluster. If an existing database is specified, the original property + * value is returned. + */ + private static class DBTopologyUpdater extends SingleHostTopologyUpdater { + /** + * Property type (global, core-site ...) for property which is used to determine if DB is external. + */ + private final String configPropertyType; + + /** + * Name of property which is used to determine if DB is new or existing (exernal). + */ + private final String conditionalPropertyName; + + /** + * Constructor. 
+ * + * @param component component to get hot name if new DB + * @param conditionalPropertyType config type of property used to determine if DB is external + * @param conditionalPropertyName name of property which is used to determine if DB is external + */ + private DBTopologyUpdater(String component, String conditionalPropertyType, + String conditionalPropertyName) { + super(component); + this.configPropertyType = conditionalPropertyType; + this.conditionalPropertyName = conditionalPropertyName; + } + + /** + * If database is a new managed database, update the property with the new host name which + * runs the associated component. If the database is external (non-managed), return the + * original value. + * + * + * @param hostGroups host groups + * @param origValue original value of property + * @param properties all properties + * + * @return updated property value with old host name replaced by new host name or original value + * if the database is external + */ + @Override + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups, + String origValue, Map<String, Map<String, String>> properties) { + + if (isDatabaseManaged(properties)) { + return super.updateForClusterCreate(hostGroups, origValue, properties); + } else { + return origValue; + } + } + + /** + * Determine if database is managed, meaning that it is a component in the cluster topology. + * + * @return true if the DB is managed; false otherwise + */ + private boolean isDatabaseManaged(Map<String, Map<String, String>> properties) { + // conditional property should always exist since it is required to be specified in the stack + return properties.get(configPropertyType). + get(conditionalPropertyName).startsWith("New"); + } + } + + /** + * Topology based updater which replaces original host names (possibly more than one) contained in a property + * value with the host names which runs the associated component in the new cluster. 
+ */ + private static class MultipleHostTopologyUpdater implements PropertyUpdater { + /** + * Component name + */ + private String component; + + /** + * Separator for multiple property values + */ + private Character separator = ','; + + /** + * Constructor. + * + * @param component component name associated with the property + */ + public MultipleHostTopologyUpdater(String component) { + this.component = component; + } + + /** + * Update all host names included in the original property value with new host names which run the associated + * component. + * + * + * @param hostGroups host groups + * @param origValue original value of property + * @param properties all properties + * + * @return updated property value with old host names replaced by new host names + */ + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups, + String origValue, + Map<String, Map<String, String>> properties) { + + Collection<String> hostStrings = getHostStrings(hostGroups, origValue); + if (hostStrings.isEmpty()) { + //default non-exported original value + String port = null; + if (origValue.contains(":")) { + //todo: currently assuming all hosts are using same port + port = origValue.substring(origValue.indexOf(":") + 1); + } + Collection<HostGroup> matchingGroups = getHostGroupsForComponent(component, hostGroups.values()); + for (HostGroup group : matchingGroups) { + for (String host : group.getHostInfo()) { + if (port != null) { + host += ":" + port; + } + hostStrings.add(host); + } + } + } + + StringBuilder sb = new StringBuilder(); + boolean firstHost = true; + for (String host : hostStrings) { + if (!firstHost) { + sb.append(separator); + } else { + firstHost = false; + } + sb.append(host); + } + + return sb.toString(); + } + } + + /** + * Updater which appends "m" to the original property value. + * For example, "1024" would be updated to "1024m". 
+ */ + private static class MPropertyUpdater implements PropertyUpdater { + /** + * Append 'm' to the original property value if it doesn't already exist. + * + * + * @param hostGroups host groups + * @param origValue original value of property + * @param properties all properties + * + * @return property with 'm' appended + */ + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroups, + String origValue, Map<String, + Map<String, String>> properties) { + + return origValue.endsWith("m") ? origValue : origValue + 'm'; + } + } + + /** + * Class to facilitate special formatting needs of property values. + */ + private abstract static class AbstractPropertyValueDecorator implements PropertyUpdater { + PropertyUpdater propertyUpdater; + + /** + * Constructor. + * + * @param propertyUpdater wrapped updater + */ + public AbstractPropertyValueDecorator(PropertyUpdater propertyUpdater) { + this.propertyUpdater = propertyUpdater; + } + + /** + * Return decorated form of the updated input property value. + * + * @param hostGroupMap map of host group name to HostGroup + * @param origValue original value of property + * @param properties all properties + * + * @return Formatted output string + */ + @Override + public String updateForClusterCreate(Map<String, ? extends HostGroup> hostGroupMap, + String origValue, + Map<String, Map<String, String>> properties) { + + return doFormat(propertyUpdater.updateForClusterCreate(hostGroupMap, origValue, properties)); + } + + /** + * Transform input string to required output format. 
+ * + * @param originalValue original value of property + * + * @return formatted output string + */ + public abstract String doFormat(String originalValue); + } + + /** + * Return properties of the form ['value'] + */ + private static class YamlMultiValuePropertyDecorator extends AbstractPropertyValueDecorator { + + public YamlMultiValuePropertyDecorator(PropertyUpdater propertyUpdater) { + super(propertyUpdater); + } + + /** + * Format input String of the form, str1,str2 to ['str1','str2'] + * + * @param origValue input string + * + * @return formatted string + */ + @Override + public String doFormat(String origValue) { + StringBuilder sb = new StringBuilder(); + if (origValue != null) { + sb.append("["); + boolean isFirst = true; + for (String value : origValue.split(",")) { + if (!isFirst) { + sb.append(","); + } else { + isFirst = false; + } + sb.append("'"); + sb.append(value); + sb.append("'"); + } + sb.append("]"); + } + return sb.toString(); + } + } + + /** + * Register updaters for configuration properties. 
+ */ + static { + + allUpdaters.add(singleHostTopologyUpdaters); + allUpdaters.add(multiHostTopologyUpdaters); + allUpdaters.add(dbHostTopologyUpdaters); + allUpdaters.add(mPropertyUpdaters); + + Map<String, PropertyUpdater> hdfsSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> mapredSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> coreSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> hbaseSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> yarnSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> hiveSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>(); + + Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<String, PropertyUpdater>(); + + Map<String, PropertyUpdater> multiWebhcatSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> multiHbaseSiteMap = new HashMap<String, PropertyUpdater>(); + Map<String, PropertyUpdater> multiStormSiteMap = new HashMap<String, PropertyUpdater>(); + + Map<String, PropertyUpdater> dbHiveSiteMap = new HashMap<String, PropertyUpdater>(); + + + singleHostTopologyUpdaters.put("hdfs-site", hdfsSiteMap); + singleHostTopologyUpdaters.put("mapred-site", mapredSiteMap); + singleHostTopologyUpdaters.put("core-site", coreSiteMap); + singleHostTopologyUpdaters.put("hbase-site", hbaseSiteMap); + singleHostTopologyUpdaters.put("yarn-site", yarnSiteMap); + singleHostTopologyUpdaters.put("hive-site", hiveSiteMap); + singleHostTopologyUpdaters.put("oozie-site", oozieSiteMap); + singleHostTopologyUpdaters.put("storm-site", stormSiteMap); + 
+ mPropertyUpdaters.put("hadoop-env", hadoopEnvMap); + mPropertyUpdaters.put("hbase-env", hbaseEnvMap); + mPropertyUpdaters.put("mapred-env", mapredEnvMap); + + multiHostTopologyUpdaters.put("webhcat-site", multiWebhcatSiteMap); + multiHostTopologyUpdaters.put("hbase-site", multiHbaseSiteMap); + multiHostTopologyUpdaters.put("storm-site", multiStormSiteMap); + + dbHostTopologyUpdaters.put("hive-site", dbHiveSiteMap); + + // NAMENODE + hdfsSiteMap.put("dfs.http.address", new SingleHostTopologyUpdater("NAMENODE")); + hdfsSiteMap.put("dfs.https.address", new SingleHostTopologyUpdater("NAMENODE")); + coreSiteMap.put("fs.default.name", new SingleHostTopologyUpdater("NAMENODE")); + hdfsSiteMap.put("dfs.namenode.http-address", new SingleHostTopologyUpdater("NAMENODE")); + hdfsSiteMap.put("dfs.namenode.https-address", new SingleHostTopologyUpdater("NAMENODE")); + coreSiteMap.put("fs.defaultFS", new SingleHostTopologyUpdater("NAMENODE")); + hbaseSiteMap.put("hbase.rootdir", new SingleHostTopologyUpdater("NAMENODE")); + + // SECONDARY_NAMENODE + hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE")); + hdfsSiteMap.put("dfs.namenode.secondary.http-address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE")); + + // JOBTRACKER + mapredSiteMap.put("mapred.job.tracker", new SingleHostTopologyUpdater("JOBTRACKER")); + mapredSiteMap.put("mapred.job.tracker.http.address", new SingleHostTopologyUpdater("JOBTRACKER")); + mapredSiteMap.put("mapreduce.history.server.http.address", new SingleHostTopologyUpdater("JOBTRACKER")); + + + // HISTORY_SERVER + yarnSiteMap.put("yarn.log.server.url", new SingleHostTopologyUpdater("HISTORYSERVER")); + mapredSiteMap.put("mapreduce.jobhistory.webapp.address", new SingleHostTopologyUpdater("HISTORYSERVER")); + mapredSiteMap.put("mapreduce.jobhistory.address", new SingleHostTopologyUpdater("HISTORYSERVER")); + + // RESOURCEMANAGER + yarnSiteMap.put("yarn.resourcemanager.hostname", new 
SingleHostTopologyUpdater("RESOURCEMANAGER")); + yarnSiteMap.put("yarn.resourcemanager.resource-tracker.address", new SingleHostTopologyUpdater("RESOURCEMANAGER")); + yarnSiteMap.put("yarn.resourcemanager.webapp.address", new SingleHostTopologyUpdater("RESOURCEMANAGER")); + yarnSiteMap.put("yarn.resourcemanager.scheduler.address", new SingleHostTopologyUpdater("RESOURCEMANAGER")); + yarnSiteMap.put("yarn.resourcemanager.address", new SingleHostTopologyUpdater("RESOURCEMANAGER")); + yarnSiteMap.put("yarn.resourcemanager.admin.address", new SingleHostTopologyUpdater("RESOURCEMANAGER")); + + // HIVE_SERVER + hiveSiteMap.put("hive.metastore.uris", new SingleHostTopologyUpdater("HIVE_SERVER")); + dbHiveSiteMap.put("javax.jdo.option.ConnectionURL", + new DBTopologyUpdater("MYSQL_SERVER", "hive-env", "hive_database")); + + // OOZIE_SERVER + oozieSiteMap.put("oozie.base.url", new SingleHostTopologyUpdater("OOZIE_SERVER")); + + // ZOOKEEPER_SERVER + multiHbaseSiteMap.put("hbase.zookeeper.quorum", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")); + multiWebhcatSiteMap.put("templeton.zookeeper.hosts", new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")); + + // STORM + stormSiteMap.put("nimbus.host", new SingleHostTopologyUpdater("NIMBUS")); + stormSiteMap.put("worker.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER")); + stormSiteMap.put("supervisor.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER")); + stormSiteMap.put("nimbus.childopts", new SingleHostTopologyUpdater("GANGLIA_SERVER")); + multiStormSiteMap.put("storm.zookeeper.servers", + new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER"))); + + // Required due to AMBARI-4933. These no longer seem to be required as the default values in the stack + // are now correct but are left here in case an existing blueprint still contains an old value. 
+ hadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater()); + hadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater()); + hadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater()); + hadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater()); + mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater()); + mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater()); + mapredEnvMap.put("jtnode_heapsize", new MPropertyUpdater()); + hbaseEnvMap.put("hbase_master_heapsize", new MPropertyUpdater()); + hbaseEnvMap.put("hbase_regionserver_heapsize", new MPropertyUpdater()); + } +} \ No newline at end of file