Repository: ambari
Updated Branches:
  refs/heads/trunk 3e1ad2a4a -> 8d464c2b2


http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
index f99955b..5bddeb9 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
@@ -49,7 +49,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.configuration.Configuration;
 
 /**
  * Resource provider for cluster resources.
@@ -72,12 +71,6 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{CLUSTER_ID_PROPERTY_ID}));
 
-   /**
-   * Maps properties to updaters which update the property when provisioning a 
cluster via a blueprint
-   */
-  private Map<String, PropertyUpdater> propertyUpdaters =
-      new HashMap<String, PropertyUpdater>();
-
   /**
    * Maps configuration type (string) to associated properties
    */
@@ -104,7 +97,6 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
                           AmbariManagementController managementController) {
 
     super(propertyIds, keyPropertyIds, managementController);
-    registerPropertyUpdaters();
   }
 
   /**
@@ -377,7 +369,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
     BlueprintEntity blueprint = getExistingBlueprint(blueprintName);
     Stack stack = parseStack(blueprint);
 
-    Map<String, HostGroup> blueprintHostGroups = 
parseBlueprintHostGroups(blueprint, stack);
+    Map<String, HostGroupImpl> blueprintHostGroups = 
parseBlueprintHostGroups(blueprint, stack);
     applyRequestInfoToHostGroups(properties, blueprintHostGroups);
     Collection<Map<String, String>> configOverrides = (Collection<Map<String, 
String>>)properties.get("configurations");
     processConfigurations(processBlueprintConfigurations(blueprint, 
configOverrides),
@@ -410,7 +402,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    * @throws IllegalArgumentException if required password properties are 
missing and no
    *                                  default is specified via 
'default_password'
    */
-  private void validatePasswordProperties(BlueprintEntity blueprint, 
Map<String, HostGroup> hostGroups,
+  private void validatePasswordProperties(BlueprintEntity blueprint, 
Map<String, HostGroupImpl> hostGroups,
                                           String defaultPassword) {
 
     Map<String, Map<String, Collection<String>>> missingPasswords = 
blueprint.validateConfigurations(
@@ -432,8 +424,8 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
           if 
(isPropertyInConfiguration(mapClusterConfigurations.get(configType), property)){
               propIter.remove();
           } else {
-            HostGroup hg = hostGroups.get(entry.getKey());
-            if (hg != null && 
isPropertyInConfiguration(hg.getConfigurations().get(configType), property)) {
+            HostGroupImpl hg = hostGroups.get(entry.getKey());
+            if (hg != null && 
isPropertyInConfiguration(hg.getConfigurationProperties().get(configType), 
property)) {
               propIter.remove();
             }  else if (setDefaultPassword(defaultPassword, configType, 
property)) {
               propIter.remove();
@@ -510,7 +502,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempted to create a service or 
component that already exists
    * @throws NoSuchParentResourceException  a required parent resource is 
missing
    */
-  private void createServiceAndComponentResources(Map<String, HostGroup> 
blueprintHostGroups,
+  private void createServiceAndComponentResources(Map<String, HostGroupImpl> 
blueprintHostGroups,
                                                   String clusterName, 
Set<String> services)
                                                   throws SystemException,
                                                          
UnsupportedPropertyException,
@@ -555,12 +547,12 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempt to create a host or 
host_component which already exists
    * @throws NoSuchParentResourceException  a required parent resource is 
missing
    */
-  private void createHostAndComponentResources(Map<String, HostGroup> 
blueprintHostGroups, String clusterName)
+  private void createHostAndComponentResources(Map<String, HostGroupImpl> 
blueprintHostGroups, String clusterName)
       throws SystemException, UnsupportedPropertyException, 
ResourceAlreadyExistsException, NoSuchParentResourceException {
 
     ResourceProvider hostProvider = getResourceProvider(Resource.Type.Host);
     ResourceProvider hostComponentProvider = 
getResourceProvider(Resource.Type.HostComponent);
-    for (HostGroup group : blueprintHostGroups.values()) {
+    for (HostGroupImpl group : blueprintHostGroups.values()) {
       for (String host : group.getHostInfo()) {
         Map<String, Object> hostProperties = new HashMap<String, Object>();
         hostProperties.put("Hosts/cluster_name", clusterName);
@@ -599,7 +591,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    * @throws ResourceAlreadyExistsException attempt to create a component 
which already exists
    * @throws NoSuchParentResourceException  a required parent resource is 
missing
    */
-  private void createComponentResources(Map<String, HostGroup> 
blueprintHostGroups,
+  private void createComponentResources(Map<String, HostGroupImpl> 
blueprintHostGroups,
                                         String clusterName, Set<String> 
services)
                                         throws SystemException,
                                                UnsupportedPropertyException,
@@ -607,7 +599,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
                                                NoSuchParentResourceException {
     for (String service : services) {
       Set<String> components = new HashSet<String>();
-      for (HostGroup hostGroup : blueprintHostGroups.values()) {
+      for (HostGroupImpl hostGroup : blueprintHostGroups.values()) {
         Collection<String> serviceComponents = 
hostGroup.getComponents(service);
         if (serviceComponents != null && !serviceComponents.isEmpty()) {
           components.addAll(serviceComponents);
@@ -694,7 +686,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    */
   @SuppressWarnings("unchecked")
   private void applyRequestInfoToHostGroups(Map<String, Object> properties,
-                                            Map<String, HostGroup> 
blueprintHostGroups)
+                                            Map<String, HostGroupImpl> 
blueprintHostGroups)
                                             throws IllegalArgumentException {
 
     @SuppressWarnings("unchecked")
@@ -711,7 +703,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
       if (name == null || name.isEmpty()) {
         throw new IllegalArgumentException("Every host_group must include a 
non-null 'name' property");
       }
-      HostGroup hostGroup = blueprintHostGroups.get(name);
+      HostGroupImpl hostGroup = blueprintHostGroups.get(name);
 
       if (hostGroup == null) {
         throw new IllegalArgumentException("Invalid host_group specified: " + 
name +
@@ -731,7 +723,7 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
         }
         hostGroup.addHostInfo(fqdn);
       }
-      Map<String, Map<String, String>> existingConfigurations = 
hostGroup.getConfigurations();
+      Map<String, Map<String, String>> existingConfigurations = 
hostGroup.getConfigurationProperties();
       overrideExistingProperties(existingConfigurations, 
(Collection<Map<String, String>>)
           hostGroupProperties.get("configurations"));
 
@@ -786,7 +778,8 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    */
   private void processConfigurations(Map<String, Map<String, String>> 
blueprintConfigurations,
                                      Map<String, Map<String, Map<String, 
String>>> blueprintAttributes,
-                                     Stack stack, Map<String, HostGroup> 
blueprintHostGroups)  {
+                                     Stack stack, Map<String, HostGroupImpl> 
blueprintHostGroups)  {
+
 
     for (String service : getServicesToDeploy(stack, blueprintHostGroups)) {
       for (String type : stack.getConfigurationTypes(service)) {
@@ -817,16 +810,8 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
     processBlueprintClusterConfigurations(blueprintConfigurations);
     processBlueprintClusterConfigAttributes(blueprintAttributes);
 
-    for (Map.Entry<String, Map<String, String>> entry : 
mapClusterConfigurations.entrySet()) {
-      for (Map.Entry<String, String> propertyEntry : 
entry.getValue().entrySet()) {
-        String propName = propertyEntry.getKey();
-        // see if property needs to be updated
-        PropertyUpdater propertyUpdater = propertyUpdaters.get(propName);
-        if (propertyUpdater != null) {
-          propertyEntry.setValue(propertyUpdater.update(blueprintHostGroups, 
propertyEntry.getValue()));
-        }
-      }
-    }
+    BlueprintConfigurationProcessor configurationProcessor = new 
BlueprintConfigurationProcessor(mapClusterConfigurations);
+    configurationProcessor.doUpdateForClusterCreate(blueprintHostGroups);
     setMissingConfigurations();
   }
   
@@ -942,9 +927,9 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    *
    * @return set of service names which will be deployed
    */
-  private Set<String> getServicesToDeploy(Stack stack, Map<String, HostGroup> 
blueprintHostGroups) {
+  private Set<String> getServicesToDeploy(Stack stack, Map<String, 
HostGroupImpl> blueprintHostGroups) {
     Set<String> services = new HashSet<String>();
-    for (HostGroup group : blueprintHostGroups.values()) {
+    for (HostGroupImpl group : blueprintHostGroups.values()) {
       if (! group.getHostInfo().isEmpty()) {
         services.addAll(stack.getServicesForComponents(group.getComponents()));
       }
@@ -956,75 +941,6 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
   }
 
   /**
-   * Register updaters for configuration properties.
-   */
-  private void registerPropertyUpdaters() {
-    // NAMENODE
-    propertyUpdaters.put("dfs.http.address", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.http-address", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.https.address", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.https-address", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("fs.default.name", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("fs.defaultFS", new 
SingleHostPropertyUpdater("NAMENODE"));
-    propertyUpdaters.put("hbase.rootdir", new 
SingleHostPropertyUpdater("NAMENODE"));
-
-    // SECONDARY_NAMENODE
-    propertyUpdaters.put("dfs.secondary.http.address", new 
SingleHostPropertyUpdater("SECONDARY_NAMENODE"));
-    propertyUpdaters.put("dfs.namenode.secondary.http-address", new 
SingleHostPropertyUpdater("SECONDARY_NAMENODE"));
-
-    // HISTORY_SERVER
-    propertyUpdaters.put("yarn.log.server.url", new 
SingleHostPropertyUpdater("HISTORYSERVER"));
-    propertyUpdaters.put("mapreduce.jobhistory.webapp.address", new 
SingleHostPropertyUpdater("HISTORYSERVER"));
-    propertyUpdaters.put("mapreduce.jobhistory.address", new 
SingleHostPropertyUpdater("HISTORYSERVER"));
-
-    // RESOURCEMANAGER
-    propertyUpdaters.put("yarn.resourcemanager.hostname", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.resource-tracker.address", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.webapp.address", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.scheduler.address", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.address", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-    propertyUpdaters.put("yarn.resourcemanager.admin.address", new 
SingleHostPropertyUpdater("RESOURCEMANAGER"));
-
-    // JOBTRACKER
-    propertyUpdaters.put("mapred.job.tracker", new 
SingleHostPropertyUpdater("JOBTRACKER"));
-    propertyUpdaters.put("mapred.job.tracker.http.address", new 
SingleHostPropertyUpdater("JOBTRACKER"));
-    propertyUpdaters.put("mapreduce.history.server.http.address", new 
SingleHostPropertyUpdater("JOBTRACKER"));
-
-    // HIVE_SERVER
-    propertyUpdaters.put("hive.metastore.uris", new 
SingleHostPropertyUpdater("HIVE_SERVER"));
-    propertyUpdaters.put("hive_ambari_host", new 
SingleHostPropertyUpdater("HIVE_SERVER"));
-    propertyUpdaters.put("javax.jdo.option.ConnectionURL",
-        new DBPropertyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
-
-    // OOZIE_SERVER
-    propertyUpdaters.put("oozie.base.url", new 
SingleHostPropertyUpdater("OOZIE_SERVER"));
-    propertyUpdaters.put("oozie_ambari_host", new 
SingleHostPropertyUpdater("OOZIE_SERVER"));
-
-    // ZOOKEEPER_SERVER
-    propertyUpdaters.put("hbase.zookeeper.quorum", new 
MultipleHostPropertyUpdater("ZOOKEEPER_SERVER"));
-    propertyUpdaters.put("templeton.zookeeper.hosts", new 
MultipleHostPropertyUpdater("ZOOKEEPER_SERVER"));
-
-    // STORM
-    propertyUpdaters.put("nimbus.host", new 
SingleHostPropertyUpdater("NIMBUS"));
-    propertyUpdaters.put("worker.childopts", new 
SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("supervisor.childopts", new 
SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("nimbus.childopts", new 
SingleHostPropertyUpdater("GANGLIA_SERVER"));
-    propertyUpdaters.put("storm.zookeeper.servers",
-      new YamlMultiValuePropertyDecorator(new 
MultipleHostPropertyUpdater("ZOOKEEPER_SERVER")));
-
-    // properties which need "m' appended.  Required due to AMBARI-4933
-    propertyUpdaters.put("namenode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("namenode_opt_newsize", new MPropertyUpdater());
-    propertyUpdaters.put("namenode_opt_maxnewsize", new MPropertyUpdater());
-    propertyUpdaters.put("dtnode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_opt_newsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_opt_maxnewsize", new MPropertyUpdater());
-    propertyUpdaters.put("jtnode_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("hbase_master_heapsize", new MPropertyUpdater());
-    propertyUpdaters.put("hbase_regionserver_heapsize", new 
MPropertyUpdater());
-  }
-
-  /**
    * Register config groups for host group scoped configuration.
    * For each host group with configuration specified in the blueprint, a 
config group is created
    * and the hosts associated with the host group are assigned to the config 
group.
@@ -1038,16 +954,16 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    * @throws UnsupportedPropertyException   an invalid property is provided 
when creating a config group
    * @throws NoSuchParentResourceException  attempt to create a config group 
for a non-existing cluster
    */
-  private void registerConfigGroups(String clusterName, Map<String, HostGroup> 
hostGroups, Stack stack) throws
+  private void registerConfigGroups(String clusterName, Map<String, 
HostGroupImpl> hostGroups, Stack stack) throws
       ResourceAlreadyExistsException, SystemException,
       UnsupportedPropertyException, NoSuchParentResourceException {
     
-    for (HostGroup group : hostGroups.values()) {
+    for (HostGroupImpl group : hostGroups.values()) {
       HostGroupEntity entity = group.getEntity();
       Map<String, Map<String, Config>> groupConfigs = new HashMap<String, 
Map<String, Config>>();
       
-      handleGlobalsBackwardsCompability(stack, group.getConfigurations());
-      for (Map.Entry<String, Map<String, String>> entry: 
group.getConfigurations().entrySet()) {
+      handleGlobalsBackwardsCompability(stack, 
group.getConfigurationProperties());
+      for (Map.Entry<String, Map<String, String>> entry: 
group.getConfigurationProperties().entrySet()) {
         String type = entry.getKey();
         String service = stack.getServiceForConfigType(type);
         Config config = new ConfigImpl(type);
@@ -1079,11 +995,11 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
    *
    * @param hostGroups map of host group name to host group
    */
-  private void validateHostMappings(Map<String, HostGroup> hostGroups) {
+  private void validateHostMappings(Map<String, HostGroupImpl> hostGroups) {
     Collection<String> mappedHosts = new HashSet<String>();
     Collection<String> flaggedHosts = new HashSet<String>();
 
-    for (HostGroup hostgroup : hostGroups.values()) {
+    for (HostGroupImpl hostgroup : hostGroups.values()) {
       for (String host : hostgroup.getHostInfo()) {
         if (mappedHosts.contains(host)) {
           flaggedHosts.add(host);
@@ -1099,279 +1015,5 @@ public class ClusterResourceProvider extends 
BaseBlueprintProcessor {
                                          flaggedHosts);
     }
   }
-
-
-  /**
-   * Provides functionality to update a property value.
-   */
-  public interface PropertyUpdater {
-    /**
-     * Update a property value.
-     *
-     * @param hostGroups  host groups
-     * @param origValue   original value of property
-     *
-     * @return new property value
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue);
-  }
-
-  /**
-   * Topology based updater which replaces the original host name of a 
property with the host name
-   * which runs the associated (master) component in the new cluster.
-   */
-  private class SingleHostPropertyUpdater implements PropertyUpdater {
-    /**
-     * Component name
-     */
-    private String component;
-
-    /**
-     * Constructor.
-     *
-     * @param component  component name associated with the property
-     */
-    public SingleHostPropertyUpdater(String component) {
-      this.component = component;
-    }
-
-    /**
-     * Update the property with the new host name which runs the associated 
component.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return updated property value with old host name replaced by new host 
name
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue)  
{
-      Collection<HostGroup> matchingGroups = 
getHostGroupsForComponent(component, hostGroups.values());
-      if (matchingGroups.size() == 1) {
-        return origValue.replace("localhost", 
matchingGroups.iterator().next().getHostInfo().iterator().next());
-      } else {
-        throw new IllegalArgumentException("Unable to update configuration 
property with topology information. " +
-            "Component '" + this.component + "' is not mapped to any host 
group or is mapped to multiple groups.");
-      }
-    }
-  }
-
-  /**
-   * Topology based updater which replaces the original host name of a 
database property with the host name
-   * where the DB is deployed in the new cluster.  If an existing database is 
specified, the original property
-   * value is returned.
-   */
-  private class DBPropertyUpdater extends SingleHostPropertyUpdater {
-    /**
-     * Property type (global, core-site ...) for property which is used to 
determine if DB is external.
-     */
-    private final String configPropertyType;
-
-    /**
-     * Name of property which is used to determine if DB is new or existing 
(exernal).
-     */
-    private final String conditionalPropertyName;
-
-    /**
-     * Constructor.
-     *
-     * @param component                component to get hot name if new DB
-     * @param configPropertyType       config type of property used to 
determine if DB is external
-     * @param conditionalPropertyName  name of property which is used to 
determine if DB is external
-     */
-    private DBPropertyUpdater(String component, String configPropertyType, 
String conditionalPropertyName) {
-      super(component);
-      this.configPropertyType = configPropertyType;
-      this.conditionalPropertyName = conditionalPropertyName;
-    }
-
-    /**
-     * If database is a new managed database, update the property with the new 
host name which
-     * runs the associated component.  If the database is external 
(non-managed), return the
-     * original value.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return updated property value with old host name replaced by new host 
name or original value
-     *         if the database is exernal
-     */
-    @Override
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      if (isDatabaseManaged()) {
-        return super.update(hostGroups, origValue);
-      } else {
-        return origValue;
-      }
-    }
-
-    /**
-     * Determine if database is managed, meaning that it is a component in the 
cluster topology.
-     *
-     * @return true if the DB is managed; false otherwise
-     */
-    //todo: use super.isDependencyManaged() and remove this method
-    private boolean isDatabaseManaged() {
-      // conditional property should always exist since it is required to be 
specified in the stack
-      return mapClusterConfigurations.get(configPropertyType).
-          get(conditionalPropertyName).startsWith("New");
-    }
-  }
-
-  /**
-   * Topology based updater which replaces original host names (possibly more 
than one) contained in a property
-   * value with the host names which runs the associated component in the new 
cluster.
-   */
-  private class MultipleHostPropertyUpdater implements PropertyUpdater {
-    /**
-     * Component name
-     */
-    private String component;
-
-    /**
-     * Separator for multiple property values
-     */
-    private Character separator = ',';
-
-    /**
-     * Constructor.
-     *
-     * @param component  component name associated with the property
-     */
-    public MultipleHostPropertyUpdater(String component) {
-      this.component = component;
-    }
-
-    /**
-     * Constructor with customized separator.
-     * @param component Component name
-     * @param separator separator character
-     */
-    public MultipleHostPropertyUpdater(String component, Character separator) {
-      this.component = component;
-      this.separator = separator;
-    }
-
-    //todo: specific to default values of EXACTLY 'localhost' or 
'localhost:port'.
-    //todo: when blueprint contains source configurations, these props will 
contain actual host names, not localhost.
-    //todo: currently assuming that all hosts will share the same port
-    /**
-     * Update all host names included in the original property value with new 
host names which run the associated
-     * component.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original value
-     *
-     * @return updated property value with old host names replaced by new host 
names
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      Collection<HostGroup> matchingGroups = 
getHostGroupsForComponent(component, hostGroups.values());
-      boolean containsPort = origValue.contains(":");
-      String port = null;
-      if (containsPort) {
-        port = origValue.substring(origValue.indexOf(":") + 1);
-      }
-      StringBuilder sb = new StringBuilder();
-      boolean firstHost = true;
-      for (HostGroup group : matchingGroups) {
-        for (String host : group.getHostInfo()) {
-          if (!firstHost) {
-            sb.append(separator);
-          } else {
-            firstHost = false;
-          }
-          sb.append(host);
-          if (containsPort) {
-            sb.append(":");
-            sb.append(port);
-          }
-        }
-      }
-
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Updater which appends "m" to the original property value.
-   * For example, "1024" would be updated to "1024m".
-   */
-  private class MPropertyUpdater implements PropertyUpdater {
-    /**
-     * Append 'm' to the original property value if it doesn't already exist.
-     *
-     * @param hostGroups  host groups                 host groups
-     * @param origValue   original value of property  original property value
-     *
-     * @return property with 'm' appended
-     */
-    public String update(Map<String, HostGroup> hostGroups, String origValue) {
-      return origValue.endsWith("m") ? origValue : origValue + 'm';
-    }
-  }
-
-  /**
-   * Class to facilitate special formatting needs of property values.
-   */
-  private abstract class AbstractPropertyValueDecorator implements 
PropertyUpdater {
-    PropertyUpdater propertyUpdater;
-
-    public AbstractPropertyValueDecorator(PropertyUpdater propertyUpdater) {
-      this.propertyUpdater = propertyUpdater;
-    }
-
-    /**
-     * Return decorated form of the updated input property value.
-     * @param hostGroupMap Map of host group name to HostGroup
-     * @param origValue   original value of property
-     *
-     * @return Formatted output string
-     */
-    @Override
-    public String update(Map<String, HostGroup> hostGroupMap, String 
origValue) {
-      return doFormat(propertyUpdater.update(hostGroupMap, origValue));
-    }
-
-    /**
-     * Transform input string to required output format.
-     * @param originalValue Original value of property
-     * @return Formatted output string
-     */
-    public abstract String doFormat(String originalValue);
-  }
-
-  /**
-   * Return properties of the form ['value']
-   */
-  private class YamlMultiValuePropertyDecorator extends 
AbstractPropertyValueDecorator {
-
-    public YamlMultiValuePropertyDecorator(PropertyUpdater propertyUpdater) {
-      super(propertyUpdater);
-    }
-
-    /**
-     * Format input String of the form, str1,str2 to ['str1','str2']
-     * @param origValue Input string
-     * @return Formatted string
-     */
-    @Override
-    public String doFormat(String origValue) {
-      StringBuilder sb = new StringBuilder();
-      if (origValue != null) {
-        sb.append("[");
-        boolean isFirst = true;
-        for (String value : origValue.split(",")) {
-          if (!isFirst) {
-            sb.append(",");
-          } else {
-            isFirst = false;
-          }
-          sb.append("'");
-          sb.append(value);
-          sb.append("'");
-        }
-        sb.append("]");
-      }
-      return sb.toString();
-    }
-  }
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
index 06d556d..98d1a46 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationResourceProvider.java
@@ -145,7 +145,7 @@ public class ConfigurationResourceProvider extends
     final Set<ConfigurationRequest> requests = new 
HashSet<ConfigurationRequest>();
 
     for (Map<String, Object> propertyMap : getPropertyMaps(predicate)) {
-      requests.add(getRequest(propertyMap));
+      requests.add(getRequest(request, propertyMap));
     }
 
     Set<ConfigurationResponse> responses = getResources(new 
Command<Set<ConfigurationResponse>>() {
@@ -245,12 +245,19 @@ public class ConfigurationResourceProvider extends
    *
    * @return a configuration request
    */
-  private ConfigurationRequest getRequest(Map<String, Object> properties) {
+  private ConfigurationRequest getRequest(Request request, Map<String, Object> 
properties) {
     String type = (String) 
properties.get(CONFIGURATION_CONFIG_TYPE_PROPERTY_ID);
     String tag  = (String) 
properties.get(CONFIGURATION_CONFIG_TAG_PROPERTY_ID);
 
-    return new ConfigurationRequest(
+    ConfigurationRequest configRequest = new ConfigurationRequest(
         (String) properties.get(CONFIGURATION_CLUSTER_NAME_PROPERTY_ID),
         type, tag, new HashMap<String, String>(), new HashMap<String, 
Map<String, String>>());
+
+    Set<String> requestedIds = request.getPropertyIds();
+    if (requestedIds.contains("properties") || requestedIds.contains("*")) {
+      configRequest.setIncludeProperties(true);
+    }
+
+    return configRequest;
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostGroup.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostGroup.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostGroup.java
new file mode 100644
index 0000000..303bd15
--- /dev/null
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostGroup.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Host Group definition.
+ */
+public interface HostGroup {
+
+  /**
+   * Get the host group name.
+   *
+   * @return host group name
+   */
+  public String getName();
+
+  /**
+   * Get associated host information.
+   *
+   * @return collection of hosts associated with the host group
+   */
+  public Collection<String> getHostInfo();
+
+  /**
+   * Get the components associated with the host group.
+   *
+   * @return  collection of component names for the host group
+   */
+  public Collection<String> getComponents();
+
+  /**
+   * Get the configurations associated with the host group.
+   *
+   * @return map of configuration type to a map of properties
+   */
+  public Map<String, Map<String, String>> getConfigurationProperties();
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
index 4d2e60e..04d86ad 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
@@ -22,22 +22,30 @@ import org.apache.ambari.server.api.query.QueryInfo;
 import org.apache.ambari.server.api.resources.ClusterResourceDefinition;
 import org.apache.ambari.server.api.resources.HostComponentResourceDefinition;
 import org.apache.ambari.server.api.resources.HostResourceDefinition;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.api.services.Result;
 import org.apache.ambari.server.api.services.ResultImpl;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.api.util.TreeNodeImpl;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -91,12 +99,20 @@ public class ClusterBlueprintRendererTest {
     
assertTrue(propertyTree.getChild("Host/HostComponent").getObject().contains("HostRoles/component_name"));
   }
 
+  @Ignore
   @Test
   public void testFinalizeResult() throws Exception{
+
+    AmbariManagementController controller = 
createMock(AmbariManagementController.class);
+    AmbariMetaInfo stackInfo = createNiceMock(AmbariMetaInfo.class);
+
+    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", 
"HDFS")).andReturn(Collections.<String, PropertyInfo>emptyMap());
+    expect(stackInfo.getRequiredProperties("HDP", "1.3.3", 
"MAPREDUCE")).andReturn(Collections.<String, PropertyInfo>emptyMap());
+
     Result result = new ResultImpl(true);
     createClusterResultTree(result.getResultTree());
 
-    ClusterBlueprintRenderer renderer = new ClusterBlueprintRenderer();
+    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(controller);
     Result blueprintResult = renderer.finalizeResult(result);
 
     TreeNode<Resource> blueprintTree = blueprintResult.getResultTree();
@@ -222,4 +238,18 @@ public class ClusterBlueprintRendererTest {
   private String getLocalHostName() throws UnknownHostException {
     return InetAddress.getLocalHost().getHostName();
   }
+
  /**
   * ClusterBlueprintRenderer subclass used for testing.  Overrides
   * getController() so the test can supply its own (mock)
   * AmbariManagementController instance.
   */
  private static class TestBlueprintRenderer extends ClusterBlueprintRenderer {

    // controller instance handed back from getController()
    private AmbariManagementController testController;

    private TestBlueprintRenderer(AmbariManagementController controller) {
      testController = controller;
    }

    @Override
    protected AmbariManagementController getController() {
      return testController;
    }
  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
new file mode 100644
index 0000000..a4b8ba9
--- /dev/null
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -0,0 +1,932 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.junit.Test;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertFalse;
+import static junit.framework.Assert.assertTrue;
+
+
+/**
+ * BlueprintConfigurationProcessor unit tests.
+ */
+public class BlueprintConfigurationProcessorTest {
+
+  @Test
+  public void testDoUpdateForBlueprintExport_SingleHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "testhost");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = 
updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("%HOSTGROUP::group1%", updatedVal);
+  }
+  
+  @Test
+  public void testDoUpdateForBlueprintExport_SingleHostProperty__withPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "testhost:8020");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("%HOSTGROUP::group1%:8020", updatedVal);
+  }
+
+  @Test
+  public void 
testDoUpdateForBlueprintExport_SingleHostProperty__ExternalReference() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "external-host");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    
assertFalse(updatedProperties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase.zookeeper.quorum", 
"testhost,testhost2,testhost2a,testhost2b");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = 
updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    assertEquals("%HOSTGROUP::group1%,%HOSTGROUP::group2%", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__WithPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("templeton.zookeeper.hosts", 
"testhost:5050,testhost2:9090,testhost2a:9090,testhost2b:9090");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = 
updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    assertEquals("%HOSTGROUP::group1%:5050,%HOSTGROUP::group2%:9090", 
updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("storm.zookeeper.servers", 
"['testhost:5050','testhost2:9090','testhost2a:9090','testhost2b:9090']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = 
updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertEquals("['%HOSTGROUP::group1%:5050','%HOSTGROUP::group2%:9090']", 
updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_DBHostProperty() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", 
"jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true");
+    properties.put("hive-site", hiveSiteProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    String updatedVal = 
updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    
assertEquals("jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true",
 updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForBlueprintExport_DBHostProperty__External() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("javax.jdo.option.ConnectionURL", 
"jdbc:mysql://external-host/hive?createDatabaseIfNotExist=true");
+    properties.put("hive-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForBlueprintExport(hostGroups);
+    
assertFalse(updatedProperties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "localhost");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("testhost", updatedVal);
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_SingleHostProperty__defaultValue__WithPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "localhost:5050");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("testhost:5050", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase.zookeeper.quorum", "localhost");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost");
+    expectedHosts.add("testhost2");
+    expectedHosts.add("testhost2a");
+    expectedHosts.add("testhost2b");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___withPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("templeton.zookeeper.hosts", "localhost:9090");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost:9090");
+    expectedHosts.add("testhost2:9090");
+    expectedHosts.add("testhost2a:9090");
+    expectedHosts.add("testhost2b:9090");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("storm.zookeeper.servers", "['localhost']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertTrue(updatedVal.startsWith("["));
+    assertTrue(updatedVal.endsWith("]"));
+    // remove the surrounding brackets
+    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
+
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("'testhost'");
+    expectedHosts.add("'testhost2'");
+    expectedHosts.add("'testhost2a'");
+    expectedHosts.add("'testhost2b'");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MProperty__defaultValues() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase_master_heapsize", "512m");
+    properties.put("hbase-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hbase-env").get("hbase_master_heapsize");
+    assertEquals("512m", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MProperty__missingM() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase_master_heapsize", "512");
+    properties.put("hbase-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hbase-env").get("hbase_master_heapsize");
+    assertEquals("512m", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue() 
{
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("yarn.resourcemanager.hostname", "%HOSTGROUP::group1%");
+    properties.put("yarn-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    assertEquals("testhost", updatedVal);
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_SingleHostProperty__exportedValue__WithPort() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("fs.defaultFS", "%HOSTGROUP::group1%:5050");
+    properties.put("core-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    assertEquals("testhost:5050", updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues() 
{
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("hbase.zookeeper.quorum", 
"%HOSTGROUP::group1%,%HOSTGROUP::group2%");
+    properties.put("hbase-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost");
+    expectedHosts.add("testhost2");
+    expectedHosts.add("testhost2a");
+    expectedHosts.add("testhost2b");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___withPorts() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("templeton.zookeeper.hosts", 
"%HOSTGROUP::group1%:9090,%HOSTGROUP::group2%:9091");
+    properties.put("webhcat-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("testhost:9090");
+    expectedHosts.add("testhost2:9091");
+    expectedHosts.add("testhost2a:9091");
+    expectedHosts.add("testhost2b:9091");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void 
testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___YAML() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("storm.zookeeper.servers", 
"['%HOSTGROUP::group1%:9090','%HOSTGROUP::group2%:9091']");
+    properties.put("storm-site", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("ZOOKEEPER_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_SERVER");
+    Set<String> hosts2 = new HashSet<String>();
+    hosts2.add("testhost2");
+    hosts2.add("testhost2a");
+    hosts2.add("testhost2b");
+    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+
+    Collection<String> hgComponents3 = new HashSet<String>();
+    hgComponents2.add("HDFS_CLIENT");
+    hgComponents2.add("ZOOKEEPER_CLIENT");
+    Set<String> hosts3 = new HashSet<String>();
+    hosts3.add("testhost3");
+    hosts3.add("testhost3a");
+    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+    hostGroups.put(group3.getName(), group3);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    assertTrue(updatedVal.startsWith("["));
+    assertTrue(updatedVal.endsWith("]"));
+    // remove the surrounding brackets
+    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
+
+    String[] hosts = updatedVal.split(",");
+
+    Collection<String> expectedHosts = new HashSet<String>();
+    expectedHosts.add("'testhost:9090'");
+    expectedHosts.add("'testhost2:9091'");
+    expectedHosts.add("'testhost2a:9091'");
+    expectedHosts.add("'testhost2b:9091'");
+
+    assertEquals(4, hosts.length);
+    for (String host : hosts) {
+      assertTrue(expectedHosts.contains(host));
+      expectedHosts.remove(host);
+    }
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__defaultValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", 
"jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true");
+    Map<String, String> hiveEnvProps = new HashMap<String, String>();
+    hiveEnvProps.put("hive_database", "New MySQL Database");
+    properties.put("hive-site", hiveSiteProps);
+    properties.put("hive-env", hiveEnvProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", 
updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__exportedValue() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> hiveSiteProps = new HashMap<String, String>();
+    hiveSiteProps.put("javax.jdo.option.ConnectionURL", 
"jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true");
+    Map<String, String> hiveEnvProps = new HashMap<String, String>();
+    hiveEnvProps.put("hive_database", "New MySQL Database");
+    properties.put("hive-site", hiveSiteProps);
+    properties.put("hive-env", hiveEnvProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    hgComponents.add("MYSQL_SERVER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", 
updatedVal);
+  }
+
+  @Test
+  public void testDoUpdateForClusterCreate_DBHostProperty__external() {
+    Map<String, Map<String, String>> properties = new HashMap<String, 
Map<String, String>>();
+    Map<String, String> typeProps = new HashMap<String, String>();
+    typeProps.put("javax.jdo.option.ConnectionURL", 
"jdbc:mysql://myHost.com/hive?createDatabaseIfNotExist=true");
+    typeProps.put("hive_database", "Existing MySQL Database");
+    properties.put("hive-env", typeProps);
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    hgComponents.add("SECONDARY_NAMENODE");
+    hgComponents.add("RESOURCEMANAGER");
+    HostGroup group1 = new TestHostGroup("group1", 
Collections.singleton("testhost"), hgComponents);
+
+    Collection<String> hgComponents2 = new HashSet<String>();
+    hgComponents2.add("DATANODE");
+    hgComponents2.add("HDFS_CLIENT");
+    HostGroup group2 = new TestHostGroup("group2", 
Collections.singleton("testhost2"), hgComponents2);
+
+    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
+    hostGroups.put(group1.getName(), group1);
+    hostGroups.put(group2.getName(), group2);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(properties);
+    Map<String, Map<String, String>> updatedProperties = 
updater.doUpdateForClusterCreate(hostGroups);
+    String updatedVal = 
updatedProperties.get("hive-env").get("javax.jdo.option.ConnectionURL");
+    assertEquals("jdbc:mysql://myHost.com/hive?createDatabaseIfNotExist=true", 
updatedVal);
+  }
+
+  private class TestHostGroup implements HostGroup {
+
+    private String name;
+    private Collection<String> hosts;
+    private Collection<String> components;
+
+    private TestHostGroup(String name, Collection<String> hosts, 
Collection<String> components) {
+      this.name = name;
+      this.hosts = hosts;
+      this.components = components;
+    }
+
+    @Override
+    public String getName() {
+      return name;
+    }
+
+    @Override
+    public Collection<String> getHostInfo() {
+      return hosts;
+    }
+
+    @Override
+    public Collection<String> getComponents() {
+      return components;
+    }
+
+    @Override
+    public Map<String, Map<String, String>> getConfigurationProperties() {
+      return null;
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/8d464c2b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
----------------------------------------------------------------------
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
index d1aa4d5..e443bc6 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.controller.internal;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.createStrictMock;
 import static org.easymock.EasyMock.eq;
@@ -34,9 +33,7 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.lang.reflect.Field;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -61,7 +58,6 @@ import 
org.apache.ambari.server.controller.StackServiceComponentRequest;
 import org.apache.ambari.server.controller.StackServiceComponentResponse;
 import org.apache.ambari.server.controller.StackServiceRequest;
 import org.apache.ambari.server.controller.StackServiceResponse;
-import 
org.apache.ambari.server.controller.internal.ClusterResourceProvider.PropertyUpdater;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.RequestStatus;
@@ -69,7 +65,6 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import 
org.apache.ambari.server.controller.internal.BaseBlueprintProcessor.HostGroup;
 import org.apache.ambari.server.orm.dao.BlueprintDAO;
 import org.apache.ambari.server.orm.entities.BlueprintConfigEntity;
 import org.apache.ambari.server.orm.entities.BlueprintEntity;
@@ -80,7 +75,6 @@ import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.state.State;
-import org.apache.commons.collections.CollectionUtils;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.junit.Assert;
@@ -2535,103 +2529,6 @@ public class ClusterResourceProviderTest {
         hostComponentResourceProvider, configGroupResourceProvider, 
persistKeyValue, metaInfo);
   }
 
-  @SuppressWarnings("unchecked")
-  @Test
-  public void testBlueprintPropertyUpdaters() throws Exception {
-    final Map<String, String> singleHostProperty1 =
-      Collections.singletonMap("dfs.http.address", "localhost:50070");
-
-    final Map<String, String> singleHostProperty2 =
-      Collections.singletonMap("hive.metastore.uris", 
"prefix.localhost.suffix");
-
-    final Map<String, String> multiHostProperty1 =
-      Collections.singletonMap("hbase.zookeeper.quorum", "localhost");
-
-    final Map<String, String> multiHostProperty2 =
-      Collections.singletonMap("storm.zookeeper.servers", "['localhost']");
-
-    final Map<String, String> mProperty =
-      Collections.singletonMap("namenode_heapsize", "1025");
-
-    final Map<String, String> databaseProperty =
-        Collections.singletonMap("javax.jdo.option.ConnectionURL", 
"localhost:12345");
-
-    final HostGroup hostGroup1 = createNiceMock(HostGroup.class);
-    final HostGroup hostGroup2 = createNiceMock(HostGroup.class);
-
-    expect(hostGroup1.getComponents()).andReturn(new ArrayList<String>() {{
-      add("NAMENODE");
-      add("HBASE_MASTER");
-      add("HIVE_SERVER");
-      add("ZOOKEEPER_SERVER");
-    }}).anyTimes();
-    
expect(hostGroup1.getHostInfo()).andReturn(Collections.singletonList("h1")).anyTimes();
-
-    
expect(hostGroup2.getComponents()).andReturn(Collections.singletonList("ZOOKEEPER_SERVER")).anyTimes();
-    
expect(hostGroup2.getHostInfo()).andReturn(Collections.singletonList("h2")).anyTimes();
-
-    Map<String, HostGroup> hostGroups = new
-      HashMap<String, HostGroup>() {{
-        put("host_group_1", hostGroup1);
-        put("host_group_2", hostGroup2);
-      }};
-
-    AmbariManagementController managementController = 
createNiceMock(AmbariManagementController.class);
-
-    ClusterResourceProvider resourceProvider =
-      createMockBuilder(ClusterResourceProvider.class)
-        .withConstructor(Set.class, Map.class, 
AmbariManagementController.class)
-        .withArgs(new HashSet<String>(), new HashMap<Resource.Type, String>(), 
managementController)
-        .createMock();
-
-    replay(managementController, resourceProvider, hostGroup1, hostGroup2);
-
-    Map<String, Map<String, String>> mapConfigurations;
-    Field configField = 
ClusterResourceProvider.class.getDeclaredField("mapClusterConfigurations");
-    configField.setAccessible(true);
-    mapConfigurations = (Map<String, Map<String, String>>) 
configField.get(resourceProvider);
-
-    Map<String, PropertyUpdater> propertyUpdaterMap;
-    Field f = 
ClusterResourceProvider.class.getDeclaredField("propertyUpdaters");
-    f.setAccessible(true);
-    propertyUpdaterMap = (Map<String, PropertyUpdater>) 
f.get(resourceProvider);
-
-    Assert.assertNotNull(propertyUpdaterMap);
-
-    String newValue;
-
-    Map.Entry<String, String> entry = 
singleHostProperty1.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    Assert.assertEquals("h1:50070", newValue);
-
-    entry = singleHostProperty2.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    Assert.assertEquals("prefix.h1.suffix", newValue);
-
-    entry = multiHostProperty1.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    Assert.assertTrue(CollectionUtils.isEqualCollection(
-      Arrays.asList("h1,h2".split(",")), Arrays.asList(newValue.split(","))
-    ));
-
-    entry = multiHostProperty2.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    // no ordering guarantee
-    Assert.assertTrue(newValue.equals("['h1','h2']") || 
newValue.equals("['h2','h1']"));
-
-    entry = mProperty.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    Assert.assertEquals("1025m", newValue);
-
-    Map<String, String> configs = new HashMap<String, String>();
-    configs.put("hive_database", "External MySQL Database");
-    mapConfigurations.put("hive-env", configs);
-    entry = databaseProperty.entrySet().iterator().next();
-    newValue = propertyUpdaterMap.get(entry.getKey()).update(hostGroups, 
entry.getValue());
-    Assert.assertEquals("localhost:12345", newValue);
-
-    verify(managementController, resourceProvider, hostGroup1, hostGroup2);
-  }
 
   @Test
   public void testGetResources() throws Exception{

Reply via email to