This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 1a8b193  AMBARI-24709. Implement support for Minimal Blueprint Export (#2489)
1a8b193 is described below

commit 1a8b193fbc8da67597044cb26ee1a8cabcc86cd8
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Oct 24 15:09:56 2018 +0200

    AMBARI-24709. Implement support for Minimal Blueprint Export (#2489)
---
 .../api/query/render/ClusterBlueprintRenderer.java |  61 ++++++--
 .../api/resources/ClusterResourceDefinition.java   |  11 +-
 .../internal/BlueprintConfigurationProcessor.java  | 157 +++++++++++++-------
 .../controller/internal/BlueprintExportType.java   | 164 +++++++++++++++++++++
 .../internal/ExportBlueprintRequest.java           |  61 +++-----
 .../server/controller/internal/UnitUpdater.java    |  18 ++-
 .../apache/ambari/server/state/PropertyInfo.java   |   7 +
 .../ambari/server/topology/Configuration.java      |  16 +-
 .../query/render/ClusterBlueprintRendererTest.java |  43 +++---
 .../BlueprintConfigurationProcessorTest.java       | 141 +++++++++++++-----
 .../internal/ExportBlueprintRequestTest.java       |  42 +-----
 11 files changed, 516 insertions(+), 205 deletions(-)
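
For context: the export type is selected through the renderer name, which
ClusterResourceDefinition.getRenderer hands to BlueprintExportType.parse (both
below). A sketch of the resulting API usage, assuming a cluster named "c1" and
the usual blueprint export endpoint:

    # full export: every config type and property of the cluster
    GET /api/v1/clusters/c1?format=blueprint_full

    # minimal export: only values that differ from the stack defaults;
    # plain "format=blueprint" also maps to the MINIMAL default
    GET /api/v1/clusters/c1?format=blueprint_minimal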

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
index 7ac240a..9139cec 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
@@ -41,6 +41,7 @@ import 
org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import 
org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
+import org.apache.ambari.server.controller.internal.BlueprintExportType;
 import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
 import org.apache.ambari.server.controller.internal.ExportBlueprintRequest;
 import org.apache.ambari.server.controller.internal.RequestImpl;
@@ -72,6 +73,7 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 
 /**
  * Renderer which renders a cluster resource as a blueprint.
@@ -92,6 +94,12 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
    */
   private final AmbariManagementController controller = 
AmbariServer.getController();
 
+  private final BlueprintExportType exportType;
+
+  public ClusterBlueprintRenderer(BlueprintExportType exportType) {
+    this.exportType = exportType;
+  }
+
 
   // ----- Renderer ----------------------------------------------------------
 
@@ -184,7 +192,6 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
    * Create a blueprint resource.
    *
    * @param clusterNode  cluster tree node
-   *
    * @return a new blueprint resource
    */
   private Resource createBlueprintResource(TreeNode<Resource> clusterNode) {
@@ -198,7 +205,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
     }
 
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(exportType);
 
     Stack stack = topology.getBlueprint().getStack();
     blueprintResource.setProperty("Blueprints/stack_name", stack.getName());
@@ -223,10 +230,16 @@ public class ClusterBlueprintRenderer extends 
BaseRenderer implements Renderer {
     List<Map<String, Object>> groupList = formatGroupsAsList(topology);
     blueprintResource.setProperty("host_groups", groupList);
 
-    blueprintResource.setProperty("configurations", 
processConfigurations(topology));
+    List<Map<String, Map<String, Map<String, ?>>>> configurations = 
processConfigurations(topology);
+    if (exportType.include(configurations)) {
+      blueprintResource.setProperty("configurations", configurations);
+    }
 
     //Fetch settings section for blueprint
-    blueprintResource.setProperty("settings", getSettings(clusterNode, stack));
+    Collection<Map<String, Object>> settings = getSettings(clusterNode, stack);
+    if (exportType.include(settings)) {
+      blueprintResource.setProperty("settings", settings);
+    }
 
     return blueprintResource;
   }
@@ -261,7 +274,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
    * @return A Collection<Map<String, Object>> which represents the Setting 
Object
    */
   @VisibleForTesting
-  static Collection<Map<String, Object>> getSettings(TreeNode<Resource> 
clusterNode, Stack stack) {
+  Collection<Map<String, Object>> getSettings(TreeNode<Resource> clusterNode, 
Stack stack) {
     //Initialize collections to create appropriate json structure
     Collection<Map<String, Object>> blueprintSetting = new ArrayList<>();
 
@@ -289,7 +302,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
 
       //Fetch the service Components to obtain ServiceComponentInfo
       Collection<TreeNode<Resource>> componentChildren = 
serviceNode.getChild("components").getChildren();
-      for (TreeNode componentNode : componentChildren) {
+      for (TreeNode<Resource> componentNode : componentChildren) {
         ResourceImpl component = (ResourceImpl) componentNode.getObject();
         Map<String, Object> serviceComponentInfoMap = 
component.getPropertiesMap().get("ServiceComponentInfo");
         String componentName = 
serviceComponentInfoMap.get("component_name").toString();
@@ -307,8 +320,13 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
         }
       }
     }
-    blueprintSetting.add(ImmutableMap.of(SERVICE_SETTINGS, serviceSettings));
-    blueprintSetting.add(ImmutableMap.of(COMPONENT_SETTINGS, 
componentSettings));
+
+    if (exportType.include(serviceSettings)) {
+      blueprintSetting.add(ImmutableMap.of(SERVICE_SETTINGS, serviceSettings));
+    }
+    if (exportType.include(componentSettings)) {
+      blueprintSetting.add(ImmutableMap.of(COMPONENT_SETTINGS, 
componentSettings));
+    }
 
     return blueprintSetting;
   }
@@ -366,14 +384,23 @@ public class ClusterBlueprintRenderer extends 
BaseRenderer implements Renderer {
     List<Map<String, Map<String, Map<String, ?>>>> configList = new 
ArrayList<>();
 
     Configuration configuration = topology.getConfiguration();
-    Collection<String> allTypes = new HashSet<>();
-    allTypes.addAll(configuration.getFullProperties().keySet());
-    allTypes.addAll(configuration.getFullAttributes().keySet());
+    Map<String, Map<String, String>> fullProperties = 
configuration.getFullProperties();
+    Map<String, Map<String, Map<String, String>>> fullAttributes = 
configuration.getFullAttributes();
+    Collection<String> allTypes = ImmutableSet.<String>builder()
+      .addAll(fullProperties.keySet())
+      .addAll(fullAttributes.keySet())
+      .build();
     for (String type : allTypes) {
       Map<String, Map<String, ?>> typeMap = new HashMap<>();
-      typeMap.put("properties", configuration.getFullProperties().get(type));
-      if (! configuration.getFullAttributes().isEmpty()) {
-        typeMap.put("properties_attributes", 
configuration.getFullAttributes().get(type));
+      Map<String, String> properties = fullProperties.get(type);
+      if (exportType.include(properties)) {
+        typeMap.put("properties", properties);
+      }
+      if (!fullAttributes.isEmpty()) {
+        Map<String, Map<String, String>> attributes = fullAttributes.get(type);
+        if (exportType.include(attributes)) {
+          typeMap.put("properties_attributes", attributes);
+        }
       }
 
       configList.add(Collections.singletonMap(type, typeMap));
@@ -407,7 +434,9 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
 
         configList.add(propertyMap);
       }
-      mapGroupProperties.put("configurations", configList);
+      if (exportType.include(configList)) {
+        mapGroupProperties.put("configurations", configList);
+      }
     }
     return listHostGroups;
   }
@@ -433,7 +462,7 @@ public class ClusterBlueprintRenderer extends BaseRenderer 
implements Renderer {
   protected ClusterTopology createClusterTopology(TreeNode<Resource> 
clusterNode)
       throws InvalidTopologyTemplateException, InvalidTopologyException {
 
-    return new ClusterTopologyImpl(new AmbariContext(), new ExportBlueprintRequest(clusterNode));
+    return new ClusterTopologyImpl(new AmbariContext(), new ExportBlueprintRequest(clusterNode, controller));
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
index bc0e3ae..cf9426c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/resources/ClusterResourceDefinition.java
@@ -20,11 +20,13 @@ package org.apache.ambari.server.api.resources;
 
 import java.util.Collection;
 import java.util.HashSet;
+import java.util.Optional;
 import java.util.Set;
 
 import org.apache.ambari.server.api.query.render.ClusterBlueprintRenderer;
 import org.apache.ambari.server.api.query.render.Renderer;
 import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.internal.BlueprintExportType;
 import org.apache.ambari.server.controller.spi.Resource;
 
 /**
@@ -51,11 +53,10 @@ public class ClusterResourceDefinition extends 
BaseResourceDefinition {
 
   @Override
   public Renderer getRenderer(String name) {
-    if (name != null && name.equals("blueprint")) {
-      return new ClusterBlueprintRenderer();
-    } else {
-      return super.getRenderer(name);
-    }
+    Optional<BlueprintExportType> blueprintExportType = 
BlueprintExportType.parse(name);
+    return blueprintExportType.isPresent()
+      ? new ClusterBlueprintRenderer(blueprintExportType.get())
+      : super.getRenderer(name);
   }
 
   @Override
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 2d580e4..c15ca70 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -63,8 +63,10 @@ import org.apache.commons.lang3.tuple.Pair;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Predicates;
+import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
@@ -196,12 +198,22 @@ public class BlueprintConfigurationProcessor {
   private static final Set<String> configPropertiesWithHASupport =
     new HashSet<>(Arrays.asList("fs.defaultFS", "hbase.rootdir", 
"instance.volumes", "policymgr_external_url", 
"xasecure.audit.destination.hdfs.dir"));
 
+  private static final Set<Pair<String, String>> 
PROPERTIES_FOR_HADOOP_PROXYUSER = ImmutableSet.of(
+    Pair.of("oozie-env", "oozie_user"),
+    Pair.of("hive-env", "hive_user"),
+    Pair.of("hive-env", "webhcat_user"),
+    Pair.of("hbase-env", "hbase_user"),
+    Pair.of("falcon-env", "falcon_user")
+  );
+  private static final String HADOOP_PROXYUSER_HOSTS_FORMAT = 
"hadoop.proxyuser.%s.hosts";
+  private static final String HADOOP_PROXYUSER_GROUPS_FORMAT = 
"hadoop.proxyuser.%s.groups";
+
   /**
    * Statically-defined list of filters to apply on property exports.
    * This will initially be used to filter out the Ranger Passwords, but
    * could be extended in the future for more generic purposes.
    */
-  private PropertyFilter[] getExportPropertyFilters (Map<Long, Set<String>> 
authToLocalPerClusterMap)
+  private PropertyFilter[] getExportPropertyFilters(Map<Long, Set<String>> 
authToLocalPerClusterMap)
   {
     return new PropertyFilter[] {
       new PasswordPropertyFilter(),
@@ -451,9 +463,8 @@ public class BlueprintConfigurationProcessor {
     }
 
     // Explicitly set any properties that are required but not currently 
provided in the stack definition.
+    injectDefaults(clusterConfig, configTypesUpdated, 
clusterTopology.getBlueprint().getServices());
     setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
-    setRetryConfiguration(clusterConfig, configTypesUpdated);
-    setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
     addExcludedConfigProperties(clusterConfig, configTypesUpdated, 
clusterTopology.getBlueprint().getStack());
 
     trimProperties(clusterConfig, clusterTopology);
@@ -815,7 +826,7 @@ public class BlueprintConfigurationProcessor {
    * Update properties for blueprint export.
    * This involves converting concrete topology information to host groups.
    */
-  public void doUpdateForBlueprintExport() {
+  public void doUpdateForBlueprintExport(BlueprintExportType exportType) {
     // HA configs are only processed in cluster configuration, not HG 
configurations
     if (clusterTopology.isNameNodeHAEnabled()) {
       doNameNodeHAUpdate();
@@ -830,7 +841,8 @@ public class BlueprintConfigurationProcessor {
     }
 
     Collection<Configuration> allConfigs = new ArrayList<>();
-    allConfigs.add(clusterTopology.getConfiguration());
+    Configuration clusterConfig = clusterTopology.getConfiguration();
+    allConfigs.add(clusterConfig);
     for (HostGroupInfo groupInfo : 
clusterTopology.getHostGroupInfo().values()) {
       Configuration hgConfiguration = groupInfo.getConfiguration();
       if (! hgConfiguration.getFullProperties(1).isEmpty()) {
@@ -852,6 +864,20 @@ public class BlueprintConfigurationProcessor {
 
       doFilterPriorToExport(configuration);
     }
+
+    Blueprint blueprint = clusterTopology.getBlueprint();
+    applyTypeSpecificFilter(exportType, clusterConfig, 
blueprint.getStack().getConfiguration(), blueprint.getServices());
+  }
+
+  @VisibleForTesting
+  void applyTypeSpecificFilter(BlueprintExportType exportType, Configuration 
clusterConfig, Configuration stackConfig, Collection<String> services) {
+    if (exportType == BlueprintExportType.MINIMAL) {
+      // convert back to suffix-less form, to allow comparing to defaults
+      doNonTopologyUpdate(mPropertyUpdaters, clusterConfig);
+    }
+
+    injectDefaults(stackConfig, new HashSet<>(), services);
+    exportType.filter(clusterConfig, stackConfig);
   }
 
   /**
@@ -1673,11 +1699,11 @@ public class BlueprintConfigurationProcessor {
     for (Map.Entry<String, Map<String, PropertyUpdater>> entry : 
updaters.entrySet()) {
       String type = entry.getKey();
       for (String propertyName : entry.getValue().keySet()) {
-        NonTopologyUpdater npu = (NonTopologyUpdater) 
entry.getValue().get(propertyName);
+        PropertyUpdater pu = entry.getValue().get(propertyName);
         Map<String, String> typeProperties = properties.get(type);
 
         if (typeProperties != null && 
typeProperties.containsKey(propertyName)) {
-          String newValue = npu.updateForBlueprintExport(propertyName, 
typeProperties.get(propertyName), properties, clusterTopology);
+          String newValue = pu.updateForBlueprintExport(propertyName, 
typeProperties.get(propertyName), properties, clusterTopology);
           configuration.setProperty(type, propertyName, newValue);
         }
       }
@@ -1703,6 +1729,10 @@ public class BlueprintConfigurationProcessor {
                                   Map<String, Map<String, String>> properties,
                                   ClusterTopology topology);
 
+    default String updateForBlueprintExport(String propertyName, String value, 
Map<String, Map<String, String>> properties, ClusterTopology topology) {
+      return value;
+    }
+
     /**
      * Determine the required host groups for the provided property.
      *
@@ -2764,13 +2794,6 @@ public class BlueprintConfigurationProcessor {
                                                     ClusterTopology topology) {
       return Collections.emptyList();
     }
-
-    public String updateForBlueprintExport(String propertyName,
-                                           String origValue,
-                                           Map<String, Map<String, String>> 
properties,
-                                           ClusterTopology topology) {
-      return origValue;
-    }
   }
 
 
@@ -3228,52 +3251,75 @@ public class BlueprintConfigurationProcessor {
     }
   }
 
-  private Collection<String> setupHDFSProxyUsers(Configuration configuration, 
Set<String> configTypesUpdated) {
-    // AMBARI-5206
-    final Map<String , String> userProps = new HashMap<>();
+  /**
+   * Generates property names of the form "hadoop.proxyuser.*" based on the actual usernames defined in {@code configuration}.
+   * E.g. if "hive-env" defines "hive_user": "cstm-hive", then a generated property name would be "hadoop.proxyuser.cstm-hive.hosts".
+   * @return set of hadoop proxyuser property names, each paired with the name of the config type the username is defined in
+   *         (the config type is needed for filtering later)
+   */
+  private static Set<Pair<String, String>> 
generateHadoopProxyUserPropertyNames(Configuration configuration) {
+    Set<Pair<String, String>> proxyUsers = new HashSet<>();
+    Map<String, Map<String, String>> existingProperties = 
configuration.getFullProperties();
+    for (Pair<String, String> userProp : PROPERTIES_FOR_HADOOP_PROXYUSER) {
+      String configType = userProp.getLeft();
+      String property = userProp.getRight();
+      Map<String, String> configs = existingProperties.get(configType);
+      if (configs != null) {
+        String user = configs.get(property);
+        if (!Strings.isNullOrEmpty(user)) {
+          proxyUsers.add(Pair.of(configType, 
String.format(HADOOP_PROXYUSER_HOSTS_FORMAT, user)));
+          proxyUsers.add(Pair.of(configType, 
String.format(HADOOP_PROXYUSER_GROUPS_FORMAT, user)));
+        }
+      }
+    }
+
+    return proxyUsers;
+  }
 
-    Collection<String> services = clusterTopology.getBlueprint().getServices();
+  /**
+   * Ensures {@code hadoop.proxyuser.*} properties are present in core-site 
for the services defined in the blueprint.
+   */
+  private static void setupHDFSProxyUsers(Configuration configuration, 
Set<String> configTypesUpdated, Collection<String> services) {
     if (services.contains("HDFS")) {
-      // only add user properties to the map for
-      // services actually included in the blueprint definition
-      if (services.contains("OOZIE")) {
-        userProps.put("oozie_user", "oozie-env");
-      }
+      Set<Pair<String, String>> configTypePropertyPairs = 
generateHadoopProxyUserPropertyNames(configuration);
+      Set<String> acceptableConfigTypes = 
getEligibleConfigTypesForHadoopProxyUsers(services);
 
-      if (services.contains("HIVE")) {
-        userProps.put("hive_user", "hive-env");
-        userProps.put("webhcat_user", "hive-env");
+      Map<String, Map<String, String>> existingProperties = 
configuration.getFullProperties();
+      for (Pair<String, String> pair : configTypePropertyPairs) {
+        String configType = pair.getLeft();
+        if (acceptableConfigTypes.contains(configType)) {
+          Map<String, String> configs = existingProperties.get(configType);
+          if (configs != null) {
+            ensureProperty(configuration, "core-site", pair.getRight(), "*", 
configTypesUpdated);
+          }
+        }
       }
+    }
+  }
 
-      if (services.contains("HBASE")) {
-        userProps.put("hbase_user", "hbase-env");
-      }
+  /**
+   * @return set of config types with eligible properties for hadoop proxyuser
+   */
+  private static Set<String> 
getEligibleConfigTypesForHadoopProxyUsers(Collection<String> services) {
+    Set<String> acceptableConfigTypes = new HashSet<>();
 
-      if (services.contains("FALCON")) {
-        userProps.put("falcon_user", "falcon-env");
-      }
+    if (services.contains("OOZIE")) {
+      acceptableConfigTypes.add("oozie-env");
+    }
 
-      String proxyUserHosts = "hadoop.proxyuser.%s.hosts";
-      String proxyUserGroups = "hadoop.proxyuser.%s.groups";
+    if (services.contains("HIVE")) {
+      acceptableConfigTypes.add("hive-env");
+    }
 
-      Map<String, Map<String, String>> existingProperties = 
configuration.getFullProperties();
-      for (String property : userProps.keySet()) {
-        String configType = userProps.get(property);
-        Map<String, String> configs = existingProperties.get(configType);
-        if (configs != null) {
-          String user = configs.get(property);
-          if (user != null && !user.isEmpty()) {
-            ensureProperty(configuration, "core-site", 
String.format(proxyUserHosts, user), "*", configTypesUpdated);
-            ensureProperty(configuration, "core-site", 
String.format(proxyUserGroups, user), "*", configTypesUpdated);
-          }
-        } else {
-          LOG.debug("setMissingConfigurations: no user configuration found for 
type = {}.  This may be caused by an error in the blueprint configuration.",
-            configType);
-        }
+    if (services.contains("HBASE")) {
+      acceptableConfigTypes.add("hbase-env");
+    }
 
-      }
+    if (services.contains("FALCON")) {
+      acceptableConfigTypes.add("falcon-env");
     }
-    return services;
+
+    return acceptableConfigTypes;
   }
 
   /**
@@ -3416,6 +3462,14 @@ public class BlueprintConfigurationProcessor {
     }
   }
 
+  /**
+   * Ensures that non-stack properties are present in {@code configuration}.
+   */
+  private static void injectDefaults(Configuration configuration, Set<String> 
configTypesUpdated, Collection<String> services) {
+    setRetryConfiguration(configuration, configTypesUpdated);
+    setupHDFSProxyUsers(configuration, configTypesUpdated, services);
+  }
+
   private @Nullable String trimValue(@Nullable String value,
                                      @NotNull Stack stack,
                                      @NotNull String configType,
@@ -3439,7 +3493,7 @@ public class BlueprintConfigurationProcessor {
    * @param property       property name
    * @param defaultValue   default value
    */
-  private void ensureProperty(Configuration configuration, String type, String 
property, String defaultValue, Set<String> configTypesUpdated) {
+  private static void ensureProperty(Configuration configuration, String type, 
String property, String defaultValue, Set<String> configTypesUpdated) {
     if (configuration.getPropertyValue(type, property) == null) {
       configuration.setProperty(type, property, defaultValue);
       configTypesUpdated.add(type);
@@ -3503,6 +3557,7 @@ public class BlueprintConfigurationProcessor {
       return !PASSWORD_NAME_REGEX.matcher(propertyName).matches();
     }
   }
+
   /**
  * A Filter that excludes properties that are marked in the stack as password properties or Kerberos principals.
    *
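
Worked example (hypothetical values) for the refactored proxyuser handling
above: assuming the blueprint contains HDFS and HIVE, and "hive-env" defines
hive_user=cstm-hive, setupHDFSProxyUsers ensures these core-site entries exist:

    hadoop.proxyuser.cstm-hive.hosts = *
    hadoop.proxyuser.cstm-hive.groups = *

Since applyTypeSpecificFilter runs injectDefaults against the stack
configuration too, these generated "*" defaults are filtered back out of a
minimal export unless the cluster overrides them.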
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintExportType.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintExportType.java
new file mode 100644
index 0000000..5bada66
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintExportType.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.internal;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.commons.lang3.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableSet;
+
+/**
+ * Handles most of the type-specific behavior for blueprint export.
+ */
+public enum BlueprintExportType {
+  /**
+   * The exported blueprint contains all properties of all config types
+   * for services present in the cluster.
+   */
+  FULL {
+    @Override
+    public Configuration filter(Configuration actualConfig, Configuration 
defaultConfig) {
+      // no-op
+      return actualConfig;
+    }
+
+    @Override
+    public boolean include(String value, String defaultValue) {
+      return true;
+    }
+
+    @Override
+    public boolean include(Collection<?> collection) {
+      return true;
+    }
+
+    @Override
+    public boolean include(Map<?, ?> map) {
+      return true;
+    }
+  },
+
+  /**
+   * The exported blueprint contains only the properties that do not match 
default values
+   * as defined in the stack.  Empty lists/maps are also omitted.
+   */
+  MINIMAL {
+    @Override
+    public Configuration filter(Configuration actualConfig, Configuration 
defaultConfig) {
+      for (Map.Entry<String, Map<String, String>> configTypeEntry : 
ImmutableSet.copyOf(actualConfig.getProperties().entrySet())) {
+        String configType = configTypeEntry.getKey();
+        Map<String, String> properties = configTypeEntry.getValue();
+        for (Map.Entry<String, String> propertyEntry : 
ImmutableSet.copyOf(properties.entrySet())) {
+          String propertyName = propertyEntry.getKey();
+          String propertyValue = propertyEntry.getValue();
+          String defaultValue = defaultConfig.getPropertyValue(configType, 
propertyName);
+          if (include(propertyValue, defaultValue))  {
+            LOG.debug("Including {}/{} in exported blueprint, as default value 
and actual value differ:\n{}\nvs\n{}", configType, propertyName, defaultValue, 
propertyValue);
+          } else {
+            LOG.debug("Omitting {}/{} from exported blueprint, as it has the 
default value of {}", configType, propertyName, propertyValue);
+            actualConfig.removeProperty(configType, propertyName);
+          }
+        }
+        if (properties.isEmpty()) {
+          actualConfig.getProperties().remove(configType);
+        }
+      }
+
+      for (Map.Entry<String, Map<String, Map<String, String>>> configTypeEntry 
: ImmutableSet.copyOf(actualConfig.getAttributes().entrySet())) {
+        String configType = configTypeEntry.getKey();
+        Map<String, Map<String, String>> attributes = 
configTypeEntry.getValue();
+        for (Map.Entry<String, Map<String, String>> attributeEntry : 
ImmutableSet.copyOf(attributes.entrySet())) {
+          String attributeName = attributeEntry.getKey();
+          Map<String, String> properties = attributeEntry.getValue();
+          for (Map.Entry<String, String> propertyEntry : 
ImmutableSet.copyOf(properties.entrySet())) {
+            String propertyName = propertyEntry.getKey();
+            String attributeValue = propertyEntry.getValue();
+            String defaultValue = defaultConfig.getAttributeValue(configType, 
propertyName, attributeName);
+            if (include(attributeValue, defaultValue))  {
+              LOG.debug("Including {}/{}/{} in exported blueprint, as default 
value and actual value differ:\n{}\nvs\n{}", configType, attributeName, 
propertyName, defaultValue, attributeValue);
+            } else {
+              LOG.debug("Omitting {}/{}/{} from exported blueprint, as it has 
the default value of {}", configType, attributeName, propertyName, 
attributeValue);
+              properties.remove(propertyName);
+            }
+          }
+          if (properties.isEmpty()) {
+            attributes.remove(attributeName);
+          }
+        }
+        if (attributes.isEmpty()) {
+          actualConfig.getAttributes().remove(configType);
+        }
+      }
+
+      return actualConfig;
+    }
+
+    @Override
+    public boolean include(String value, String defaultValue) {
+      return value != null && (
+        defaultValue == null ||
+          !Objects.equals(StringUtils.trim(defaultValue), 
StringUtils.trim(value))
+      );
+    }
+
+    @Override
+    public boolean include(Collection<?> collection) {
+      return collection != null && !collection.isEmpty();
+    }
+
+    @Override
+    public boolean include(Map<?, ?> map) {
+      return map != null && !map.isEmpty();
+    }
+  },
+  ;
+
+  public abstract Configuration filter(Configuration actualConfig, 
Configuration defaultConfig);
+  public abstract boolean include(String value, String defaultValue);
+  public abstract boolean include(Collection<?> collection);
+  public abstract boolean include(Map<?, ?> map);
+
+  public static final BlueprintExportType DEFAULT = MINIMAL;
+  public static final String PREFIX = "blueprint";
+  private static final String SEPARATOR = "_";
+  private static final Logger LOG = 
LoggerFactory.getLogger(BlueprintExportType.class);
+
+  public static Optional<BlueprintExportType> parse(String input) {
+    if (input == null || !input.startsWith(PREFIX)) {
+      return Optional.empty();
+    }
+
+    int separatorPos = input.indexOf(SEPARATOR);
+    if (separatorPos == -1 || separatorPos + 1 == input.length()) {
+      return Optional.of(DEFAULT);
+    }
+
+    switch (input.substring(separatorPos + 1)) {
+      case "full": return Optional.of(FULL);
+      case "minimal": return Optional.of(MINIMAL);
+      default: return Optional.of(DEFAULT);
+    }
+  }
+}
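
A quick reference for the behavior defined above (illustrative calls):

    BlueprintExportType.parse("blueprint");          // Optional.of(MINIMAL) -- the default
    BlueprintExportType.parse("blueprint_minimal");  // Optional.of(MINIMAL)
    BlueprintExportType.parse("blueprint_full");     // Optional.of(FULL)
    BlueprintExportType.parse("blueprint_other");    // Optional.of(MINIMAL) -- unknown suffix falls back to the default
    BlueprintExportType.parse("hosts");              // Optional.empty() -- non-blueprint names keep their regular renderer

    MINIMAL.include("360", "360");    // false -- matches the stack default, omitted
    MINIMAL.include(" 360 ", "360");  // false -- values are compared trimmed
    MINIMAL.include("720", "360");    // true  -- differs from the default, kept
    MINIMAL.include("720", null);     // true  -- no stack default, kept
    MINIMAL.include(null, "360");     // false -- null values are never exported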
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
index e604bf3..bf010de 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
@@ -32,7 +32,6 @@ import java.util.Set;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.state.DesiredConfig;
@@ -52,31 +51,31 @@ import org.apache.ambari.server.topology.TopologyRequest;
  */
 public class ExportBlueprintRequest implements TopologyRequest {
 
-  private static AmbariManagementController controller = 
AmbariServer.getController();
+  private final AmbariManagementController controller;
 
-  private String clusterName;
-  private Long clusterId;
+  private final String clusterName;
+  private final Long clusterId;
   private Blueprint blueprint;
-  private Configuration configuration;
-  //todo: Should this map be represented by a new class?
-  private Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
+  private final Configuration configuration;
+  private final Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
 
 
-  public ExportBlueprintRequest(TreeNode<Resource> clusterNode) throws 
InvalidTopologyTemplateException {
+  public ExportBlueprintRequest(TreeNode<Resource> clusterNode, 
AmbariManagementController controller) throws InvalidTopologyTemplateException {
+    this.controller = controller;
+
     Resource clusterResource = clusterNode.getObject();
+    Stack stack = parseStack(clusterResource);
     clusterName = String.valueOf(clusterResource.getPropertyValue(
         ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID));
     clusterId = Long.valueOf(String.valueOf(clusterResource.getPropertyValue(
             ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID)));
 
 
-    createConfiguration(clusterNode);
-    //todo: should be parsing Configuration from the beginning
-    //createConfiguration(configurations);
+    configuration = createConfiguration(clusterNode);
 
     Collection<ExportedHostGroup> exportedHostGroups = 
processHostGroups(clusterNode.getChild("hosts"));
     createHostGroupInfo(exportedHostGroups);
-    createBlueprint(exportedHostGroups, parseStack(clusterResource));
+    createBlueprint(exportedHostGroups, stack);
   }
 
   public String getClusterName() {
@@ -138,7 +137,6 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
     for (ExportedHostGroup exportedGroup : exportedHostGroups) {
       HostGroupInfo groupInfo = new HostGroupInfo(exportedGroup.getName());
       groupInfo.addHosts(exportedGroup.getHostInfo());
-      //todo: should be parsing Configuration from the beginning
       groupInfo.setConfiguration(exportedGroup.getConfiguration());
       hostGroupInfo.put(groupInfo.getHostGroupName(), groupInfo);
     }
@@ -161,10 +159,8 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
    * Process cluster scoped configurations.
    *
    * @param clusterNode  cluster node
-   *
    */
-  private void createConfiguration(TreeNode<Resource> clusterNode) {
-
+  private static Configuration createConfiguration(TreeNode<Resource> 
clusterNode) {
     Map<String, Map<String, String>> properties = new HashMap<>();
     Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
 
@@ -174,16 +170,15 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
       ExportedConfiguration configuration = new ExportedConfiguration(config);
       DesiredConfig desiredConfig = (DesiredConfig) 
desiredConfigMap.get(configuration.getType());
       if (desiredConfig != null && 
desiredConfig.getTag().equals(configuration.getTag())) {
-
         properties.put(configuration.getType(), configuration.getProperties());
         attributes.put(configuration.getType(), 
configuration.getPropertyAttributes());
       }
     }
-    configuration = new Configuration(properties, attributes);
-    // empty parent configuration when exporting as all properties are 
included in this configuration
-    configuration.setParentConfiguration(new Configuration(
-        Collections.emptyMap(),
-        Collections.emptyMap()));
+
+    Configuration configuration = new Configuration(properties, attributes);
+    configuration.setParentConfiguration(new 
Configuration(Collections.emptyMap(), Collections.emptyMap()));
+
+    return configuration;
   }
 
   /**
@@ -231,12 +226,12 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
     /**
      * Associated components.
      */
-    private Set<String> components = new HashSet<>();
+    private final Set<String> components = new HashSet<>();
 
     /**
      * Host group scoped configurations.
      */
-    private Collection<ExportedConfiguration> configurations = new HashSet<>();
+    private final Collection<ExportedConfiguration> configurations = new 
HashSet<>();
 
     /**
      * Number of instances.
@@ -246,7 +241,7 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
     /**
      * Collection of associated hosts.
      */
-    private Collection<String> hosts = new HashSet<>();
+    private final Collection<String> hosts = new HashSet<>();
 
     /**
      * Constructor.
@@ -376,7 +371,6 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
             getComponents().add("AMBARI_SERVER");
           }
         } catch (UnknownHostException e) {
-          //todo: SystemException?
           throw new RuntimeException("Unable to obtain local host name", e);
         }
       } catch (UnknownHostException e) {
@@ -406,7 +400,7 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
   /**
    * Encapsulates a configuration.
    */
-  private class ExportedConfiguration {
+  private static class ExportedConfiguration {
     /**
      * Configuration type such as hdfs-site.
      */
@@ -448,18 +442,6 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
       if (propertiesMap.containsKey("properties_attributes")) {
         propertyAttributes = (Map) propertiesMap.get("properties_attributes");
       }
-
-      //todo: not processing config here, ensure that
-      //todo: this logic regarding null/empty properties is properly handled
-//      if (properties != null && !properties.isEmpty()) {
-//        stripRequiredProperties(properties);
-//      } else {
-//        LOG.warn("Empty configuration found for configuration type = " + 
type +
-//            " during Blueprint export.  This may occur after an upgrade of 
Ambari, when" +
-//            "attempting to export a Blueprint from a cluster started by an 
older version of " +
-//            "Ambari.");
-//      }
-
     }
 
     /**
@@ -517,4 +499,5 @@ public class ExportBlueprintRequest implements 
TopologyRequest {
       return result;
     }
   }
+
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
index 8b7cb67..501e16a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
@@ -55,11 +55,21 @@ public class UnitUpdater implements 
BlueprintConfigurationProcessor.PropertyUpda
         return value.toString();
       } else if (!value.hasAnyUnit()) {
         return value.withUnit(stackUnit);
-      } else { // should not happen because of prevalidation in UnitValidator
+      } else { // should not happen because of pre-validation in UnitValidator
         throw new IllegalArgumentException("Property " + propertyName + "=" + 
origValue + " has an unsupported unit. Stack supported unit is: " + stackUnit + 
" or no unit");
       }
   }
 
+  /**
+   * @return the property value with its stack-defined unit removed
+   */
+  @Override
+  public String updateForBlueprintExport(String propertyName, String 
origValue, Map<String, Map<String, String>> properties, ClusterTopology 
topology) {
+    PropertyUnit stackUnit = 
PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, 
propertyName);
+    PropertyValue value = PropertyValue.of(propertyName, origValue);
+    return value.withoutUnit(stackUnit);
+  }
+
   @Override
   public Collection<String> getRequiredHostGroups(String propertyName, String 
origValue, Map<String, Map<String, String>> properties, ClusterTopology 
topology) {
     return Collections.emptySet();
@@ -142,6 +152,12 @@ public class UnitUpdater implements 
BlueprintConfigurationProcessor.PropertyUpda
       return value + unit;
     }
 
+    public String withoutUnit(PropertyUnit unit) {
+      return hasUnit(unit)
+        ? value.substring(0, value.length() - unit.toString().length())
+        : value;
+    }
+
     @Override
     public String toString() {
       return value;
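
A sketch of the unit stripping this enables on export (property name and unit
are hypothetical; "unit" stands for the PropertyUnit resolved from the stack):

    // if the stack declares unit "m" for hbase_master_heapsize:
    PropertyValue.of("hbase_master_heapsize", "1024m").withoutUnit(unit);  // -> "1024"
    PropertyValue.of("hbase_master_heapsize", "1024").withoutUnit(unit);   // -> "1024" (nothing to strip)

The minimal export depends on this: doNonTopologyUpdate(mPropertyUpdaters, ...)
converts values back to the suffix-less form so that they can be compared
against the stack defaults (see applyTypeSpecificFilter in
BlueprintConfigurationProcessor above).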
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
index 31fcb9d..a3564fb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/PropertyInfo.java
@@ -187,6 +187,13 @@ public class PropertyInfo {
     for (Element propertyAttribute : propertyAttributes) {
       attributes.put(propertyAttribute.getTagName(), 
propertyAttribute.getFirstChild().getNodeValue());
     }
+
+    // inject "hidden" property_value_attribute into property_attributes, see AMBARI-17223
+    String hidden = getPropertyValueAttributes().getHidden();
+    if (hidden != null) {
+      attributes.putIfAbsent("hidden", hidden);
+    }
+
     return attributes;
   }
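
A hedged sketch of the effect (the property and its hidden value are
illustrative; actual values come from the stack's property-value-attributes):

    // stack definition (simplified):
    //   <property>
    //     <name>some_password</name>
    //     <property-value-attributes><hidden>true</hidden></property-value-attributes>
    //   </property>
    // getAttributesMap() now also reports {"hidden": "true"}, so the
    // attribute counts as a stack default when the minimal export compares
    // cluster attributes against the stack configuration.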
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
index 4026ce6..bbd4502 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
@@ -43,6 +43,18 @@ public class Configuration {
    */
   private Configuration parentConfiguration;
 
+  public static Configuration newEmpty() {
+    return new Configuration(new HashMap<>(), new HashMap<>());
+  }
+
+  public Configuration copy() {
+    Configuration parent = parentConfiguration;
+    parentConfiguration = null;
+    Configuration copy = new Configuration(getFullProperties(), 
getFullAttributes());
+    parentConfiguration = parent;
+    return copy;
+  }
+
   /**
    * Constructor.
    *
@@ -385,10 +397,10 @@ public class Configuration {
    * Remove all occurrences of a config type
    */
   public void removeConfigType(String configType) {
-    if (properties != null && properties.containsKey(configType)) {
+    if (properties != null) {
       properties.remove(configType);
     }
-    if (attributes != null && attributes.containsKey(configType)) {
+    if (attributes != null) {
       attributes.remove(configType);
     }
     if (parentConfiguration != null) {
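
A minimal usage sketch of the two new helpers (property values are
illustrative):

    // newEmpty(): a mutable, empty configuration -- used e.g. as a stand-in
    // stack configuration in the updated tests
    Configuration stackDefaults = Configuration.newEmpty();
    stackDefaults.setProperty("core-site", "fs.trash.interval", "360");

    // copy(): a parentless deep copy of this configuration level only; the
    // parent is detached while getFullProperties()/getFullAttributes() build
    // the snapshot, so the copy can be filtered for export without mutating
    // the original
    Configuration snapshot = stackDefaults.copy();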
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
index b9ef9c9..7ccbfc3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
@@ -59,6 +59,7 @@ import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.KerberosHelper;
 import org.apache.ambari.server.controller.KerberosHelperImpl;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
+import org.apache.ambari.server.controller.internal.BlueprintExportType;
 import org.apache.ambari.server.controller.internal.ClusterControllerImpl;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
 import org.apache.ambari.server.controller.internal.Stack;
@@ -124,6 +125,9 @@ public class ClusterBlueprintRendererTest {
     new HashMap<>();
 
   private static final Configuration clusterConfig = new 
Configuration(clusterProps, clusterAttributes);
+  private final ClusterBlueprintRenderer minimalRenderer = new 
ClusterBlueprintRenderer(BlueprintExportType.MINIMAL);
+  private final ClusterBlueprintRenderer fullRenderer = new 
ClusterBlueprintRenderer(BlueprintExportType.FULL);
+
   @Before
   public void setup() throws Exception {
 
@@ -164,8 +168,10 @@ public class ClusterBlueprintRendererTest {
     expect(blueprint.getHostGroups()).andReturn(hostGroups).anyTimes();
     
expect(blueprint.getHostGroup("host_group_1")).andReturn(group1).anyTimes();
     
expect(blueprint.getHostGroup("host_group_2")).andReturn(group2).anyTimes();
+    expect(blueprint.getServices()).andReturn(ImmutableSet.of("HDFS", 
"YARN")).anyTimes();
     expect(stack.getName()).andReturn("HDP").anyTimes();
     expect(stack.getVersion()).andReturn("1.3.3").anyTimes();
+    
expect(stack.getConfiguration()).andReturn(Configuration.newEmpty()).anyTimes();
     expect(group1.getName()).andReturn("host_group_1").anyTimes();
     expect(group2.getName()).andReturn("host_group_2").anyTimes();
     expect(group1.getComponents()).andReturn(group1Components).anyTimes();
@@ -256,8 +262,7 @@ public class ClusterBlueprintRendererTest {
     QueryInfo hostComponentInfo = new QueryInfo(new 
HostComponentResourceDefinition(), new HashSet<>());
     queryTree.getChild("Host").addChild(hostComponentInfo, "HostComponent");
 
-    ClusterBlueprintRenderer renderer = new ClusterBlueprintRenderer();
-    TreeNode<Set<String>> propertyTree = 
renderer.finalizeProperties(queryTree, false);
+    TreeNode<Set<String>> propertyTree = 
fullRenderer.finalizeProperties(queryTree, false);
 
     Set<String> rootProperties = propertyTree.getObject();
     assertEquals(2, rootProperties.size());
@@ -276,12 +281,13 @@ public class ClusterBlueprintRendererTest {
       defaultCredentialStoreSettings(),
       defaultRecoverySettings());
 
-    Collection<Map<String, Object>> settings = 
ClusterBlueprintRenderer.getSettings(clusterNode, stack);
-
+    Collection<Map<String, Object>> settings = 
fullRenderer.getSettings(clusterNode, stack);
     assertEquals(Lists.newArrayList(
       ImmutableMap.of(ClusterBlueprintRenderer.SERVICE_SETTINGS, 
ImmutableSet.of()),
       ImmutableMap.of(ClusterBlueprintRenderer.COMPONENT_SETTINGS, 
ImmutableSet.of())
     ), settings);
+
+    assertEquals(ImmutableList.of(), minimalRenderer.getSettings(clusterNode, 
stack));
   }
 
   @Test
@@ -292,8 +298,7 @@ public class ClusterBlueprintRendererTest {
       customCredentialStoreSettingFor(stack, "service1", "service2"),
       customRecoverySettingsFor(stack, "component1", "component2"));
 
-    Collection<Map<String, Object>> settings = 
ClusterBlueprintRenderer.getSettings(clusterNode, stack);
-
+    Collection<Map<String, Object>> settings = 
fullRenderer.getSettings(clusterNode, stack);
     assertEquals(Lists.newArrayList(
       ImmutableMap.of(ClusterBlueprintRenderer.SERVICE_SETTINGS, 
ImmutableSet.of(
         ImmutableMap.of(
@@ -310,6 +315,8 @@ public class ClusterBlueprintRendererTest {
         ImmutableMap.of("name", "component2", 
ClusterBlueprintRenderer.RECOVERY_ENABLED, ClusterBlueprintRenderer.TRUE)
       ))
     ), settings);
+
+    assertEquals(settings, minimalRenderer.getSettings(clusterNode, stack));
   }
 
   @Test
@@ -320,7 +327,7 @@ public class ClusterBlueprintRendererTest {
       defaultCredentialStoreSettings(),
       customRecoverySettingsFor(stack, "component1"));
 
-    Collection<Map<String, Object>> settings = 
ClusterBlueprintRenderer.getSettings(clusterNode, stack);
+    Collection<Map<String, Object>> settings = 
fullRenderer.getSettings(clusterNode, stack);
 
     assertEquals(Lists.newArrayList(
       ImmutableMap.of(ClusterBlueprintRenderer.SERVICE_SETTINGS, 
ImmutableSet.of()),
@@ -328,6 +335,11 @@ public class ClusterBlueprintRendererTest {
         ImmutableMap.of("name", "component1", 
ClusterBlueprintRenderer.RECOVERY_ENABLED, ClusterBlueprintRenderer.FALSE)
       ))
     ), settings);
+    assertEquals(Lists.newArrayList(
+      ImmutableMap.of(ClusterBlueprintRenderer.COMPONENT_SETTINGS, 
ImmutableSet.of(
+        ImmutableMap.of("name", "component1", 
ClusterBlueprintRenderer.RECOVERY_ENABLED, ClusterBlueprintRenderer.FALSE)
+      ))
+    ), minimalRenderer.getSettings(clusterNode, stack));
   }
 
   private static TreeNode<Resource> clusterWith(Stack stack,
@@ -456,8 +468,7 @@ public class ClusterBlueprintRendererTest {
     rootQuery.getProperties().add("foo/bar");
     rootQuery.getProperties().add("prop1");
 
-    ClusterBlueprintRenderer renderer = new ClusterBlueprintRenderer();
-    TreeNode<Set<String>> propertyTree = 
renderer.finalizeProperties(queryTree, false);
+    TreeNode<Set<String>> propertyTree = 
fullRenderer.finalizeProperties(queryTree, false);
 
     Set<String> rootProperties = propertyTree.getObject();
     assertEquals(2, rootProperties.size());
@@ -476,7 +487,7 @@ public class ClusterBlueprintRendererTest {
     Result result = new ResultImpl(true);
     createClusterResultTree(result.getResultTree());
 
-    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology);
+    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology, 
BlueprintExportType.FULL);
     Result blueprintResult = renderer.finalizeResult(result);
 
     TreeNode<Resource> blueprintTree = blueprintResult.getResultTree();
@@ -502,7 +513,7 @@ public class ClusterBlueprintRendererTest {
     Result result = new ResultImpl(true);
     createClusterResultTree(result.getResultTree());
 
-    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology);
+    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology, 
BlueprintExportType.FULL);
     Result blueprintResult = renderer.finalizeResult(result);
 
     TreeNode<Resource> blueprintTree = blueprintResult.getResultTree();
@@ -575,7 +586,7 @@ public class ClusterBlueprintRendererTest {
 
     createClusterResultTree(result.getResultTree(), testDesiredConfigMap);
 
-    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology);
+    ClusterBlueprintRenderer renderer = new TestBlueprintRenderer(topology, 
BlueprintExportType.FULL);
     Result blueprintResult = renderer.finalizeResult(result);
 
     TreeNode<Resource> blueprintTree = blueprintResult.getResultTree();
@@ -678,11 +689,8 @@ public class ClusterBlueprintRendererTest {
 
   @Test
   public void testClusterRendererDefaults() {
-    Renderer clusterBlueprintRenderer =
-      new ClusterBlueprintRenderer();
-
     assertFalse("ClusterBlueprintRenderer should not require property provider 
input",
-      clusterBlueprintRenderer.requiresPropertyProviderInput());
+      fullRenderer.requiresPropertyProviderInput());
   }
 
   //todo: collection resource
@@ -827,7 +835,8 @@ public class ClusterBlueprintRendererTest {
 
     private ClusterTopology topology;
 
-    TestBlueprintRenderer(ClusterTopology topology) {
+    TestBlueprintRenderer(ClusterTopology topology, BlueprintExportType 
exportType) {
+      super(exportType);
       this.topology = topology;
     }
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 996c09d..28a9bcd 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -167,6 +167,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     // return false for all components since for this test we don't care about 
the value
     expect(stack.isMasterComponent((String) 
anyObject())).andReturn(false).anyTimes();
     
expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), 
anyObject(String.class))).andReturn(emptyMap()).anyTimes();
+    
expect(stack.getConfiguration()).andReturn(Configuration.newEmpty()).anyTimes();
 
     expect(serviceInfo.getRequiredProperties()).andReturn(
       emptyMap()).anyTimes();
@@ -314,7 +315,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("yarn-site").get("yarn.resourcemanager.hostname");
     assertEquals("%HOSTGROUP::group1%", updatedVal);
@@ -362,7 +363,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals(properties.size(), 3);
     assertEquals(((Map) properties.get("kerberos-env")).size(), 0);
@@ -398,7 +399,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // When
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // Then
     assertEquals("policymgr_external_url property's original value should be 
exported when Ranger Admin is deployed to multiple hosts.", 
"test_policymgr_external_url", 
properties.get("admin-properties").get("policymgr_external_url"));
@@ -440,7 +441,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("%HOSTGROUP::group1%", 
clusterConfig.getPropertyValue("yarn-site", "yarn.resourcemanager.hostname"));
     assertEquals("%HOSTGROUP::group1%", 
clusterConfig.getPropertyValue("yarn-site", 
"yarn.resourcemanager.resource-tracker.address"));
@@ -488,7 +489,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("%HOSTGROUP::group1%", 
properties.get("yarn-site").get("yarn.resourcemanager.hostname"));
     assertEquals("%HOSTGROUP::group1%",
@@ -523,7 +524,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = properties.get("core-site").get("fs.defaultFS");
     assertEquals("%HOSTGROUP::group1%:8020", updatedVal);
@@ -556,7 +557,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     
assertFalse(properties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
   }
@@ -602,7 +603,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("hbase-site").get("hbase.zookeeper.quorum");
     assertEquals("%HOSTGROUP::group1%,%HOSTGROUP::group2%", updatedVal);
@@ -649,7 +650,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("webhcat-site").get("templeton.zookeeper.hosts");
     assertEquals("%HOSTGROUP::group1%:5050,%HOSTGROUP::group2%:9090", 
updatedVal);
@@ -680,7 +681,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("application-properties").get("atlas.server.bind.address");
     
assertEquals("http://%HOSTGROUP::group1%:21000,http://%HOSTGROUP::group2%:21000";,
 updatedVal);
@@ -735,7 +736,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("storm-site").get("storm.zookeeper.servers");
     assertEquals("['%HOSTGROUP::group1%:5050','%HOSTGROUP::group2%:9090']", 
updatedVal);
@@ -778,7 +779,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = 
properties.get("hive-site").get("javax.jdo.option.ConnectionURL");
     
assertEquals("jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true",
 updatedVal);
@@ -811,7 +812,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     
assertFalse(properties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
   }
@@ -863,7 +864,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
 
     assertEquals("Exported properties map was not of the expected size", 2,
@@ -946,7 +947,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Falcon Broker URL property not properly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), 
falconStartupProperties.get("*.broker.url"));
@@ -983,7 +984,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertFalse("tez.tez-ui.history-url.base should not be present in exported 
blueprint in tez-site",
       tezSiteProperties.containsKey("tez.tez-ui.history-url.base"));
@@ -1034,7 +1035,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // verify that these properties are filtered out of the exported 
configuration
     assertFalse("admin_server_host should not be present in exported blueprint 
in kerberos-env",
@@ -1098,7 +1099,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("HTTPS address HA property not properly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeOne));
@@ -1171,7 +1172,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("HTTPS address HA property not properly exported",
       createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + 
"." + expectedNodeOne));
@@ -1235,7 +1236,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // verify that any properties that include nameservices are not removed 
from the exported blueprint's configuration
     assertEquals("Property containing an HA nameservice (fs.defaultFS), was 
not correctly exported by the processor",
@@ -1271,7 +1272,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Incorrect state for hdfs-site config after HA call in non-HA 
environment, should be zero",
       0, hdfsSiteProperties.size());
@@ -1334,7 +1335,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // verify results for name service one
     assertEquals("HTTPS address HA property not properly exported",
@@ -1413,7 +1414,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
       "http://"; + "%HOSTGROUP::" + expectedHostGroupName + "%" + 
":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
@@ -1485,7 +1486,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
       "http://"; + "%HOSTGROUP::" + expectedHostGroupName + "%" + 
":19888/jobhistory/logs", yarnSiteProperties.get("yarn.log.server.url"));
@@ -1565,7 +1566,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("hdfs config property not exported properly",
       createExportedAddress(expectedPortNum, expectedHostGroupName), 
hdfsSiteProperties.get("dfs.http.address"));
@@ -1654,7 +1655,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("hive property not properly exported",
       "thrift://" + createExportedAddress(expectedPortNum, 
expectedHostGroupName), hiveSiteProperties.get("hive.metastore.uris"));
@@ -1747,7 +1748,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     System.out.println("RWN: exported value of hive.metastore.uris = " + 
hiveSiteProperties.get("hive.metastore.uris"));
 
@@ -1843,7 +1844,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // check that the jdbc url and related properties are removed when the 
external Oozie database is on a host not included in the cluster
     
assertFalse(BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url"));
@@ -2008,7 +2009,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("zookeeper config not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
@@ -2085,7 +2086,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Knox for core-site config not properly exported",
       createExportedHostName(expectedHostGroupName) + "," + 
createExportedHostName(expectedHostGroupNameTwo),
@@ -2134,7 +2135,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("kafka Ganglia config not properly exported",
       createExportedHostName(expectedHostGroupName, expectedPortNumberOne),
@@ -2173,7 +2174,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("Property was incorrectly exported",
       "%HOSTGROUP::" + expectedHostGroupName + "%", 
properties.get("storm.zookeeper.servers"));
@@ -3460,7 +3461,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
-    updater.doUpdateForBlueprintExport();
+    updater.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     final String expectedPropertyValue = createExportedAddress("2181", 
expectedHostGroupName) + "," + createExportedAddress("2181", 
expectedHostGroupNameTwo);
     assertEquals("hive.llap.zk.sm.connectionString property not updated 
correctly", expectedPropertyValue, 
hiveInteractiveSiteProperties.get(llapZkProperty));
@@ -3580,7 +3581,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
-    updater.doUpdateForBlueprintExport();
+    updater.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     assertEquals("oozie property not updated correctly",
       createExportedHostName(expectedHostGroupName, expectedPortNum), 
oozieSiteProperties.get("oozie.base.url"));
@@ -3731,7 +3732,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
-    updater.doUpdateForBlueprintExport();
+    updater.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // verify that the properties with hostname information were correctly 
preserved
     assertEquals("Yarn Log Server URL was incorrectly updated",
@@ -8135,7 +8136,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // When
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // Then
     String expectedAuditHdfsDir = "hdfs://%HOSTGROUP::group2%:100";
@@ -8207,7 +8208,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
 
     // When
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     // Then
     String expectedAuditHdfsDir = "hdfs://my_name_service:100";
@@ -9044,7 +9045,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     properties.put("hive-site", typeProps);
 
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String updatedVal = properties.get("hive-site").get("atlas.cluster.name");
     assertEquals("primary", updatedVal);
@@ -9083,7 +9084,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     properties.put("storm-site", stormSiteProps);
 
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     String hiveExecPostHooks = 
properties.get("hive-site").get("hive.exec.post.hooks");
     String kafkaMetricsReporters = 
properties.get("kafka-broker").get("kafka.metrics.reporters");
@@ -9249,7 +9250,7 @@ public class BlueprintConfigurationProcessorTest extends 
EasyMockSupport {
     expect(stack.isPasswordProperty((String) anyObject(), (String) 
anyObject(), (String) anyObject())).andReturn(true).once();
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, 
hostGroups);
     BlueprintConfigurationProcessor configProcessor = new 
BlueprintConfigurationProcessor(topology);
-    configProcessor.doUpdateForBlueprintExport();
+    configProcessor.doUpdateForBlueprintExport(BlueprintExportType.FULL);
 
     
assertFalse(properties.get("ranger-admin-site").containsKey("ranger.service.https.attrib.keystore.pass"));
   }
@@ -10039,6 +10040,66 @@ public class BlueprintConfigurationProcessorTest 
extends EasyMockSupport {
     assertNull(updatedRangerTagsyncSiteConfigurations);
   }
 
+  @Test
+  public void defaultConfigs() {
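+    // cluster config starts out identical to the stack defaults, so the
+    // MINIMAL export filter is expected to strip every property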
+    Configuration stackConfig = createTestStack();
+    Configuration clusterConfig = stackConfig.copy();
+    Configuration customConfig = Configuration.newEmpty();
+
+    ClusterTopology topology = createNiceMock(ClusterTopology.class);
+    Stack stack = createNiceMock(Stack.class);
+    Collection<String> services = ImmutableList.of("HDFS");
+    expect(stack.getServices()).andReturn(services).anyTimes();
+    expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes();
+    expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes();
+    expect(topology.getHostGroupInfo()).andReturn(emptyMap()).anyTimes();
+    replay(stack, topology);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
+    updater.applyTypeSpecificFilter(BlueprintExportType.MINIMAL, 
clusterConfig, stackConfig, services);
+
+    assertEquals(customConfig.getProperties(), clusterConfig.getProperties());
+  }
+
+  @Test
+  public void customConfigs() {
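+    // only the three properties customized below differ from the stack
+    // defaults, so they alone should survive the MINIMAL export filter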
+    Configuration stackConfig = createTestStack();
+    Configuration clusterConfig = stackConfig.copy();
+    Configuration customConfig = Configuration.newEmpty();
+    customize(clusterConfig, customConfig, "core-site", 
"hadoop.security.authorization", "true");
+    customize(clusterConfig, customConfig, "core-site", "fs.trash.interval", 
"0");
+    customize(clusterConfig, customConfig, "hdfs-site", "dfs.webhdfs.enabled", 
 "false");
+
+    ClusterTopology topology = createNiceMock(ClusterTopology.class);
+    Stack stack = createNiceMock(Stack.class);
+    Collection<String> services = ImmutableList.of("HDFS");
+    expect(stack.getServices()).andReturn(services).anyTimes();
+    expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes();
+    expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes();
+    expect(topology.getHostGroupInfo()).andReturn(emptyMap()).anyTimes();
+    replay(stack, topology);
+
+    BlueprintConfigurationProcessor updater = new 
BlueprintConfigurationProcessor(topology);
+    updater.applyTypeSpecificFilter(BlueprintExportType.MINIMAL, 
clusterConfig, stackConfig, services);
+
+    assertEquals(customConfig.getProperties(), clusterConfig.getProperties());
+  }
+
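+  /** Builds a small stand-in stack configuration with HDFS-style defaults. */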
+  private static Configuration createTestStack() {
+    Configuration stackConfig = Configuration.newEmpty();
+    stackConfig.setProperty("core-site", "io.file.buffer.size",  "131072");
+    stackConfig.setProperty("core-site", "hadoop.security.authorization",  
"false");
+    stackConfig.setProperty("core-site", "fs.trash.interval",  "360");
+    stackConfig.setProperty("hdfs-site", "dfs.namenode.name.dir",  
"/hadoop/hdfs/namenode");
+    stackConfig.setProperty("hdfs-site", "dfs.datanode.data.dir",  
"/hadoop/hdfs/data");
+    stackConfig.setProperty("hdfs-site", "dfs.webhdfs.enabled",  "true");
+    return stackConfig;
+  }
+
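+  /** Sets the same property in both configs, recording it as a user customization. */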
+  private static void customize(Configuration clusterConfig, Configuration 
customConfig, String configType, String propertyName, String value) {
+    clusterConfig.setProperty(configType, propertyName, value);
+    customConfig.setProperty(configType, propertyName, value);
+  }
 
   private static String createExportedAddress(String expectedPortNum, String 
expectedHostGroupName) {
     return createExportedHostName(expectedHostGroupName, expectedPortNum);
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
index e1f5583..cfd8fc8 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
@@ -10,8 +10,7 @@
  *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distribut
- * ed on an "AS IS" BASIS,
+ * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
@@ -23,66 +22,40 @@ import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.lang.reflect.Field;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 import org.apache.ambari.server.api.util.TreeNode;
 import org.apache.ambari.server.api.util.TreeNodeImpl;
 import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.StackLevelConfigurationRequest;
-import org.apache.ambari.server.controller.StackServiceRequest;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 
 /**
  * ExportBlueprintRequest unit tests.
  */
-@SuppressWarnings("unchecked")
 public class ExportBlueprintRequestTest {
   private static final String CLUSTER_NAME = "c1";
   private static final String CLUSTER_ID = "2";
 
-  private AmbariManagementController controller = 
createNiceMock(AmbariManagementController.class);
-
-  @Before
-  public void setupTest() throws Exception {
-    Field f = ExportBlueprintRequest.class.getDeclaredField("controller");
-    f.setAccessible(true);
-    f.set(null, controller);
-
-    expect(controller.getStackServices((Set<StackServiceRequest>)  
anyObject())).andReturn(
-        Collections.emptySet()).anyTimes();
-    
expect(controller.getStackLevelConfigurations((Set<StackLevelConfigurationRequest>)
 anyObject())).andReturn(
-        Collections.emptySet()).anyTimes();
-
-    replay(controller);
-  }
-
-  @After
-  public void tearDown() {
-    reset(controller);
-  }
-
-  //todo: test configuration processing
-
   @Test
   public void testExport_noConfigs() throws Exception {
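+    // the controller mock is now passed to the request directly, replacing
+    // the reflection-based injection removed above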
+    AmbariManagementController controller = 
createNiceMock(AmbariManagementController.class);
+    
expect(controller.getStackServices(anyObject())).andReturn(Collections.emptySet()).anyTimes();
+    
expect(controller.getStackLevelConfigurations(anyObject())).andReturn(Collections.emptySet()).anyTimes();
+    replay(controller);
+
     Resource clusterResource = new ResourceImpl(Resource.Type.Cluster);
     
clusterResource.setProperty(ClusterResourceProvider.CLUSTER_NAME_PROPERTY_ID, 
CLUSTER_NAME);
     
clusterResource.setProperty(ClusterResourceProvider.CLUSTER_ID_PROPERTY_ID, 
CLUSTER_ID);
@@ -116,7 +89,7 @@ public class ExportBlueprintRequestTest {
     processHostGroupComponents(host3Node, host3ComponentsList);
 
     // test
-    ExportBlueprintRequest exportBlueprintRequest = new 
ExportBlueprintRequest(clusterNode);
+    ExportBlueprintRequest exportBlueprintRequest = new 
ExportBlueprintRequest(clusterNode, controller);
 
     // assertions
     assertEquals(CLUSTER_NAME, exportBlueprintRequest.getClusterName());
@@ -163,4 +136,5 @@ public class ExportBlueprintRequestTest {
       hostComponentsNode.addChild(componentResource, "host_component_" + 
componentCount++);
     }
   }
+
 }
