This is an automated email from the ASF dual-hosted git repository.

rnettleton pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 4f0fbd2  [Ambari 23549] Blueprint configuration processor updates for HDFS NameNode Federation (#985)
4f0fbd2 is described below

commit 4f0fbd200796ce521499bd89b3bfb0c6470bb80c
Author: Robert Nettleton <rnettle...@hortonworks.com>
AuthorDate: Mon Apr 16 10:08:21 2018 -0400

    [Ambari 23549] Blueprint configuration processor updates for HDFS NameNode Federation (#985)
    
    * Adding property substitution support for various Federation-related properties
    
    * Minor changes to Blueprint Configuration Processor for NameNode Federation.
    
    * Updates to unit tests and minor refactorings
    
    * Updates to unit tests and minor changes.
    
    * Updated error handling for cluster name and new unit test assertions.
---
 .../internal/BlueprintConfigurationProcessor.java  |  61 +++++--
 .../BlueprintConfigurationProcessorTest.java       | 182 ++++++++++++++++++++-
 2 files changed, 227 insertions(+), 16 deletions(-)
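
Illustrative note (not part of the commit): the hdfs-site keys touched by this change follow nameservice-scoped naming patterns. The sketch below, which assumes two hypothetical nameservices "ns1"/"ns2" each with NameNodes "nn1"/"nn2", prints the keys the configuration processor now registers topology updaters for; the shared.edits.dir and servicerpc-address entries are the ones added by this change.

    import java.util.ArrayList;
    import java.util.List;

    public class FederationPropertyNamesSketch {
      public static void main(String[] args) {
        List<String> keys = new ArrayList<>();
        for (String nameService : new String[]{"ns1", "ns2"}) {
          // one shared-edits property per nameservice, resolved against all JOURNALNODE hosts
          keys.add("dfs.namenode.shared.edits.dir." + nameService);
          for (String nameNode : new String[]{"nn1", "nn2"}) {
            keys.add("dfs.namenode.https-address." + nameService + "." + nameNode);
            keys.add("dfs.namenode.http-address." + nameService + "." + nameNode);
            keys.add("dfs.namenode.rpc-address." + nameService + "." + nameNode);
            // servicerpc-address handling is new in this commit
            keys.add("dfs.namenode.servicerpc-address." + nameService + "." + nameNode);
          }
        }
        keys.forEach(System.out::println);
      }
    }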

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 5859fee..24f49ad 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -92,6 +92,10 @@ public class BlueprintConfigurationProcessor {
   private final static String HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME = "dfs_ha_initial_namenode_active_set";
   private final static String HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME = "dfs_ha_initial_namenode_standby_set";
 
+  private final static String HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME = "dfs_ha_initial_cluster_id";
+
+  private final static String HADOOP_ENV_CONFIG_TYPE_NAME = "hadoop-env";
+
 
   /**
    * Single host topology updaters
@@ -185,10 +189,10 @@ public class BlueprintConfigurationProcessor {
         new SimplePropertyNameExportFilter("ldap-url", "kerberos-env"),
         new SimplePropertyNameExportFilter("container_dn", "kerberos-env"),
         new SimplePropertyNameExportFilter("domains", "krb5-conf"),
-        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active", "hadoop-env"),
-        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby", "hadoop-env"),
-        new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, "hadoop-env"),
-        new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, "hadoop-env"),
+        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active", HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby", HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
         new StackPropertyTypeFilter(),
         new KerberosAuthToLocalRulesFilter(authToLocalPerClusterMap)};
     }
@@ -429,13 +433,13 @@ public class BlueprintConfigurationProcessor {
           // set the properties that configure which namenode is active,
           // and which is a standby node in this HA deployment
           Iterator<String> nnHostIterator = nnHosts.iterator();
-          clusterConfig.setProperty("hadoop-env", "dfs_ha_initial_namenode_active", nnHostIterator.next());
-          clusterConfig.setProperty("hadoop-env", "dfs_ha_initial_namenode_standby", nnHostIterator.next());
+          clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, "dfs_ha_initial_namenode_active", nnHostIterator.next());
+          clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, "dfs_ha_initial_namenode_standby", nnHostIterator.next());
 
-          configTypesUpdated.add("hadoop-env");
+          configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
         }
       } else {
-        if (!isPropertySet(clusterProps, "hadoop-env", HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME) && !isPropertySet(clusterProps, "hadoop-env", HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME)) {
+        if (!isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME) && !isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME)) {
           // multiple nameservices indicates an HDFS NameNode Federation install
           // process each nameservice to determine the active/standby nodes
           LOG.info("Processing multiple HDFS NameService instances, which indicates a NameNode Federation deployment");
@@ -473,9 +477,15 @@ public class BlueprintConfigurationProcessor {
 
             // set the properties what configure the NameNode Active/Standby status for each nameservice
             if (!activeNameNodeHostnames.isEmpty() && !standbyNameNodeHostnames.isEmpty()) {
-              clusterConfig.setProperty("hadoop-env", HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, String.join(",", activeNameNodeHostnames));
-              clusterConfig.setProperty("hadoop-env", HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, String.join(",", standbyNameNodeHostnames));
-              configTypesUpdated.add("hadoop-env");
+              clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, String.join(",", activeNameNodeHostnames));
+              clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, String.join(",", standbyNameNodeHostnames));
+
+              // also set the clusterID property, required for Federation installs of HDFS
+              if (!isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME)) {
+                clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME, getClusterName());
+              }
+
+              configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
             } else {
               LOG.warn("Error in processing the set of active/standby namenodes in this federated cluster, please check hdfs-site configuration");
             }
@@ -495,6 +505,21 @@ public class BlueprintConfigurationProcessor {
     return configTypesUpdated;
   }
 
+  private String getClusterName() throws ConfigurationTopologyException {
+    String clusterNameToReturn = null;
+    try {
+      clusterNameToReturn = clusterTopology.getAmbariContext().getClusterName(clusterTopology.getClusterId());
+    } catch (AmbariException e) {
+      throw new ConfigurationTopologyException("Cluster name could not be obtained, this may indicate a deployment or configuration error.", e);
+    }
+
+    if (clusterNameToReturn == null) {
+      throw new ConfigurationTopologyException("Cluster name could not be obtained, this may indicate a deployment or configuration error.");
+    }
+
+    return clusterNameToReturn;
+  }
+
   private void trimProperties(Configuration clusterConfig, ClusterTopology clusterTopology) {
     Blueprint blueprint = clusterTopology.getBlueprint();
     Stack stack = blueprint.getStack();
@@ -948,6 +973,10 @@ public class BlueprintConfigurationProcessor {
     Map<String, String> hdfsSiteConfig = clusterTopology.getConfiguration().getFullProperties().get("hdfs-site");
     // generate the property names based on the current HA config for the NameNode deployments
     for (String nameService : parseNameServices(hdfsSiteConfig)) {
+      final String journalEditsDirPropertyName = "dfs.namenode.shared.edits.dir." + nameService;
+      // register an updater for the nameservice-specific shared edits dir
+      hdfsSiteUpdatersForAvailability.put(journalEditsDirPropertyName, new MultipleHostTopologyUpdater("JOURNALNODE", ';', false, false, true));
+
       for (String nameNode : parseNameNodes(nameService, hdfsSiteConfig)) {
         final String httpsPropertyName = "dfs.namenode.https-address." + nameService + "." + nameNode;
         hdfsSiteUpdatersForAvailability.put(httpsPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
@@ -955,6 +984,8 @@ public class BlueprintConfigurationProcessor {
         hdfsSiteUpdatersForAvailability.put(httpPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
         final String rpcPropertyName = "dfs.namenode.rpc-address." + nameService + "." + nameNode;
         hdfsSiteUpdatersForAvailability.put(rpcPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
+        final String serviceRpcPropertyName = "dfs.namenode.servicerpc-address." + nameService + "." + nameNode;
+        hdfsSiteUpdatersForAvailability.put(serviceRpcPropertyName, new SingleHostTopologyUpdater("NAMENODE"));
       }
     }
     return highAvailabilityUpdaters;
@@ -1045,7 +1076,7 @@ public class BlueprintConfigurationProcessor {
    *         false if the initial active namenode property has not been configured
    */
   static boolean isNameNodeHAInitialActiveNodeSet(Map<String, Map<String, String>> configProperties) {
-    return configProperties.containsKey("hadoop-env") && configProperties.get("hadoop-env").containsKey("dfs_ha_initial_namenode_active");
+    return configProperties.containsKey(HADOOP_ENV_CONFIG_TYPE_NAME) && configProperties.get(HADOOP_ENV_CONFIG_TYPE_NAME).containsKey("dfs_ha_initial_namenode_active");
   }
 
 
@@ -1059,7 +1090,7 @@ public class BlueprintConfigurationProcessor {
    *         false if the initial standby namenode property has not been configured
    */
   static boolean isNameNodeHAInitialStandbyNodeSet(Map<String, Map<String, String>> configProperties) {
-    return configProperties.containsKey("hadoop-env") && configProperties.get("hadoop-env").containsKey("dfs_ha_initial_namenode_standby");
+    return configProperties.containsKey(HADOOP_ENV_CONFIG_TYPE_NAME) && configProperties.get(HADOOP_ENV_CONFIG_TYPE_NAME).containsKey("dfs_ha_initial_namenode_standby");
   }
 
   /**
@@ -2539,13 +2570,13 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("ranger-kafka-audit", rangerKafkaAuditPropsMap);
     singleHostTopologyUpdaters.put("ranger-storm-audit", rangerStormAuditPropsMap);
     singleHostTopologyUpdaters.put("ranger-atlas-audit", rangerAtlasAuditPropsMap);
-    singleHostTopologyUpdaters.put("hadoop-env", shHadoopEnvMap);
+    singleHostTopologyUpdaters.put(HADOOP_ENV_CONFIG_TYPE_NAME, shHadoopEnvMap);
 
     singleHostTopologyUpdaters.put("hawq-site", hawqSiteMap);
     singleHostTopologyUpdaters.put("zookeeper-env", zookeeperEnvMap);
 
 
-    mPropertyUpdaters.put("hadoop-env", mHadoopEnvMap);
+    mPropertyUpdaters.put(HADOOP_ENV_CONFIG_TYPE_NAME, mHadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
     mPropertyUpdaters.put("mapred-env", mapredEnvMap);
     mPropertyUpdaters.put("oozie-env", oozieEnvHeapSizeMap);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 5e73dc1..47a17e1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -5497,6 +5497,12 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
     hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo, expectedNodeThree + "," + expectedNodeFour);
 
+    //setup nameservice-specific properties
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir" + "." + expectedNameService,
+                           "qjournal://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + ";" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo) + "/ns1");
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir" + "." + expectedNameServiceTwo,
+                           "qjournal://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + ";" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo) + "/ns2");
+
+
 
     // setup properties that include exported host group information
     hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
@@ -5505,6 +5511,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+
+
 
     hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
     hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
@@ -5512,6 +5522,9 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
     hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+
 
     // add properties that require the SECONDARY_NAMENODE, which
     // is not included in this test
@@ -5582,6 +5595,13 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     assertEquals("HTTPS address HA property not properly exported",
       expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
 
+    assertEquals("servicerpc-address property not handled properly",
+      expectedHostName + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("servicerpc-address property not handled properly",
+      expectedHostNameTwo + ":" + expectedPortNum, hdfsSiteProperties.get("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeTwo));
+
+
+
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
       "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
 
@@ -5599,6 +5619,17 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     assertFalse("dfs.namenode.rpc-address should have been filtered out of this HA configuration",
       hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
 
+    // verify that the nameservice-specific shared.edits properties are handled correctly
+    // expect that all servers are included in the updated config, and that the qjournal URL format is preserved
+    assertEquals("HDFS HA shared edits directory property not properly updated for cluster create.",
+      "qjournal://" + createHostAddress(expectedHostName, expectedPortNum) + ";" + createHostAddress(expectedHostNameTwo, expectedPortNum) + "/ns1",
+      hdfsSiteProperties.get("dfs.namenode.shared.edits.dir" + "." + expectedNameService));
+
+    // expect that all servers are included in the updated config, and that the qjournal URL format is preserved
+    assertEquals("HDFS HA shared edits directory property not properly updated for cluster create.",
+      "qjournal://" + createHostAddress(expectedHostName, expectedPortNum) + ";" + createHostAddress(expectedHostNameTwo, expectedPortNum) + "/ns2",
+      hdfsSiteProperties.get("dfs.namenode.shared.edits.dir" + "." + expectedNameServiceTwo));
+
 
     // verify that correct configuration types were listed as updated in the returned set
     assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
@@ -5623,6 +5654,12 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
     assertTrue("Expected standby set not found in hadoop-env",
       hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+    assertTrue("Expected clusterId not found in hadoop-env",
+      hadoopEnvProperties.containsKey("dfs_ha_initial_cluster_id"));
+
+    // verify that the clusterID is set by default to the cluster name
+    assertEquals("Expected clusterId was not set to expected value",
+      "clusterName", hadoopEnvProperties.get("dfs_ha_initial_cluster_id"));
 
     // verify that the expected hostnames are included in the active set
     String[] activeHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
@@ -5646,6 +5683,130 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
                setOfStandbyHostNames.contains(expectedHostNameFour));
   }
 
+  @Test(expected = ConfigurationTopologyException.class)
+  public void testDoUpdateForClusterWithNameNodeFederationEnabledErrorClusterNameNotFound() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedNameServiceTwo = "mynameservicetwo";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.apache.ambari.org";
+    final String expectedHostNameThree = "c6403.apache.ambari.org";
+    final String expectedHostNameFour = "c6404.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedNodeThree = "nn3";
+    final String expectedNodeFour = "nn4";
+    final String expectedHostGroupName = "host_group_1";
+    final String expectedHostGroupNameTwo = "host_group_2";
+    final String expectedHostGroupNameThree = "host-group-3";
+    final String expectedHostGroupNameFour = "host-group-4";
+
+    EasyMockSupport mockSupport = new EasyMockSupport();
+    AmbariContext mockAmbariContext = mockSupport.createMock(AmbariContext.class);
+
+    // configure mock to return null cluster name (error condition)
+    expect(mockAmbariContext.getClusterName(1)).andReturn(null).anyTimes();
+
+    mockSupport.replayAll();
+
+    Map<String, Map<String, String>> properties = new HashMap<>();
+
+    Map<String, String> hdfsSiteProperties = new HashMap<>();
+    Map<String, String> hbaseSiteProperties = new HashMap<>();
+    Map<String, String> hadoopEnvProperties = new HashMap<>();
+    Map<String, String> coreSiteProperties = new HashMap<>();
+    Map<String, String> accumuloSiteProperties = new HashMap<>();
+
+    properties.put("hdfs-site", hdfsSiteProperties);
+    properties.put("hadoop-env", hadoopEnvProperties);
+    properties.put("core-site", coreSiteProperties);
+    properties.put("hbase-site", hbaseSiteProperties);
+    properties.put("accumulo-site", accumuloSiteProperties);
+
+    // setup multiple nameservices, to indicate NameNode Federation will be used
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService + "," + expectedNameServiceTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo, expectedNodeThree + "," + expectedNodeFour);
+
+    //setup nameservice-specific properties
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir" + "." + expectedNameService,
+      "qjournal://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + ";" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo) + "/ns1");
+    hdfsSiteProperties.put("dfs.namenode.shared.edits.dir" + "." + expectedNameServiceTwo,
+      "qjournal://" + createExportedAddress(expectedPortNum, expectedHostGroupName) + ";" + createExportedAddress(expectedPortNum, expectedHostGroupNameTwo) + "/ns2");
+
+
+    // setup properties that include exported host group information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeOne, createExportedAddress(expectedPortNum, expectedHostGroupName));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameService + "." + expectedNodeTwo, createExportedAddress(expectedPortNum, expectedHostGroupNameTwo));
+
+
+
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameServiceTwo + "." + expectedNodeThree, createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+    hdfsSiteProperties.put("dfs.namenode.servicerpc-address." + expectedNameServiceTwo + "." + expectedNodeFour, createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+
+
+    // add properties that require the SECONDARY_NAMENODE, which
+    // is not included in this test
+    hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+    hdfsSiteProperties.put("dfs.namenode.secondary.http-address", "localhost:8080");
+
+
+    // add properties that are used in non-HA HDFS NameNode settings
+    // to verify that these are eventually removed by the filter
+    hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+    hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+    hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
+    // configure the defaultFS to use the nameservice URL
+    coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+
+    // configure the hbase rootdir to use the nameservice URL
+    hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedNameService + "/hbase/test/root/dir");
+
+    // configure the accumulo instance volumes to use the nameservice URL
+    accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes");
+
+    Configuration clusterConfig = new Configuration(properties, Collections.emptyMap());
+
+    Collection<String> hgComponents = new HashSet<>();
+    hgComponents.add("NAMENODE");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, Collections.singleton(expectedHostName));
+
+    Collection<String> hgComponents2 = new HashSet<>();
+    hgComponents2.add("NAMENODE");
+    TestHostGroup group2 = new TestHostGroup(expectedHostGroupNameTwo, hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+    // add third and fourth hostgroup with NAMENODE, to simulate HDFS NameNode Federation
+    TestHostGroup group3 = new TestHostGroup(expectedHostGroupNameThree, Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameThree));
+    TestHostGroup group4 = new TestHostGroup(expectedHostGroupNameFour, Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameFour));
+
+    Collection<TestHostGroup> hostGroups = new ArrayList<>();
+    hostGroups.add(group1);
+    hostGroups.add(group2);
+    hostGroups.add(group3);
+    hostGroups.add(group4);
+
+    expect(stack.getCardinality("NAMENODE")).andReturn(new Cardinality("1-2")).anyTimes();
+    expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups, mockAmbariContext);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    updater.doUpdateForClusterCreate();
+  }
+
   @Test
   public void testDoUpdateForClusterWithNameNodeFederationEnabledWithCustomizedActiveStandbyHostSets() throws Exception {
     final String expectedNameService = "mynameservice";
@@ -5682,6 +5843,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     // configure the active/standy host lists to a custom set of hostnames
     hadoopEnvProperties.put("dfs_ha_initial_namenode_active_set", "test-server-five,test-server-six");
     hadoopEnvProperties.put("dfs_ha_initial_namenode_standby_set", "test-server-seven,test-server-eight");
+    hadoopEnvProperties.put("dfs_ha_initial_cluster_id", "my-custom-cluster-name");
 
 
     // setup multiple nameservices, to indicate NameNode Federation will be used
@@ -5813,6 +5975,12 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
     assertTrue("Expected standby set not found in hadoop-env",
       hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+    assertTrue("Expected clusterId not found in hadoop-env",
+      hadoopEnvProperties.containsKey("dfs_ha_initial_cluster_id"));
+
+    // verify that the clusterID is not set by processor, since user has already customized it
+    assertEquals("Expected clusterId was not set to expected value",
+      "my-custom-cluster-name", hadoopEnvProperties.get("dfs_ha_initial_cluster_id"));
 
     // verify that the expected hostnames are included in the active set
     String[] activeHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
@@ -8788,7 +8956,12 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   }
 
   private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration configuration,
-                                                Collection<TestHostGroup> hostGroups)
+                                                Collection<TestHostGroup> hostGroups) throws InvalidTopologyException {
+    return createClusterTopology(blueprint, configuration, hostGroups, null);
+  }
+
+  private ClusterTopology createClusterTopology(Blueprint blueprint, Configuration configuration,
+                                                Collection<TestHostGroup> hostGroups, AmbariContext ambariContextReplacement)
     throws InvalidTopologyException {
 
 
@@ -8839,6 +9012,13 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     replay(bp, topologyRequestMock);
 
+    if (ambariContextReplacement != null) {
+      // override the mock AmbariContext setup in the default Before method
+      // Note, this should only be used in a small number of test cases to verify exception
+      // behavior when the AmbariContext returns an unexpected value
+      ambariContext = ambariContextReplacement;
+    }
+
     ClusterTopology topology = new ClusterTopologyImpl(ambariContext, topologyRequestMock);
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY);
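
Illustrative note (not part of the commit): after these changes, doUpdateForClusterCreate() leaves a federated blueprint deployment with hadoop-env properties along the lines of the sketch below. The host and cluster names are made up; dfs_ha_initial_cluster_id defaults to the Ambari cluster name only when the blueprint does not already provide it.

    import java.util.HashMap;
    import java.util.Map;

    public class FederationHadoopEnvSketch {
      public static void main(String[] args) {
        Map<String, String> hadoopEnv = new HashMap<>();
        // one active and one standby NameNode selected per nameservice
        hadoopEnv.put("dfs_ha_initial_namenode_active_set", "c6401.apache.ambari.org,c6403.apache.ambari.org");
        hadoopEnv.put("dfs_ha_initial_namenode_standby_set", "c6402.apache.ambari.org,c6404.apache.ambari.org");
        // defaulted from AmbariContext.getClusterName() when the user has not set it
        hadoopEnv.put("dfs_ha_initial_cluster_id", "myExampleCluster");
        hadoopEnv.forEach((key, value) -> System.out.println(key + "=" + value));
      }
    }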
 

-- 
To stop receiving notification emails like this one, please contact
rnettle...@apache.org.
