This is an automated email from the ASF dual-hosted git repository.
rnettleton pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push:
new 3305199 [AMBARI-23467] Initial changes for Blueprint configuration
processing of NameNode Federation Clusters (#890)
3305199 is described below
commit 3305199bb58931474c2f2dfb2a1bac30f7b38cd4
Author: rnettleton <[email protected]>
AuthorDate: Thu Apr 5 16:35:27 2018 -0400
[AMBARI-23467] Initial changes for Blueprint configuration processing of
NameNode Federation Clusters (#890)
* Initial changes for Blueprint configuration processing of NameNode
Federation clusters.
* Updated patch with minor fixes based on review comments.
---
.../internal/BlueprintConfigurationProcessor.java | 125 ++++--
.../BlueprintConfigurationProcessorTest.java | 462 +++++++++++++++++++++
2 files changed, 561 insertions(+), 26 deletions(-)
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 7f1361a..5859fee 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -89,6 +89,10 @@ public class BlueprintConfigurationProcessor {
private final static String HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST =
"hawq_standby_address_host";
private final static String HAWQSTANDBY = "HAWQSTANDBY";
+ private final static String HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME =
"dfs_ha_initial_namenode_active_set";
+ private final static String HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME =
"dfs_ha_initial_namenode_standby_set";
+
+
/**
* Single host topology updaters
*/
@@ -171,20 +175,22 @@ public class BlueprintConfigurationProcessor {
private PropertyFilter[] getExportPropertyFilters (Map<Long, Set<String>>
authToLocalPerClusterMap)
{
return new PropertyFilter[] {
- new PasswordPropertyFilter(),
- new SimplePropertyNameExportFilter("tez.tez-ui.history-url.base",
"tez-site"),
- new SimplePropertyNameExportFilter("admin_server_host", "kerberos-env"),
- new SimplePropertyNameExportFilter("kdc_hosts", "kerberos-env"),
- new SimplePropertyNameExportFilter("master_kdc", "kerberos-env"),
- new SimplePropertyNameExportFilter("realm", "kerberos-env"),
- new SimplePropertyNameExportFilter("kdc_type", "kerberos-env"),
- new SimplePropertyNameExportFilter("ldap-url", "kerberos-env"),
- new SimplePropertyNameExportFilter("container_dn", "kerberos-env"),
- new SimplePropertyNameExportFilter("domains", "krb5-conf"),
- new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active",
"hadoop-env"),
- new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby",
"hadoop-env"),
- new StackPropertyTypeFilter(),
- new KerberosAuthToLocalRulesFilter(authToLocalPerClusterMap)};
+ new PasswordPropertyFilter(),
+ new SimplePropertyNameExportFilter("tez.tez-ui.history-url.base",
"tez-site"),
+ new SimplePropertyNameExportFilter("admin_server_host",
"kerberos-env"),
+ new SimplePropertyNameExportFilter("kdc_hosts", "kerberos-env"),
+ new SimplePropertyNameExportFilter("master_kdc", "kerberos-env"),
+ new SimplePropertyNameExportFilter("realm", "kerberos-env"),
+ new SimplePropertyNameExportFilter("kdc_type", "kerberos-env"),
+ new SimplePropertyNameExportFilter("ldap-url", "kerberos-env"),
+ new SimplePropertyNameExportFilter("container_dn", "kerberos-env"),
+ new SimplePropertyNameExportFilter("domains", "krb5-conf"),
+ new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active",
"hadoop-env"),
+ new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby",
"hadoop-env"),
+ new
SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME,
"hadoop-env"),
+ new
SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME,
"hadoop-env"),
+ new StackPropertyTypeFilter(),
+ new KerberosAuthToLocalRulesFilter(authToLocalPerClusterMap)};
}
/**
@@ -406,21 +412,75 @@ public class BlueprintConfigurationProcessor {
clusterConfig.setProperty("hdfs-site", "dfs.internal.nameservices",
nameservices);
}
- // if the active/stanbdy namenodes are not specified, assign them
automatically
- if (! isNameNodeHAInitialActiveNodeSet(clusterProps) && !
isNameNodeHAInitialStandbyNodeSet(clusterProps)) {
- Collection<String> nnHosts =
clusterTopology.getHostAssignmentsForComponent("NAMENODE");
- if (nnHosts.size() < 2) {
- throw new ConfigurationTopologyException("NAMENODE HA requires at
least 2 hosts running NAMENODE but there are: " +
- nnHosts.size() + " Hosts: " + nnHosts);
+ // parse out the nameservices value
+ String[] parsedNameServices = parseNameServices(hdfsSiteConfig);
+
+ // if a single nameservice is configured (default HDFS HA deployment)
+ if (parsedNameServices.length == 1) {
+ LOG.info("Processing a single HDFS NameService, which indicates a
default HDFS NameNode HA deployment");
+      // if the active/standby namenodes are not specified, assign them
automatically
+ if (! isNameNodeHAInitialActiveNodeSet(clusterProps) && !
isNameNodeHAInitialStandbyNodeSet(clusterProps)) {
+ Collection<String> nnHosts =
clusterTopology.getHostAssignmentsForComponent("NAMENODE");
+ if (nnHosts.size() < 2) {
+ throw new ConfigurationTopologyException("NAMENODE HA requires at
least 2 hosts running NAMENODE but there are: " +
+ nnHosts.size() + " Hosts: " + nnHosts);
+ }
+
+ // set the properties that configure which namenode is active,
+ // and which is a standby node in this HA deployment
+ Iterator<String> nnHostIterator = nnHosts.iterator();
+ clusterConfig.setProperty("hadoop-env",
"dfs_ha_initial_namenode_active", nnHostIterator.next());
+ clusterConfig.setProperty("hadoop-env",
"dfs_ha_initial_namenode_standby", nnHostIterator.next());
+
+ configTypesUpdated.add("hadoop-env");
}
+ } else {
+ if (!isPropertySet(clusterProps, "hadoop-env",
HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME) && !isPropertySet(clusterProps,
"hadoop-env", HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME)) {
+ // multiple nameservices indicates an HDFS NameNode Federation
install
+ // process each nameservice to determine the active/standby nodes
+ LOG.info("Processing multiple HDFS NameService instances, which
indicates a NameNode Federation deployment");
+ if (parsedNameServices.length > 1) {
+ Set<String> activeNameNodeHostnames = new HashSet<>();
+ Set<String> standbyNameNodeHostnames = new HashSet<>();
+
+ for (String nameService : parsedNameServices) {
+ List<String> hostNames = new ArrayList<>();
+ String[] nameNodes = parseNameNodes(nameService, hdfsSiteConfig);
+ for (String nameNode : nameNodes) {
+ // use the HA rpc-address property to obtain the NameNode
hostnames
+ String propertyName = "dfs.namenode.rpc-address." +
nameService + "." + nameNode;
+ String propertyValue = hdfsSiteConfig.get(propertyName);
+ if (propertyValue == null) {
+ throw new ConfigurationTopologyException("NameNode HA
property = " + propertyName + " is not found in the cluster config. This
indicates an error in configuration for HA/Federated clusters. " +
+ "Please recheck the HDFS configuration and try this
deployment again");
+ }
+
+ String hostName = propertyValue.split(":")[0];
+ hostNames.add(hostName);
+ }
- // set the properties that configure which namenode is active,
- // and which is a standby node in this HA deployment
- Iterator<String> nnHostIterator = nnHosts.iterator();
- clusterConfig.setProperty("hadoop-env",
"dfs_ha_initial_namenode_active", nnHostIterator.next());
- clusterConfig.setProperty("hadoop-env",
"dfs_ha_initial_namenode_standby", nnHostIterator.next());
+ if (hostNames.size() < 2) {
+ throw new ConfigurationTopologyException("NAMENODE HA for
nameservice = " + nameService + " requires at least 2 hosts running NAMENODE
but there are: " +
+ hostNames.size() + " Hosts: " + hostNames);
+ } else {
+ // by default, select the active and standby namenodes for
this nameservice
+ // using the first two hostnames found
+ // since HA is assumed, there should only be two NameNodes
deployed per NameService
+ activeNameNodeHostnames.add(hostNames.get(0));
+ standbyNameNodeHostnames.add(hostNames.get(1));
+ }
+ }
- configTypesUpdated.add("hadoop-env");
+          // set the properties that configure the NameNode Active/Standby
status for each nameservice
+ if (!activeNameNodeHostnames.isEmpty() &&
!standbyNameNodeHostnames.isEmpty()) {
+ clusterConfig.setProperty("hadoop-env",
HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, String.join(",",
activeNameNodeHostnames));
+ clusterConfig.setProperty("hadoop-env",
HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, String.join(",",
standbyNameNodeHostnames));
+ configTypesUpdated.add("hadoop-env");
+ } else {
+ LOG.warn("Error in processing the set of active/standby
namenodes in this federated cluster, please check hdfs-site configuration");
+ }
+ }
+ }
}
}
@@ -1002,6 +1062,19 @@ public class BlueprintConfigurationProcessor {
return configProperties.containsKey("hadoop-env") &&
configProperties.get("hadoop-env").containsKey("dfs_ha_initial_namenode_standby");
}
+ /**
+ * General convenience method to determine if a given property has been set
in the cluster configuration
+ *
+ * @param configProperties the configuration for this cluster
+ * @param configType the config type to check
+ * @param propertyName the property name to check
+ * @return true if the named property has been set
+ * false if the named property has not been set
+ */
+ static boolean isPropertySet(Map<String, Map<String, String>>
configProperties, String configType, String propertyName) {
+ return configProperties.containsKey(configType) &&
configProperties.get(configType).containsKey(propertyName);
+ }
+
/**
* Parses out the list of nameservices associated with this HDFS
configuration.
diff --git
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 0d84550..5e73dc1 100644
---
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -5459,6 +5459,468 @@ public class BlueprintConfigurationProcessorTest
extends EasyMockSupport {
updatedConfigTypes.contains("hadoop-env"));
}
+
+ @Test
+ public void testDoUpdateForClusterWithNameNodeFederationEnabled() throws
Exception {
+ final String expectedNameService = "mynameservice";
+ final String expectedNameServiceTwo = "mynameservicetwo";
+ final String expectedHostName = "c6401.apache.ambari.org";
+ final String expectedHostNameTwo = "c6402.apache.ambari.org";
+ final String expectedHostNameThree = "c6403.apache.ambari.org";
+ final String expectedHostNameFour = "c6404.apache.ambari.org";
+ final String expectedPortNum = "808080";
+ final String expectedNodeOne = "nn1";
+ final String expectedNodeTwo = "nn2";
+ final String expectedNodeThree = "nn3";
+ final String expectedNodeFour = "nn4";
+ final String expectedHostGroupName = "host_group_1";
+ final String expectedHostGroupNameTwo = "host_group_2";
+ final String expectedHostGroupNameThree = "host-group-3";
+ final String expectedHostGroupNameFour = "host-group-4";
+
+ Map<String, Map<String, String>> properties = new HashMap<>();
+
+ Map<String, String> hdfsSiteProperties = new HashMap<>();
+ Map<String, String> hbaseSiteProperties = new HashMap<>();
+ Map<String, String> hadoopEnvProperties = new HashMap<>();
+ Map<String, String> coreSiteProperties = new HashMap<>();
+ Map<String, String> accumuloSiteProperties = new HashMap<>();
+
+ properties.put("hdfs-site", hdfsSiteProperties);
+ properties.put("hadoop-env", hadoopEnvProperties);
+ properties.put("core-site", coreSiteProperties);
+ properties.put("hbase-site", hbaseSiteProperties);
+ properties.put("accumulo-site", accumuloSiteProperties);
+
+ // setup multiple nameservices, to indicate NameNode Federation will be
used
+ hdfsSiteProperties.put("dfs.nameservices", expectedNameService + "," +
expectedNameServiceTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne +
", " + expectedNodeTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo,
expectedNodeThree + "," + expectedNodeFour);
+
+
+ // setup properties that include exported host group information
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService +
"." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService +
"." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+
+ // add properties that require the SECONDARY_NAMENODE, which
+ // is not included in this test
+ hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.secondary.http-address",
"localhost:8080");
+
+
+ // add properties that are used in non-HA HDFS NameNode settings
+ // to verify that these are eventually removed by the filter
+ hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+ hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
+ // configure the defaultFS to use the nameservice URL
+ coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+
+ // configure the hbase rootdir to use the nameservice URL
+ hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedNameService +
"/hbase/test/root/dir");
+
+ // configure the hbase rootdir to use the nameservice URL
+ accumuloSiteProperties.put("instance.volumes", "hdfs://" +
expectedNameService + "/accumulo/test/instance/volumes");
+
+ Configuration clusterConfig = new Configuration(properties,
Collections.emptyMap());
+
+ Collection<String> hgComponents = new HashSet<>();
+ hgComponents.add("NAMENODE");
+ TestHostGroup group1 = new TestHostGroup(expectedHostGroupName,
hgComponents, Collections.singleton(expectedHostName));
+
+ Collection<String> hgComponents2 = new HashSet<>();
+ hgComponents2.add("NAMENODE");
+ TestHostGroup group2 = new TestHostGroup(expectedHostGroupNameTwo,
hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+ // add third and fourth hostgroup with NAMENODE, to simulate HDFS NameNode
Federation
+ TestHostGroup group3 = new TestHostGroup(expectedHostGroupNameThree,
Collections.singleton("NAMENODE"),
Collections.singleton(expectedHostNameThree));
+ TestHostGroup group4 = new TestHostGroup(expectedHostGroupNameFour,
Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameFour));
+
+ Collection<TestHostGroup> hostGroups = new ArrayList<>();
+ hostGroups.add(group1);
+ hostGroups.add(group2);
+ hostGroups.add(group3);
+ hostGroups.add(group4);
+
+ expect(stack.getCardinality("NAMENODE")).andReturn(new
Cardinality("1-2")).anyTimes();
+ expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new
Cardinality("1")).anyTimes();
+
+ ClusterTopology topology = createClusterTopology(bp, clusterConfig,
hostGroups);
+ BlueprintConfigurationProcessor updater = new
BlueprintConfigurationProcessor(topology);
+
+ Set<String> updatedConfigTypes =
+ updater.doUpdateForClusterCreate();
+
+ // verify that dfs.internal.nameservices was added
+ assertEquals("dfs.internal.nameservices wasn't added", expectedNameService
+ "," + expectedNameServiceTwo,
hdfsSiteProperties.get("dfs.internal.nameservices"));
+
+ // verify that the expected hostname was substituted for the host group
name in the config
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService +
"." + expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService +
"." + expectedNodeTwo));
+
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "."
+ expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "."
+ expectedNodeTwo));
+
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "."
+ expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "."
+ expectedNodeTwo));
+
+ assertEquals("fs.defaultFS should not be modified by cluster update when
NameNode HA is enabled.",
+ "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+
+ assertEquals("hbase.rootdir should not be modified by cluster update when
NameNode HA is enabled.",
+ "hdfs://" + expectedNameService + "/hbase/test/root/dir",
hbaseSiteProperties.get("hbase.rootdir"));
+
+ assertEquals("instance.volumes should not be modified by cluster update
when NameNode HA is enabled.",
+ "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes",
accumuloSiteProperties.get("instance.volumes"));
+
+ // verify that the non-HA properties are filtered out in HA mode
+ assertFalse("dfs.namenode.http-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.http-address"));
+ assertFalse("dfs.namenode.https-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.https-address"));
+ assertFalse("dfs.namenode.rpc-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
+
+
+ // verify that correct configuration types were listed as updated in the
returned set
+ assertEquals("Incorrect number of updated config types returned, set = " +
updatedConfigTypes,
+ 3, updatedConfigTypes.size());
+ assertTrue("Expected config type not found in updated set",
+ updatedConfigTypes.contains("cluster-env"));
+ assertTrue("Expected config type not found in updated set",
+ updatedConfigTypes.contains("hdfs-site"));
+ assertTrue("Expected config type not found in updated set",
+ updatedConfigTypes.contains("hadoop-env"));
+
+ // verify that the standard, single-nameservice HA properties are
+ // NOT set in this configuration
+ assertFalse("Single-node nameservice config should not have been set",
+
hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active"));
+ assertFalse("Single-node nameservice config should not have been set",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby"));
+
+ // verify that the config processor sets the expected properties for
+ // the sets of active and standby hostnames for NameNode deployment
+ assertTrue("Expected active set not found in hadoop-env",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
+ assertTrue("Expected standby set not found in hadoop-env",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+
+ // verify that the expected hostnames are included in the active set
+ String[] activeHostNames =
hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
+ assertEquals("NameNode active set did not contain the expected number of
hosts",
+ 2, activeHostNames.length);
+ Set<String> setOfActiveHostNames = new
HashSet<String>(Arrays.asList(activeHostNames));
+ assertTrue("Expected host name not found in the active map",
+ setOfActiveHostNames.contains(expectedHostName));
+ assertTrue("Expected host name not found in the active map",
+ setOfActiveHostNames.contains(expectedHostNameThree));
+
+
+ // verify that the expected hostnames are included in the standby set
+ String[] standbyHostNames =
hadoopEnvProperties.get("dfs_ha_initial_namenode_standby_set").split(",");
+ assertEquals("NameNode standby set did not contain the expected number of
hosts",
+ 2, standbyHostNames.length);
+ Set<String> setOfStandbyHostNames = new
HashSet<String>(Arrays.asList(standbyHostNames));
+ assertTrue("Expected host name not found in the standby map",
+ setOfStandbyHostNames.contains(expectedHostNameTwo));
+ assertTrue("Expected host name not found in the standby map",
+ setOfStandbyHostNames.contains(expectedHostNameFour));
+ }
+
+ @Test
+ public void
testDoUpdateForClusterWithNameNodeFederationEnabledWithCustomizedActiveStandbyHostSets()
throws Exception {
+ final String expectedNameService = "mynameservice";
+ final String expectedNameServiceTwo = "mynameservicetwo";
+ final String expectedHostName = "c6401.apache.ambari.org";
+ final String expectedHostNameTwo = "c6402.apache.ambari.org";
+ final String expectedHostNameThree = "c6403.apache.ambari.org";
+ final String expectedHostNameFour = "c6404.apache.ambari.org";
+ final String expectedPortNum = "808080";
+ final String expectedNodeOne = "nn1";
+ final String expectedNodeTwo = "nn2";
+ final String expectedNodeThree = "nn3";
+ final String expectedNodeFour = "nn4";
+ final String expectedHostGroupName = "host_group_1";
+ final String expectedHostGroupNameTwo = "host_group_2";
+ final String expectedHostGroupNameThree = "host-group-3";
+ final String expectedHostGroupNameFour = "host-group-4";
+
+ Map<String, Map<String, String>> properties = new HashMap<>();
+
+ Map<String, String> hdfsSiteProperties = new HashMap<>();
+ Map<String, String> hbaseSiteProperties = new HashMap<>();
+ Map<String, String> hadoopEnvProperties = new HashMap<>();
+ Map<String, String> coreSiteProperties = new HashMap<>();
+ Map<String, String> accumuloSiteProperties = new HashMap<>();
+
+ properties.put("hdfs-site", hdfsSiteProperties);
+ properties.put("hadoop-env", hadoopEnvProperties);
+ properties.put("core-site", coreSiteProperties);
+ properties.put("hbase-site", hbaseSiteProperties);
+ properties.put("accumulo-site", accumuloSiteProperties);
+
+
+    // configure the active/standby host lists to a custom set of hostnames
+ hadoopEnvProperties.put("dfs_ha_initial_namenode_active_set",
"test-server-five,test-server-six");
+ hadoopEnvProperties.put("dfs_ha_initial_namenode_standby_set",
"test-server-seven,test-server-eight");
+
+
+ // setup multiple nameservices, to indicate NameNode Federation will be
used
+ hdfsSiteProperties.put("dfs.nameservices", expectedNameService + "," +
expectedNameServiceTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne +
", " + expectedNodeTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo,
expectedNodeThree + "," + expectedNodeFour);
+
+
+ // setup properties that include exported host group information
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService +
"." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService +
"." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.rpc-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+
+ // add properties that require the SECONDARY_NAMENODE, which
+ // is not included in this test
+ hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.secondary.http-address",
"localhost:8080");
+
+
+ // add properties that are used in non-HA HDFS NameNode settings
+ // to verify that these are eventually removed by the filter
+ hdfsSiteProperties.put("dfs.namenode.http-address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.https-address", "localhost:8081");
+ hdfsSiteProperties.put("dfs.namenode.rpc-address", "localhost:8082");
+
+ // configure the defaultFS to use the nameservice URL
+ coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+
+ // configure the hbase rootdir to use the nameservice URL
+ hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedNameService +
"/hbase/test/root/dir");
+
+ // configure the hbase rootdir to use the nameservice URL
+ accumuloSiteProperties.put("instance.volumes", "hdfs://" +
expectedNameService + "/accumulo/test/instance/volumes");
+
+ Configuration clusterConfig = new Configuration(properties,
Collections.emptyMap());
+
+ Collection<String> hgComponents = new HashSet<>();
+ hgComponents.add("NAMENODE");
+ TestHostGroup group1 = new TestHostGroup(expectedHostGroupName,
hgComponents, Collections.singleton(expectedHostName));
+
+ Collection<String> hgComponents2 = new HashSet<>();
+ hgComponents2.add("NAMENODE");
+ TestHostGroup group2 = new TestHostGroup(expectedHostGroupNameTwo,
hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+ // add third and fourth hostgroup with NAMENODE, to simulate HDFS NameNode
Federation
+ TestHostGroup group3 = new TestHostGroup(expectedHostGroupNameThree,
Collections.singleton("NAMENODE"),
Collections.singleton(expectedHostNameThree));
+ TestHostGroup group4 = new TestHostGroup(expectedHostGroupNameFour,
Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameFour));
+
+ Collection<TestHostGroup> hostGroups = new ArrayList<>();
+ hostGroups.add(group1);
+ hostGroups.add(group2);
+ hostGroups.add(group3);
+ hostGroups.add(group4);
+
+ expect(stack.getCardinality("NAMENODE")).andReturn(new
Cardinality("1-2")).anyTimes();
+ expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new
Cardinality("1")).anyTimes();
+
+ ClusterTopology topology = createClusterTopology(bp, clusterConfig,
hostGroups);
+ BlueprintConfigurationProcessor updater = new
BlueprintConfigurationProcessor(topology);
+
+ Set<String> updatedConfigTypes =
+ updater.doUpdateForClusterCreate();
+
+ // verify that dfs.internal.nameservices was added
+ assertEquals("dfs.internal.nameservices wasn't added", expectedNameService
+ "," + expectedNameServiceTwo,
hdfsSiteProperties.get("dfs.internal.nameservices"));
+
+ // verify that the expected hostname was substituted for the host group
name in the config
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService +
"." + expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService +
"." + expectedNodeTwo));
+
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "."
+ expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "."
+ expectedNodeTwo));
+
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostName + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "."
+ expectedNodeOne));
+ assertEquals("HTTPS address HA property not properly exported",
+ expectedHostNameTwo + ":" + expectedPortNum,
hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "."
+ expectedNodeTwo));
+
+ assertEquals("fs.defaultFS should not be modified by cluster update when
NameNode HA is enabled.",
+ "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+
+ assertEquals("hbase.rootdir should not be modified by cluster update when
NameNode HA is enabled.",
+ "hdfs://" + expectedNameService + "/hbase/test/root/dir",
hbaseSiteProperties.get("hbase.rootdir"));
+
+ assertEquals("instance.volumes should not be modified by cluster update
when NameNode HA is enabled.",
+ "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes",
accumuloSiteProperties.get("instance.volumes"));
+
+ // verify that the non-HA properties are filtered out in HA mode
+ assertFalse("dfs.namenode.http-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.http-address"));
+ assertFalse("dfs.namenode.https-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.https-address"));
+ assertFalse("dfs.namenode.rpc-address should have been filtered out of
this HA configuration",
+ hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
+
+
+ // verify that correct configuration types were listed as updated in the
returned set
+ assertEquals("Incorrect number of updated config types returned, set = " +
updatedConfigTypes,
+ 2, updatedConfigTypes.size());
+ assertTrue("Expected config type not found in updated set",
+ updatedConfigTypes.contains("cluster-env"));
+ assertTrue("Expected config type not found in updated set",
+ updatedConfigTypes.contains("hdfs-site"));
+
+ // verify that the standard, single-nameservice HA properties are
+ // NOT set in this configuration
+ assertFalse("Single-node nameservice config should not have been set",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active"));
+ assertFalse("Single-node nameservice config should not have been set",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby"));
+
+ // verify that the config processor sets the expected properties for
+ // the sets of active and standby hostnames for NameNode deployment
+ assertTrue("Expected active set not found in hadoop-env",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
+ assertTrue("Expected standby set not found in hadoop-env",
+ hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+
+ // verify that the expected hostnames are included in the active set
+ String[] activeHostNames =
hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
+ assertEquals("NameNode active set did not contain the expected number of
hosts",
+ 2, activeHostNames.length);
+ Set<String> setOfActiveHostNames = new
HashSet<String>(Arrays.asList(activeHostNames));
+ assertTrue("Expected host name not found in the active map",
+ setOfActiveHostNames.contains("test-server-five"));
+ assertTrue("Expected host name not found in the active map",
+ setOfActiveHostNames.contains("test-server-six"));
+
+
+ // verify that the expected hostnames are included in the standby set
+ String[] standbyHostNames =
hadoopEnvProperties.get("dfs_ha_initial_namenode_standby_set").split(",");
+ assertEquals("NameNode standby set did not contain the expected number of
hosts",
+ 2, standbyHostNames.length);
+ Set<String> setOfStandbyHostNames = new
HashSet<String>(Arrays.asList(standbyHostNames));
+ assertTrue("Expected host name not found in the standby map",
+ setOfStandbyHostNames.contains("test-server-seven"));
+ assertTrue("Expected host name not found in the standby map",
+ setOfStandbyHostNames.contains("test-server-eight"));
+ }
+
+ @Test(expected = ConfigurationTopologyException.class)
+ public void
testDoUpdateForClusterWithNameNodeFederationEnabledErrorRPCAddressNotSpecified()
throws Exception {
+ final String expectedNameService = "mynameservice";
+ final String expectedNameServiceTwo = "mynameservicetwo";
+ final String expectedHostName = "c6401.apache.ambari.org";
+ final String expectedHostNameTwo = "c6402.apache.ambari.org";
+ final String expectedHostNameThree = "c6403.apache.ambari.org";
+ final String expectedHostNameFour = "c6404.apache.ambari.org";
+ final String expectedPortNum = "808080";
+ final String expectedNodeOne = "nn1";
+ final String expectedNodeTwo = "nn2";
+ final String expectedNodeThree = "nn3";
+ final String expectedNodeFour = "nn4";
+ final String expectedHostGroupName = "host_group_1";
+ final String expectedHostGroupNameTwo = "host_group_2";
+ final String expectedHostGroupNameThree = "host-group-3";
+ final String expectedHostGroupNameFour = "host-group-4";
+
+ Map<String, Map<String, String>> properties = new HashMap<>();
+
+ Map<String, String> hdfsSiteProperties = new HashMap<>();
+ Map<String, String> hbaseSiteProperties = new HashMap<>();
+ Map<String, String> hadoopEnvProperties = new HashMap<>();
+ Map<String, String> coreSiteProperties = new HashMap<>();
+ Map<String, String> accumuloSiteProperties = new HashMap<>();
+
+ properties.put("hdfs-site", hdfsSiteProperties);
+ properties.put("hadoop-env", hadoopEnvProperties);
+ properties.put("core-site", coreSiteProperties);
+ properties.put("hbase-site", hbaseSiteProperties);
+ properties.put("accumulo-site", accumuloSiteProperties);
+
+ // setup multiple nameservices, to indicate NameNode Federation will be
used
+ hdfsSiteProperties.put("dfs.nameservices", expectedNameService + "," +
expectedNameServiceTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne +
", " + expectedNodeTwo);
+ hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo,
expectedNodeThree + "," + expectedNodeFour);
+
+
+ // setup properties that include exported host group information
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeOne, createExportedAddress(expectedPortNum,
expectedHostGroupName));
+ hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService
+ "." + expectedNodeTwo, createExportedAddress(expectedPortNum,
expectedHostGroupNameTwo));
+
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.https-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeThree,
createExportedAddress(expectedPortNum, expectedHostGroupNameThree));
+ hdfsSiteProperties.put("dfs.namenode.http-address." +
expectedNameServiceTwo + "." + expectedNodeFour,
createExportedAddress(expectedPortNum, expectedHostGroupNameFour));
+
+ // add properties that require the SECONDARY_NAMENODE, which
+ // is not included in this test
+ hdfsSiteProperties.put("dfs.secondary.http.address", "localhost:8080");
+ hdfsSiteProperties.put("dfs.namenode.secondary.http-address",
"localhost:8080");
+
+ Configuration clusterConfig = new Configuration(properties,
Collections.emptyMap());
+
+ Collection<String> hgComponents = new HashSet<>();
+ hgComponents.add("NAMENODE");
+ TestHostGroup group1 = new TestHostGroup(expectedHostGroupName,
hgComponents, Collections.singleton(expectedHostName));
+
+ Collection<String> hgComponents2 = new HashSet<>();
+ hgComponents2.add("NAMENODE");
+ TestHostGroup group2 = new TestHostGroup(expectedHostGroupNameTwo,
hgComponents2, Collections.singleton(expectedHostNameTwo));
+
+ // add third and fourth hostgroup with NAMENODE, to simulate HDFS NameNode
Federation
+ TestHostGroup group3 = new TestHostGroup(expectedHostGroupNameThree,
Collections.singleton("NAMENODE"),
Collections.singleton(expectedHostNameThree));
+ TestHostGroup group4 = new TestHostGroup(expectedHostGroupNameFour,
Collections.singleton("NAMENODE"), Collections.singleton(expectedHostNameFour));
+
+ Collection<TestHostGroup> hostGroups = new ArrayList<>();
+ hostGroups.add(group1);
+ hostGroups.add(group2);
+ hostGroups.add(group3);
+ hostGroups.add(group4);
+
+ expect(stack.getCardinality("NAMENODE")).andReturn(new
Cardinality("1-2")).anyTimes();
+ expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new
Cardinality("1")).anyTimes();
+
+ ClusterTopology topology = createClusterTopology(bp, clusterConfig,
hostGroups);
+ BlueprintConfigurationProcessor updater = new
BlueprintConfigurationProcessor(topology);
+
+ // this should fail with the expected exception
+ Set<String> updatedConfigTypes =
+ updater.doUpdateForClusterCreate();
+ }
+
@Test
public void testDoUpdateForClusterWithNameNodeHANotEnabled() throws
Exception {
final String expectedHostName = "c6401.apache.ambari.org";
--
To stop receiving notification emails like this one, please contact
[email protected].