Author: swagle
Date: Wed Apr 24 22:11:50 2013
New Revision: 1471728
URL: http://svn.apache.org/r1471728
Log:
AMBARI-2019. Cannot decommission data node (ensure recommission also works).
(swagle)
Modified:
incubator/ambari/trunk/CHANGES.txt
incubator/ambari/trunk/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
incubator/ambari/trunk/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
Modified: incubator/ambari/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/CHANGES.txt?rev=1471728&r1=1471727&r2=1471728&view=diff
==============================================================================
--- incubator/ambari/trunk/CHANGES.txt (original)
+++ incubator/ambari/trunk/CHANGES.txt Wed Apr 24 22:11:50 2013
@@ -786,6 +786,9 @@ Trunk (unreleased changes):
BUG FIXES
+ AMBARI-2019. Cannot decommission data node (ensure recommission also works).
+ (swagle)
+
AMBARI-2021. Hadoop installation on cluster with SUSE-11 failed. (smohanty)
AMBARI-2010. Tasks do not timeout for failed hosts. (swagle)
Modified:
incubator/ambari/trunk/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb?rev=1471728&r1=1471727&r2=1471728&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb (original)
+++ incubator/ambari/trunk/ambari-agent/src/main/puppet/modules/hdp/lib/puppet/parser/functions/hdp_default.rb Wed Apr 24 22:11:50 2013
@@ -29,11 +29,8 @@ module Puppet::Parser::Functions
# Lookup value inside a hash map.
if var_parts.length > 1 and function_hdp_is_empty(val) and
function_hdp_is_empty(lookupvar("configuration")) == false and
function_hdp_is_empty(lookupvar("#{var_parts[-2]}")) == false
keyHash = var_parts[-2]
- puts "keyHash #{keyHash}"
hashMap = lookupvar("#{keyHash}")
- puts "hashMap #{hashMap}"
- puts "default #{default}"
- val = hashMap.fetch(var_name, default.to_s)
+ val = hashMap.fetch(var_name, default.to_s)
end
# To workaround string-boolean comparison issues,
# ensure that we return boolean result if the default value
Modified:
incubator/ambari/trunk/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java?rev=1471728&r1=1471727&r2=1471728&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java (original)
+++ incubator/ambari/trunk/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java Wed Apr 24 22:11:50 2013
@@ -4028,6 +4028,7 @@ public class AmbariManagementControllerI
throws AmbariException {
// Find hdfs admin host, just decommission from namenode.
String clusterName = decommissionRequest.getClusterName();
+ Cluster cluster = clusters.getCluster(clusterName);
String serviceName = decommissionRequest.getServiceName();
String namenodeHost = clusters.getCluster(clusterName)
.getService(serviceName).getServiceComponent(Role.NAMENODE.toString())
@@ -4052,14 +4053,11 @@ public class AmbariManagementControllerI
new TreeMap<String, Map<String, String>>();
configurations.put(config.getType(), config.getProperties());
-    Map<String, Config> hdfsSiteConfig = clusters.getCluster(clusterName).getService("HDFS")
-        .getDesiredConfigs();
- if (hdfsSiteConfig != null) {
- for (Map.Entry<String, Config> entry: hdfsSiteConfig.entrySet()) {
- configurations
- .put(entry.getValue().getType(), entry.getValue().getProperties());
- }
- }
+ Map<String, Map<String, String>> configTags = new TreeMap<String,
+ Map<String, String>>();
+
+ findConfigurationPropertiesWithOverrides(configurations, configTags,
+ cluster, serviceName, namenodeHost);
stage.addHostRoleExecutionCommand(
namenodeHost,
@@ -4073,8 +4071,8 @@ public class AmbariManagementControllerI
Role.DECOMMISSION_DATANODE.toString()).getExecutionCommand();
execCmd.setConfigurations(configurations);
+ execCmd.setConfigurationTags(configTags);
- Cluster cluster = clusters.getCluster(clusterName);
Map<String, String> params = new TreeMap<String, String>();
params.put("jdk_location", this.jdkResourceUrl);
params.put("stack_version", cluster.getDesiredStackVersion()
Modified:
incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
URL: http://svn.apache.org/viewvc/incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java?rev=1471728&r1=1471727&r2=1471728&view=diff
==============================================================================
--- incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java (original)
+++ incubator/ambari/trunk/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java Wed Apr 24 22:11:50 2013
@@ -4610,6 +4610,104 @@ public class AmbariManagementControllerT
}
@Test
+ public void testDecommissonDatanodeAction() throws AmbariException {
+ String clusterName = "foo1";
+ createCluster(clusterName);
+ clusters.getCluster(clusterName)
+ .setDesiredStackVersion(new StackId("HDP-0.1"));
+ String serviceName = "HDFS";
+ createService(clusterName, serviceName, null);
+ String componentName1 = "NAMENODE";
+ String componentName2 = "DATANODE";
+ String componentName3 = "HDFS_CLIENT";
+
+ Map<String, String> mapRequestProps = new HashMap<String, String>();
+ mapRequestProps.put("context", "Called from a test");
+
+ createServiceComponent(clusterName, serviceName, componentName1,
+ State.INIT);
+ createServiceComponent(clusterName, serviceName, componentName2,
+ State.INIT);
+ createServiceComponent(clusterName, serviceName, componentName3,
+ State.INIT);
+
+ String host1 = "h1";
+ clusters.addHost(host1);
+ clusters.getHost("h1").setOsType("centos5");
+ clusters.getHost("h1").persist();
+ String host2 = "h2";
+ clusters.addHost(host2);
+ clusters.getHost("h2").setOsType("centos6");
+ clusters.getHost("h2").persist();
+
+ clusters.mapHostToCluster(host1, clusterName);
+ clusters.mapHostToCluster(host2, clusterName);
+
+ createServiceComponentHost(clusterName, serviceName, componentName1,
+ host1, null);
+ createServiceComponentHost(clusterName, serviceName, componentName2,
+ host1, null);
+ createServiceComponentHost(clusterName, serviceName, componentName2,
+ host2, null);
+ createServiceComponentHost(clusterName, serviceName, componentName3,
+ host1, null);
+ createServiceComponentHost(clusterName, serviceName, componentName3,
+ host2, null);
+
+ // Install
+ installService(clusterName, serviceName, false, false);
+
+ // Create and attach config
+ Map<String, String> configs = new HashMap<String, String>();
+ configs.put("a", "b");
+
+ ConfigurationRequest cr1,cr2;
+ cr1 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
+ configs);
+ ClusterRequest crReq = new ClusterRequest(null, clusterName, null, null);
+ crReq.setDesiredConfig(cr1);
+ controller.updateCluster(crReq, null);
+ Map<String, String> props = new HashMap<String, String>();
+ props.put("datanodes", host2);
+ cr2 = new ConfigurationRequest(clusterName, "hdfs-exclude-file", "tag1",
+ props);
+ crReq = new ClusterRequest(null, clusterName, null, null);
+ crReq.setDesiredConfig(cr2);
+ controller.updateCluster(crReq, null);
+
+ // Start
+ startService(clusterName, serviceName, false, false);
+
+ Cluster cluster = clusters.getCluster(clusterName);
+ Service s = cluster.getService(serviceName);
+ Assert.assertEquals(State.STARTED, s.getDesiredState());
+
+ Set<ActionRequest> requests = new HashSet<ActionRequest>();
+ Map<String, String> params = new HashMap<String, String>(){{
+ put("test", "test");
+ }};
+ ActionRequest request = new ActionRequest(clusterName, "HDFS",
+ Role.DECOMMISSION_DATANODE.name(), params);
+ params.put("excludeFileTag", "tag1");
+ requests.add(request);
+
+ Map<String, String> requestProperties = new HashMap<String, String>();
+ requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
+
+ RequestStatusResponse response = controller.createActions(requests,
+ requestProperties);
+
+    List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
+ ExecutionCommand execCmd = storedTasks.get(0).getExecutionCommandWrapper
+ ().getExecutionCommand();
+ assertNotNull(storedTasks);
+ Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-site"));
+ Assert.assertEquals(1, storedTasks.size());
+ Assert.assertEquals(host2, execCmd.getConfigurations().get
+ ("hdfs-exclude-file").get("datanodes"));
+ }
+
+ @Test
public void testConfigsAttachedToServiceChecks() throws AmbariException {
String clusterName = "foo1";
createCluster(clusterName);