This is an automated email from the ASF dual-hosted git repository.
avijayan pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git
The following commit(s) were added to refs/heads/trunk by this push:
new 71816bc [AMBARI-23370] Download client configs fails due to
'clusterLevelParams' not found (#1033)
71816bc is described below
commit 71816bcb82a09e548436425c5f4e48941975d355
Author: majorendre <[email protected]>
AuthorDate: Thu Apr 19 04:57:59 2018 +0200
[AMBARI-23370] Download client configs fails due to 'clusterLevelParams'
not found (#1033)
* AMBARI-23370 Download client configs fails due to clusterLevelParam
missing
* Removed agentCache param
* AMBARI-23370 clusterLevelParams, more compatible stage utils
---
.../server/controller/KerberosHelperImpl.java | 10 +-
.../internal/ClientConfigResourceProvider.java | 75 ++-------
.../topology/ClusterConfigurationRequest.java | 5 +-
.../org/apache/ambari/server/utils/StageUtils.java | 81 +++++----
.../stacks/HDP/2.6/services/YARN/kerberos.json | 2 +-
.../controller/AmbariManagementControllerTest.java | 2 +-
.../internal/ClientConfigResourceProviderTest.java | 184 +++++++--------------
.../apache/ambari/server/utils/StageUtilsTest.java | 24 +--
8 files changed, 141 insertions(+), 242 deletions(-)
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d2323c4..3f0ea84 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -588,8 +588,6 @@ public class KerberosHelperImpl implements KerberosHelper {
configurations.put("clusterHostInfo", clusterHostInfoMap);
}
- Map<String, String> componentToClusterInfoMap =
StageUtils.getComponentToClusterInfoKeyMap();
-
// Iterate through the recommendations to find the recommended host
assignments
for (RecommendationResponse.HostGroup hostGroup : hostGroups) {
Set<Map<String, String>> components = hostGroup.getComponents();
@@ -607,13 +605,7 @@ public class KerberosHelperImpl implements KerberosHelper {
// If the component filter is null or the current component
is found in the filter,
// include it in the map
if ((componentFilter == null) ||
componentFilter.contains(componentName)) {
- String key = componentToClusterInfoMap.get(componentName);
-
- if (StringUtils.isEmpty(key)) {
- // If not found in the componentToClusterInfoMap, then
keys are assumed to be
- // in the form of <component_name>_hosts (lowercase)
- key = componentName.toLowerCase() + "_hosts";
- }
+ String key =
StageUtils.getClusterHostInfoKey(componentName);
Set<String> fqdns = new TreeSet<>();
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index fda3817..0dfb5dc 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -18,22 +18,7 @@
package org.apache.ambari.server.controller.internal;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GPL_LICENSE_ACCEPTED;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_GROUPS;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
@@ -65,6 +50,7 @@ import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
import org.apache.ambari.server.controller.MaintenanceStateHelper;
import org.apache.ambari.server.controller.ServiceComponentHostRequest;
import org.apache.ambari.server.controller.ServiceComponentHostResponse;
@@ -369,27 +355,13 @@ public class ClientConfigResourceProvider extends
AbstractControllerResourceProv
}
osFamily = clusters.getHost(hostName).getOsFamily();
- TreeMap<String, String> hostLevelParams = new TreeMap<>();
- StageUtils.useStackJdkIfExists(hostLevelParams, configs);
- hostLevelParams.put(JDK_LOCATION,
managementController.getJdkResourceUrl());
- hostLevelParams.put(STACK_NAME, stackId.getStackName());
- hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
- hostLevelParams.put(DB_NAME, managementController.getServerDB());
- hostLevelParams.put(MYSQL_JDBC_URL,
managementController.getMysqljdbcUrl());
- hostLevelParams.put(ORACLE_JDBC_URL,
managementController.getOjdbcUrl());
- hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
- hostLevelParams.putAll(managementController.getRcaParameters());
- hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY,
configs.isAgentStackRetryOnInstallEnabled());
- hostLevelParams.put(AGENT_STACK_RETRY_COUNT,
configs.getAgentStackRetryOnInstallCount());
- hostLevelParams.put(GPL_LICENSE_ACCEPTED,
configs.getGplLicenseAccepted().toString());
-
// Write down os specific info for the service
ServiceOsSpecific anyOs = null;
if (serviceInfo.getOsSpecifics().containsKey(AmbariMetaInfo.ANY_OS)) {
anyOs = serviceInfo.getOsSpecifics().get(AmbariMetaInfo.ANY_OS);
}
- ServiceOsSpecific hostOs = populateServicePackagesInfo(serviceInfo,
hostLevelParams, osFamily);
+ ServiceOsSpecific hostOs = populateServicePackagesInfo(serviceInfo,
osFamily);
// Build package list that is relevant for host
List<ServiceOsSpecific.Package> packages =
@@ -403,25 +375,6 @@ public class ClientConfigResourceProvider extends
AbstractControllerResourceProv
}
String packageList = gson.toJson(packages);
- Set<String> userSet =
configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.USER,
cluster, desiredClusterConfigs);
- String userList = gson.toJson(userSet);
- hostLevelParams.put(USER_LIST, userList);
-
- //Create a user_group mapping and send it as part of the
hostLevelParams
- Map<String, Set<String>> userGroupsMap =
configHelper.createUserGroupsMap(
- stackId, cluster, desiredClusterConfigs);
- String userGroups = gson.toJson(userGroupsMap);
- hostLevelParams.put(USER_GROUPS,userGroups);
-
- Set<String> groupSet =
configHelper.getPropertyValuesWithPropertyType(stackId, PropertyType.GROUP,
cluster, desiredClusterConfigs);
- String groupList = gson.toJson(groupSet);
- hostLevelParams.put(GROUP_LIST, groupList);
-
- Map<org.apache.ambari.server.state.PropertyInfo, String>
notManagedHdfsPathMap = configHelper.getPropertiesWithPropertyType(stackId,
PropertyType.NOT_MANAGED_HDFS_PATH, cluster, desiredClusterConfigs);
- Set<String> notManagedHdfsPathSet =
configHelper.filterInvalidPropertyValues(notManagedHdfsPathMap,
NOT_MANAGED_HDFS_PATH_LIST);
- String notManagedHdfsPathList = gson.toJson(notManagedHdfsPathSet);
- hostLevelParams.put(NOT_MANAGED_HDFS_PATH_LIST,
notManagedHdfsPathList);
-
String jsonConfigurations = null;
Map<String, Object> commandParams = new HashMap<>();
List<Map<String, String>> xmlConfigs = new LinkedList<>();
@@ -441,6 +394,17 @@ public class ClientConfigResourceProvider extends
AbstractControllerResourceProv
}
}
+ TreeMap<String, String> clusterLevelParams = null;
+ TreeMap<String, String> ambariLevelParams = null;
+ if (getManagementController() instanceof
AmbariManagementControllerImpl){
+ AmbariManagementControllerImpl controller =
((AmbariManagementControllerImpl)getManagementController());
+ clusterLevelParams =
controller.getMetadataClusterLevelParams(cluster, stackId);
+ ambariLevelParams = controller.getMetadataAmbariLevelParams();
+ }
+ TreeMap<String, String> agentLevelParams = new TreeMap<>();
+ agentLevelParams.put("hostname", hostName);
+ agentLevelParams.put("public_hostname", publicHostName);
+
commandParams.put(PACKAGE_LIST, packageList);
commandParams.put("xml_configs_list", xmlConfigs);
commandParams.put("env_configs_list", envConfigs);
@@ -452,7 +416,9 @@ public class ClientConfigResourceProvider extends
AbstractControllerResourceProv
jsonContent.put("configuration_attributes", configurationAttributes);
jsonContent.put("commandParams", commandParams);
jsonContent.put("clusterHostInfo", clusterHostInfo);
- jsonContent.put("hostLevelParams", hostLevelParams);
+ jsonContent.put("ambariLevelParams", ambariLevelParams);
+ jsonContent.put("clusterLevelParams", clusterLevelParams);
+ jsonContent.put("agentLevelParams", agentLevelParams);
jsonContent.put("hostname", hostName);
jsonContent.put("public_hostname", publicHostName);
jsonContent.put("clusterName", cluster.getClusterName());
@@ -926,20 +892,13 @@ public class ClientConfigResourceProvider extends
AbstractControllerResourceProv
}
- protected ServiceOsSpecific populateServicePackagesInfo(ServiceInfo
serviceInfo, Map<String, String> hostParams,
- String osFamily) {
+ protected ServiceOsSpecific populateServicePackagesInfo(ServiceInfo
serviceInfo, String osFamily) {
ServiceOsSpecific hostOs = new ServiceOsSpecific(osFamily);
List<ServiceOsSpecific> foundedOSSpecifics =
getOSSpecificsByFamily(serviceInfo.getOsSpecifics(), osFamily);
if (!foundedOSSpecifics.isEmpty()) {
for (ServiceOsSpecific osSpecific : foundedOSSpecifics) {
hostOs.addPackages(osSpecific.getPackages());
}
- // Choose repo that is relevant for host
- ServiceOsSpecific.Repo serviceRepo = hostOs.getRepo();
- if (serviceRepo != null) {
- String serviceRepoInfo = gson.toJson(serviceRepo);
- hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo);
- }
}
return hostOs;
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index 740dd91..92aecb3 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -285,10 +285,7 @@ public class ClusterConfigurationRequest {
for (String component : components) {
Collection<String> componentHost =
clusterTopology.getHostAssignmentsForComponent(component);
// retrieve corresponding clusterInfoKey for component using StageUtils
- String clusterInfoKey =
StageUtils.getComponentToClusterInfoKeyMap().get(component);
- if (clusterInfoKey == null) {
- clusterInfoKey = component.toLowerCase() + "_hosts";
- }
+ String clusterInfoKey = StageUtils.getClusterHostInfoKey(component);
componentHostsMap.put(clusterInfoKey, StringUtils.join(componentHost,
","));
}
}
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 9abc62b..26be695 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -90,8 +90,7 @@ public class StageUtils {
protected static final String RACKS = "all_racks";
protected static final String IPV4_ADDRESSES = "all_ipv4_ips";
- private static Map<String, String> componentToClusterInfoKeyMap =
- new HashMap<>();
+ private static Map<String, String> componentToClusterInfoKeyMap = new
HashMap<>();
private volatile static Gson gson;
@Inject
@@ -147,44 +146,48 @@ public class StageUtils {
StageUtils.configuration = configuration;
}
+ private static void put2componentToClusterInfoKeyMap(String component){
+ componentToClusterInfoKeyMap.put(component,
getClusterHostInfoKey(component));
+ }
+
+ /**
+ * Even though this map is populated systematically, we still need this map
+ * since components like Atlas Client that are missing from this map
* and client components are handled differently
+ */
static {
- componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
- componentToClusterInfoKeyMap.put("JOBTRACKER", "jtnode_host");
- componentToClusterInfoKeyMap.put("SECONDARY_NAMENODE", "snamenode_host");
- componentToClusterInfoKeyMap.put("RESOURCEMANAGER", "rm_host");
- componentToClusterInfoKeyMap.put("NODEMANAGER", "nm_hosts");
- componentToClusterInfoKeyMap.put("HISTORYSERVER", "hs_host");
- componentToClusterInfoKeyMap.put("JOURNALNODE", "journalnode_hosts");
- componentToClusterInfoKeyMap.put("ZKFC", "zkfc_hosts");
- componentToClusterInfoKeyMap.put("ZOOKEEPER_SERVER", "zookeeper_hosts");
- componentToClusterInfoKeyMap.put("FLUME_HANDLER", "flume_hosts");
- componentToClusterInfoKeyMap.put("HBASE_MASTER", "hbase_master_hosts");
- componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
- componentToClusterInfoKeyMap.put("HIVE_SERVER", "hive_server_host");
- componentToClusterInfoKeyMap.put("HIVE_METASTORE", "hive_metastore_host");
- componentToClusterInfoKeyMap.put("OOZIE_SERVER", "oozie_server");
- componentToClusterInfoKeyMap.put("WEBHCAT_SERVER", "webhcat_server_host");
- componentToClusterInfoKeyMap.put("MYSQL_SERVER", "hive_mysql_host");
- componentToClusterInfoKeyMap.put("DASHBOARD", "dashboard_host");
- componentToClusterInfoKeyMap.put("GANGLIA_SERVER", "ganglia_server_host");
- componentToClusterInfoKeyMap.put("DATANODE", "slave_hosts");
- componentToClusterInfoKeyMap.put("TASKTRACKER", "mapred_tt_hosts");
- componentToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "hbase_rs_hosts");
- componentToClusterInfoKeyMap.put("ACCUMULO_MASTER",
"accumulo_master_hosts");
- componentToClusterInfoKeyMap.put("ACCUMULO_MONITOR",
"accumulo_monitor_hosts");
- componentToClusterInfoKeyMap.put("ACCUMULO_GC", "accumulo_gc_hosts");
- componentToClusterInfoKeyMap.put("ACCUMULO_TRACER",
"accumulo_tracer_hosts");
- componentToClusterInfoKeyMap.put("ACCUMULO_TSERVER",
"accumulo_tserver_hosts");
+ put2componentToClusterInfoKeyMap("NAMENODE");
+ put2componentToClusterInfoKeyMap("JOBTRACKER");
+ put2componentToClusterInfoKeyMap("SECONDARY_NAMENODE");
+ put2componentToClusterInfoKeyMap("RESOURCEMANAGER");
+ put2componentToClusterInfoKeyMap("NODEMANAGER");
+ put2componentToClusterInfoKeyMap("HISTORYSERVER");
+ put2componentToClusterInfoKeyMap("JOURNALNODE");
+ put2componentToClusterInfoKeyMap("ZKFC");
+ put2componentToClusterInfoKeyMap("ZOOKEEPER_SERVER");
+ put2componentToClusterInfoKeyMap("FLUME_HANDLER");
+ put2componentToClusterInfoKeyMap("HBASE_MASTER");
+ put2componentToClusterInfoKeyMap("HBASE_REGIONSERVER");
+ put2componentToClusterInfoKeyMap("HIVE_SERVER");
+ put2componentToClusterInfoKeyMap("HIVE_METASTORE");
+ put2componentToClusterInfoKeyMap("OOZIE_SERVER");
+ put2componentToClusterInfoKeyMap("WEBHCAT_SERVER");
+ put2componentToClusterInfoKeyMap("MYSQL_SERVER");
+ put2componentToClusterInfoKeyMap("DASHBOARD");
+ put2componentToClusterInfoKeyMap("GANGLIA_SERVER");
+ put2componentToClusterInfoKeyMap("DATANODE");
+ put2componentToClusterInfoKeyMap("TASKTRACKER");
+ put2componentToClusterInfoKeyMap("ACCUMULO_MASTER");
+ put2componentToClusterInfoKeyMap("ACCUMULO_MONITOR");
+ put2componentToClusterInfoKeyMap("ACCUMULO_GC");
+ put2componentToClusterInfoKeyMap("ACCUMULO_TRACER");
+ put2componentToClusterInfoKeyMap("ACCUMULO_TSERVER");
}
public static String getActionId(long requestId, long stageId) {
return requestId + "-" + stageId;
}
- public static Map<String, String> getComponentToClusterInfoKeyMap() {
- return componentToClusterInfoKeyMap;
- }
-
public static long[] getRequestStage(String actionId) {
String[] fields = actionId.split("-");
long[] requestStageIds = new long[2];
@@ -267,6 +270,16 @@ public class StageUtils {
return commandParams;
}
+ /**
+ * A helper method for generating keys for the clusterHostInfo section.
+ */
+ public static String getClusterHostInfoKey(String componentName){
+ if (componentName == null){
+ throw new IllegalArgumentException("Component name cannot be null");
+ }
+ return componentName.toLowerCase()+"_hosts";
+ }
+
public static Map<String, Set<String>> getClusterHostInfo(Cluster cluster)
throws AmbariException {
//Fill hosts and ports lists
Set<String> hostsSet = new LinkedHashSet<>();
@@ -351,7 +364,7 @@ public class StageUtils {
Collection<String> hostComponents = entry.getValue();
for (String hostComponent : hostComponents) {
- String roleName = componentToClusterInfoKeyMap.get(hostComponent);
+ String roleName = getClusterHostInfoKey(hostComponent);
if (null == roleName) {
roleName = additionalComponentToClusterInfoKeyMap.get(hostComponent);
}
diff --git
a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index bd6798c..425ae0d 100644
---
a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++
b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -44,7 +44,7 @@
{
"core-site": {
"hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
- "hadoop.proxyuser.${yarn-env/yarn_user}.hosts":
"${clusterHostInfo/rm_host}"
+ "hadoop.proxyuser.${yarn-env/yarn_user}.hosts":
"${clusterHostInfo/resourcemanager_hosts}"
}
},
{
diff --git
a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 8567b6d..d67f9e5 100644
---
a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++
b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -1313,7 +1313,7 @@ public class AmbariManagementControllerTest {
assertTrue(ec.getCommandParams().containsKey("command_retry_enabled"));
assertEquals("false", ec.getCommandParams().get("command_retry_enabled"));
Map<String, Set<String>> chInfo = ec.getClusterHostInfo();
- assertTrue(chInfo.containsKey("namenode_host"));
+ assertTrue(chInfo.containsKey("namenode_hosts"));
assertFalse(ec.getCommandParams().containsKey("custom_folder"));
ec = controller.getExecutionCommand(cluster,
diff --git
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
index c5994c5..82e3bb4 100644
---
a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
+++
b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
@@ -34,6 +34,7 @@ import java.io.File;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
@@ -70,7 +71,6 @@ import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceComponent;
import org.apache.ambari.server.state.ServiceComponentHost;
import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.ServiceOsSpecific;
import org.apache.ambari.server.state.StackId;
import org.apache.ambari.server.state.UserGroupInfo;
import org.apache.ambari.server.state.ValueAttributesInfo;
@@ -85,10 +85,10 @@ import
org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
/**
- * TaskResourceProvider tests.
+ * ClientConfigResourceProvider tests.
*/
@RunWith(PowerMockRunner.class)
-@PrepareForTest( {ClientConfigResourceProvider.class, StageUtils.class} )
+@PrepareForTest({ClientConfigResourceProvider.class, StageUtils.class})
public class ClientConfigResourceProviderTest {
@Test
public void testCreateResources() throws Exception {
@@ -151,7 +151,7 @@ public class ClientConfigResourceProviderTest {
Request request = PropertyHelper.getUpdateRequest(properties, null);
Predicate predicate = new PredicateBuilder().property(
-
ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("c1").toPredicate();
+
ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).equals("c1").toPredicate();
try {
provider.updateResources(request, predicate);
@@ -183,12 +183,10 @@ public class ClientConfigResourceProviderTest {
Service service = createNiceMock(Service.class);
ServiceComponent serviceComponent = createNiceMock(ServiceComponent.class);
ServiceComponentHost serviceComponentHost =
createNiceMock(ServiceComponentHost.class);
- ServiceOsSpecific serviceOsSpecific =
createNiceMock(ServiceOsSpecific.class);
ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
Configuration configuration =
PowerMock.createStrictMockAndExpectNew(Configuration.class);
- Map<String, String> configMap = createNiceMock(Map.class);
- File newFile = File.createTempFile("config",".json",new File("/tmp/"));
+ File newFile = File.createTempFile("config", ".json", new File("/tmp/"));
newFile.deleteOnExit();
Runtime runtime = createMock(Runtime.class);
@@ -205,7 +203,7 @@ public class ClientConfigResourceProviderTest {
clientConfigFileDefinition.setDictionaryName("pig-env");
clientConfigFileDefinition.setFileName("pig-env.sh");
clientConfigFileDefinition.setType("env");
- List <ClientConfigFileDefinition> clientConfigFileDefinitionList = new
LinkedList<>();
+ List<ClientConfigFileDefinition> clientConfigFileDefinitionList = new
LinkedList<>();
clientConfigFileDefinitionList.add(clientConfigFileDefinition);
ResourceProvider provider =
AbstractControllerResourceProvider.getResourceProvider(
@@ -223,25 +221,14 @@ public class ClientConfigResourceProviderTest {
String stackName = "S1";
String stackVersion = "V1";
- String stackRoot="/tmp/stacks/S1/V1";
- String packageFolder="PIG/package";
+ String stackRoot = "/tmp/stacks/S1/V1";
+ String packageFolder = "PIG/package";
if (System.getProperty("os.name").contains("Windows")) {
stackRoot = "C:\\tmp\\stacks\\S1\\V1";
packageFolder = "PIG\\package";
}
- HashMap<String, Host> hosts = new HashMap<>();
- hosts.put(hostName, host);
- HashMap<String, Service> services = new HashMap<>();
- services.put(serviceName,service);
- HashMap<String, ServiceComponent> serviceComponentMap = new HashMap<>();
- serviceComponentMap.put(componentName,serviceComponent);
- HashMap<String, ServiceComponentHost> serviceComponentHosts = new
HashMap<>();
- serviceComponentHosts.put(componentName, serviceComponentHost);
- HashMap<String, ServiceOsSpecific> serviceOsSpecificHashMap = new
HashMap<>();
- serviceOsSpecificHashMap.put("key",serviceOsSpecific);
-
ServiceComponentHostResponse shr1 = new
ServiceComponentHostResponse(clusterName, serviceName,
componentName, displayName, hostName, publicHostname, desiredState,
"", null, null, null,
null);
@@ -260,23 +247,12 @@ public class ClientConfigResourceProviderTest {
expect(clusters.getCluster(clusterName)).andReturn(cluster).anyTimes();
expect(configHelper.getEffectiveConfigProperties(cluster,
configTags)).andReturn(properties);
expect(configHelper.getEffectiveConfigAttributes(cluster,
configTags)).andReturn(attributes);
-
expect(configMap.get(Configuration.SERVER_TMP_DIR.getKey())).andReturn(Configuration.SERVER_TMP_DIR.getDefaultValue());
-
expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
expect(configuration.getResourceDirPath()).andReturn(stackRoot);
- expect(configuration.getJavaHome()).andReturn("dummy_java_home");
- expect(configuration.getJDKName()).andReturn(null);
- expect(configuration.getJCEName()).andReturn(null);
- expect(configuration.getJavaVersion()).andReturn(8);
- expect(configuration.getStackJavaHome()).andReturn(null);
- expect(configuration.areHostsSysPrepped()).andReturn("false");
-
expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
- expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");
-
expect(configuration.getGplLicenseAccepted()).andReturn(Configuration.GPL_LICENSE_ACCEPTED.getDefaultValue());
expect(configuration.getExternalScriptThreadPoolSize()).andReturn(Configuration.THREAD_POOL_SIZE_FOR_EXTERNAL_SCRIPT.getDefaultValue());
expect(configuration.getExternalScriptTimeout()).andReturn(Configuration.EXTERNAL_SCRIPT_TIMEOUT.getDefaultValue());
- Map<String,String> props = new HashMap<>();
- props.put("key","value");
+ Map<String, String> props = new HashMap<>();
+ props.put("key", "value");
expect(clusterConfig.getProperties()).andReturn(props);
expect(configHelper.getEffectiveDesiredTags(cluster,
null)).andReturn(allConfigTags);
expect(cluster.getClusterName()).andReturn(clusterName);
@@ -286,19 +262,17 @@ public class ClientConfigResourceProviderTest {
Map<String, Set<String>> clusterHostInfo = new HashMap<>();
Set<String> all_hosts = new HashSet<>(Arrays.asList("Host100", "Host101",
"Host102"));
Set<String> some_hosts = new HashSet<>(Arrays.asList("0-1", "2"));
- Set<String> ohter_hosts = new HashSet<>(Arrays.asList("0,1"));
+ Set<String> ohter_hosts = Collections.singleton("0,1");
Set<String> clusterHostTypes = new HashSet<>(Arrays.asList("nm_hosts",
"hs_host",
- "namenode_host", "rm_host", "snamenode_host", "slave_hosts",
"zookeeper_hosts"));
- for (String hostTypes: clusterHostTypes) {
+ "namenode_host", "rm_host", "snamenode_host", "slave_hosts",
"zookeeper_hosts"));
+ for (String hostTypes : clusterHostTypes) {
if (hostTypes.equals("slave_hosts")) {
clusterHostInfo.put(hostTypes, ohter_hosts);
} else {
clusterHostInfo.put(hostTypes, some_hosts);
}
}
- Map<String, Host> stringHostMap = new HashMap<>();
- stringHostMap.put(hostName, host);
- clusterHostInfo.put("all_hosts",all_hosts);
+ clusterHostInfo.put("all_hosts", all_hosts);
expect(StageUtils.getClusterHostInfo(cluster)).andReturn(clusterHostInfo);
expect(stackId.getStackName()).andReturn(stackName).anyTimes();
@@ -307,8 +281,8 @@ public class ClientConfigResourceProviderTest {
expect(ambariMetaInfo.getComponent(stackName, stackVersion, serviceName,
componentName)).andReturn(componentInfo);
expect(ambariMetaInfo.getService(stackName, stackVersion,
serviceName)).andReturn(serviceInfo);
expect(serviceInfo.getServicePackageFolder()).andReturn(packageFolder);
- expect(ambariMetaInfo.getComponent((String) anyObject(), (String)
anyObject(),
- (String) anyObject(), (String)
anyObject())).andReturn(componentInfo).anyTimes();
+ expect(ambariMetaInfo.getComponent(anyString(), anyString(),
+ anyString(), anyString())).andReturn(componentInfo).anyTimes();
expect(componentInfo.getCommandScript()).andReturn(commandScriptDefinition);
expect(componentInfo.getClientConfigFiles()).andReturn(clientConfigFileDefinitionList);
expect(cluster.getConfig("hive-site", null)).andReturn(clusterConfig);
@@ -321,14 +295,10 @@ public class ClientConfigResourceProviderTest {
expect(serviceComponent.getDesiredStackId()).andReturn(stackId).atLeastOnce();
HashMap<String, String> rcaParams = new HashMap<>();
- rcaParams.put("key","value");
+ rcaParams.put("key", "value");
expect(managementController.getRcaParameters()).andReturn(rcaParams).anyTimes();
expect(ambariMetaInfo.getService(stackName, stackVersion,
serviceName)).andReturn(serviceInfo);
expect(serviceInfo.getOsSpecifics()).andReturn(new HashMap<>()).anyTimes();
- Set<String> userSet = new HashSet<>();
- userSet.add("hdfs");
- expect(configHelper.getPropertyValuesWithPropertyType(
- stackId, PropertyInfo.PropertyType.USER, cluster,
desiredConfigMap)).andReturn(userSet);
Map<PropertyInfo, String> userProperties = new HashMap<>();
Map<PropertyInfo, String> groupProperties = new HashMap<>();
PropertyInfo userProperty = new PropertyInfo();
@@ -353,30 +323,30 @@ public class ClientConfigResourceProviderTest {
userProperties.put(userProperty, "hdfsUser");
groupProperties.put(groupProperty, "hdfsGroup");
Map<String, Set<String>> userGroupsMap = new HashMap<>();
- userGroupsMap.put("hdfsUser", new HashSet<>(Arrays.asList("hdfsGroup")));
+ userGroupsMap.put("hdfsUser", Collections.singleton("hdfsGroup"));
expect(configHelper.getPropertiesWithPropertyType(
- stackId, PropertyInfo.PropertyType.USER, cluster,
desiredConfigMap)).andReturn(userProperties).anyTimes();
+ stackId, PropertyInfo.PropertyType.USER, cluster,
desiredConfigMap)).andReturn(userProperties).anyTimes();
expect(configHelper.getPropertiesWithPropertyType(
- stackId, PropertyInfo.PropertyType.GROUP, cluster,
desiredConfigMap)).andReturn(groupProperties).anyTimes();
+ stackId, PropertyInfo.PropertyType.GROUP, cluster,
desiredConfigMap)).andReturn(groupProperties).anyTimes();
expect(configHelper.createUserGroupsMap(stackId, cluster,
desiredConfigMap)).andReturn(userGroupsMap).anyTimes();
PowerMock.expectNew(File.class, new Class<?>[]{String.class},
anyObject(String.class)).andReturn(newFile).anyTimes();
PowerMock.mockStatic(File.class);
expect(File.createTempFile(anyString(), anyString(),
anyObject(File.class))).andReturn(newFile);
- String commandLine = "ambari-python-wrap
/tmp/stacks/S1/V1/PIG/package/null generate_configs "+newFile +
- " /tmp/stacks/S1/V1/PIG/package
/var/lib/ambari-server/tmp/structured-out.json " +
- "INFO /var/lib/ambari-server/tmp";
+ String commandLine = "ambari-python-wrap
/tmp/stacks/S1/V1/PIG/package/null generate_configs " + newFile +
+ " /tmp/stacks/S1/V1/PIG/package
/var/lib/ambari-server/tmp/structured-out.json " +
+ "INFO /var/lib/ambari-server/tmp";
if (System.getProperty("os.name").contains("Windows")) {
commandLine = "ambari-python-wrap " + stackRoot +
- "\\PIG\\package\\null generate_configs null " +
- stackRoot + "\\PIG\\package
/var/lib/ambari-server/tmp\\structured-out.json " +
- "INFO /var/lib/ambari-server/tmp";
+ "\\PIG\\package\\null generate_configs null " +
+ stackRoot + "\\PIG\\package
/var/lib/ambari-server/tmp\\structured-out.json " +
+ "INFO /var/lib/ambari-server/tmp";
}
ProcessBuilder processBuilder =
PowerMock.createNiceMock(ProcessBuilder.class);
-
PowerMock.expectNew(ProcessBuilder.class,Arrays.asList(commandLine.split("\\s+"))).andReturn(processBuilder).once();
+ PowerMock.expectNew(ProcessBuilder.class,
Arrays.asList(commandLine.split("\\s+"))).andReturn(processBuilder).once();
expect(processBuilder.start()).andReturn(process).once();
InputStream inputStream = new ByteArrayInputStream("some logging
info".getBytes());
expect(process.getInputStream()).andReturn(inputStream);
@@ -388,16 +358,16 @@ public class ClientConfigResourceProviderTest {
// create the request
Request request =
PropertyHelper.getReadRequest(ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID,
"c1",
- ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID,
- ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID);
+ ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID,
+ ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID);
Predicate predicate = new
PredicateBuilder().property(ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-
equals("c1").and().property(ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("PIG").toPredicate();
+
equals("c1").and().property(ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("PIG").toPredicate();
// replay
replay(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo, commandScriptDefinition,
- clusterConfig, host, service, serviceComponent,
serviceComponentHost, serviceInfo, configHelper,
- runtime, process, configMap);
+ clusterConfig, host, service, serviceComponent, serviceComponentHost,
serviceInfo, configHelper,
+ runtime, process);
PowerMock.replayAll();
Set<Resource> resources = provider.getResources(request, predicate);
@@ -405,9 +375,9 @@ public class ClientConfigResourceProviderTest {
assertFalse(newFile.exists());
// verify
- verify(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo,commandScriptDefinition,
- clusterConfig, host, service, serviceComponent,
serviceComponentHost, serviceInfo, configHelper,
- runtime, process);
+ verify(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo, commandScriptDefinition,
+ clusterConfig, host, service, serviceComponent, serviceComponentHost,
serviceInfo, configHelper,
+ runtime, process);
PowerMock.verifyAll();
}
@@ -430,10 +400,8 @@ public class ClientConfigResourceProviderTest {
Service service = createNiceMock(Service.class);
ServiceComponent serviceComponent = createNiceMock(ServiceComponent.class);
ServiceComponentHost serviceComponentHost =
createNiceMock(ServiceComponentHost.class);
- ServiceOsSpecific serviceOsSpecific =
createNiceMock(ServiceOsSpecific.class);
ConfigHelper configHelper = createNiceMock(ConfigHelper.class);
Configuration configuration =
PowerMock.createStrictMockAndExpectNew(Configuration.class);
- Map<String, String> configMap = createNiceMock(Map.class);
File mockFile = PowerMock.createNiceMock(File.class);
Runtime runtime = createMock(Runtime.class);
@@ -450,7 +418,7 @@ public class ClientConfigResourceProviderTest {
clientConfigFileDefinition.setDictionaryName("pig-env");
clientConfigFileDefinition.setFileName("pig-env.sh");
clientConfigFileDefinition.setType("env");
- List <ClientConfigFileDefinition> clientConfigFileDefinitionList = new
LinkedList<>();
+ List<ClientConfigFileDefinition> clientConfigFileDefinitionList = new
LinkedList<>();
clientConfigFileDefinitionList.add(clientConfigFileDefinition);
ResourceProvider provider =
AbstractControllerResourceProvider.getResourceProvider(
@@ -463,9 +431,9 @@ public class ClientConfigResourceProviderTest {
ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID);
Predicate predicate = new
PredicateBuilder().property(ClientConfigResourceProvider.COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-
equals("c1").and().property(ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("PIG").
-
and().property(ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("PIG").
- toPredicate();
+
equals("c1").and().property(ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("PIG").
+
and().property(ClientConfigResourceProvider.COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("PIG").
+ toPredicate();
String clusterName = "C1";
String serviceName = "PIG";
@@ -478,26 +446,13 @@ public class ClientConfigResourceProviderTest {
String stackName = "S1";
String stackVersion = "V1";
- String stackRoot="/tmp/stacks/S1/V1";
- String packageFolder= StackManager.COMMON_SERVICES + "/PIG/package";
+ String packageFolder = StackManager.COMMON_SERVICES + "/PIG/package";
String commonServicesPath = "/var/lib/ambari-server/src/main/resources" +
File.separator + "common-services";
if (System.getProperty("os.name").contains("Windows")) {
- stackRoot = "C:\\tmp\\stacks\\S1\\V1";
packageFolder = StackManager.COMMON_SERVICES + "\\PIG\\package";
}
- HashMap<String, Host> hosts = new HashMap<>();
- hosts.put(hostName, host);
- HashMap<String, Service> services = new HashMap<>();
- services.put(serviceName,service);
- HashMap<String, ServiceComponent> serviceComponentMap = new HashMap<>();
- serviceComponentMap.put(componentName,serviceComponent);
- HashMap<String, ServiceComponentHost> serviceComponentHosts = new
HashMap<>();
- serviceComponentHosts.put(componentName, serviceComponentHost);
- HashMap<String, ServiceOsSpecific> serviceOsSpecificHashMap = new
HashMap<>();
- serviceOsSpecificHashMap.put("key",serviceOsSpecific);
-
ServiceComponentHostResponse shr1 = new
ServiceComponentHostResponse(clusterName, serviceName,
componentName, displayName, hostName, publicHostName, desiredState,
"", null, null, null,
null);
@@ -516,24 +471,13 @@ public class ClientConfigResourceProviderTest {
expect(clusters.getCluster(clusterName)).andReturn(cluster).anyTimes();
expect(configHelper.getEffectiveConfigProperties(cluster,
configTags)).andReturn(properties);
expect(configHelper.getEffectiveConfigAttributes(cluster,
configTags)).andReturn(attributes);
-
expect(configMap.get(Configuration.SERVER_TMP_DIR.getKey())).andReturn(Configuration.SERVER_TMP_DIR.getDefaultValue());
-
expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
expect(configuration.getResourceDirPath()).andReturn("/var/lib/ambari-server/src/main/resources");
- expect(configuration.getJavaHome()).andReturn("dummy_java_home");
- expect(configuration.getJDKName()).andReturn(null);
- expect(configuration.getJCEName()).andReturn(null);
- expect(configuration.getJavaVersion()).andReturn(8);
- expect(configuration.getStackJavaHome()).andReturn(null);
- expect(configuration.areHostsSysPrepped()).andReturn("false");
-
expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
- expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");
-
expect(configuration.getGplLicenseAccepted()).andReturn(Configuration.GPL_LICENSE_ACCEPTED.getDefaultValue());
expect(configuration.getExternalScriptThreadPoolSize()).andReturn(Configuration.THREAD_POOL_SIZE_FOR_EXTERNAL_SCRIPT.getDefaultValue());
expect(configuration.getExternalScriptTimeout()).andReturn(Configuration.EXTERNAL_SCRIPT_TIMEOUT.getDefaultValue());
- Map<String,String> props = new HashMap<>();
- props.put("key","value");
+ Map<String, String> props = new HashMap<>();
+ props.put("key", "value");
expect(clusterConfig.getProperties()).andReturn(props);
expect(configHelper.getEffectiveDesiredTags(cluster,
null)).andReturn(allConfigTags);
expect(cluster.getClusterName()).andReturn(clusterName);
@@ -543,19 +487,17 @@ public class ClientConfigResourceProviderTest {
Map<String, Set<String>> clusterHostInfo = new HashMap<>();
Set<String> all_hosts = new HashSet<>(Arrays.asList("Host100", "Host101",
"Host102"));
Set<String> some_hosts = new HashSet<>(Arrays.asList("0-1", "2"));
- Set<String> ohter_hosts = new HashSet<>(Arrays.asList("0,1"));
+ Set<String> ohter_hosts = Collections.singleton("0,1");
Set<String> clusterHostTypes = new HashSet<>(Arrays.asList("nm_hosts",
"hs_host",
- "namenode_host", "rm_host", "snamenode_host", "slave_hosts",
"zookeeper_hosts"));
- for (String hostTypes: clusterHostTypes) {
+ "namenode_host", "rm_host", "snamenode_host", "slave_hosts",
"zookeeper_hosts"));
+ for (String hostTypes : clusterHostTypes) {
if (hostTypes.equals("slave_hosts")) {
clusterHostInfo.put(hostTypes, ohter_hosts);
} else {
clusterHostInfo.put(hostTypes, some_hosts);
}
}
- Map<String, Host> stringHostMap = new HashMap<>();
- stringHostMap.put(hostName, host);
- clusterHostInfo.put("all_hosts",all_hosts);
+ clusterHostInfo.put("all_hosts", all_hosts);
expect(StageUtils.getClusterHostInfo(cluster)).andReturn(clusterHostInfo);
expect(stackId.getStackName()).andReturn(stackName).anyTimes();
@@ -564,8 +506,8 @@ public class ClientConfigResourceProviderTest {
expect(ambariMetaInfo.getComponent(stackName, stackVersion, serviceName,
componentName)).andReturn(componentInfo);
expect(ambariMetaInfo.getService(stackName, stackVersion,
serviceName)).andReturn(serviceInfo);
expect(serviceInfo.getServicePackageFolder()).andReturn(packageFolder);
- expect(ambariMetaInfo.getComponent((String) anyObject(), (String)
anyObject(),
- (String) anyObject(), (String)
anyObject())).andReturn(componentInfo).anyTimes();
+ expect(ambariMetaInfo.getComponent(anyString(), anyString(),
+ anyString(), anyString())).andReturn(componentInfo).anyTimes();
expect(componentInfo.getCommandScript()).andReturn(commandScriptDefinition);
expect(componentInfo.getClientConfigFiles()).andReturn(clientConfigFileDefinitionList);
expect(cluster.getConfig("hive-site", null)).andReturn(clusterConfig);
@@ -578,13 +520,10 @@ public class ClientConfigResourceProviderTest {
expect(serviceComponent.getDesiredStackId()).andReturn(stackId).atLeastOnce();
HashMap<String, String> rcaParams = new HashMap<>();
- rcaParams.put("key","value");
+ rcaParams.put("key", "value");
expect(managementController.getRcaParameters()).andReturn(rcaParams).anyTimes();
expect(ambariMetaInfo.getService(stackName, stackVersion,
serviceName)).andReturn(serviceInfo);
expect(serviceInfo.getOsSpecifics()).andReturn(new HashMap<>()).anyTimes();
- Set<String> userSet = new HashSet<>();
- userSet.add("hdfs");
- expect(configHelper.getPropertyValuesWithPropertyType(stackId,
PropertyInfo.PropertyType.USER, cluster, desiredConfigMap)).andReturn(userSet);
PowerMock.expectNew(File.class, new Class<?>[]{String.class},
anyObject(String.class)).andReturn(mockFile).anyTimes();
PowerMock.mockStatic(File.class);
expect(mockFile.exists()).andReturn(true);
@@ -592,40 +531,39 @@ public class ClientConfigResourceProviderTest {
PowerMock.createNiceMockAndExpectNew(PrintWriter.class, anyObject());
PowerMock.mockStatic(Runtime.class);
String commandLine = "ambari-python-wrap " + commonServicesPath +
"/PIG/package/null generate_configs null " +
- commonServicesPath + "/PIG/package
/var/lib/ambari-server/tmp/structured-out.json " +
- "INFO /var/lib/ambari-server/tmp";
+ commonServicesPath + "/PIG/package
/var/lib/ambari-server/tmp/structured-out.json " +
+ "INFO /var/lib/ambari-server/tmp";
if (System.getProperty("os.name").contains("Windows")) {
commandLine = "ambari-python-wrap " + commonServicesPath +
- "\\PIG\\package\\null generate_configs null " +
- commonServicesPath + "\\PIG\\package
/var/lib/ambari-server/tmp\\structured-out.json " +
- "INFO /var/lib/ambari-server/tmp";
+ "\\PIG\\package\\null generate_configs null " +
+ commonServicesPath + "\\PIG\\package
/var/lib/ambari-server/tmp\\structured-out.json " +
+ "INFO /var/lib/ambari-server/tmp";
}
ProcessBuilder processBuilder =
PowerMock.createNiceMock(ProcessBuilder.class);
-
PowerMock.expectNew(ProcessBuilder.class,Arrays.asList(commandLine.split("\\s+"))).andReturn(processBuilder).once();
+ PowerMock.expectNew(ProcessBuilder.class,
Arrays.asList(commandLine.split("\\s+"))).andReturn(processBuilder).once();
expect(processBuilder.start()).andReturn(process).once();
InputStream inputStream = new ByteArrayInputStream("some logging
info".getBytes());
expect(process.getInputStream()).andReturn(inputStream);
// replay
replay(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo, commandScriptDefinition,
- clusterConfig, host, service, serviceComponent,
serviceComponentHost, serviceInfo, configHelper,
- runtime, process, configMap);
+ clusterConfig, host, service, serviceComponent, serviceComponentHost,
serviceInfo, configHelper,
+ runtime, process);
PowerMock.replayAll();
Set<Resource> resources = provider.getResources(request, predicate);
assertFalse(resources.isEmpty());
// verify
- verify(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo,commandScriptDefinition,
- clusterConfig, host, service, serviceComponent,
serviceComponentHost, serviceInfo, configHelper,
- runtime, process);
+ verify(managementController, clusters, cluster, ambariMetaInfo, stackId,
componentInfo, commandScriptDefinition,
+ clusterConfig, host, service, serviceComponent, serviceComponentHost,
serviceInfo, configHelper,
+ runtime, process);
PowerMock.verifyAll();
}
-
@Test
public void testDeleteResources() throws Exception {
Resource.Type type = Resource.Type.ClientConfig;
@@ -640,7 +578,7 @@ public class ClientConfigResourceProviderTest {
managementController);
Predicate predicate = new PredicateBuilder().property(
-
ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HDFS_CLIENT").toPredicate();
+
ClientConfigResourceProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("HDFS_CLIENT").toPredicate();
try {
provider.deleteResources(new RequestImpl(null, null, null, null),
predicate);
Assert.fail("Expected an UnsupportedOperationException");
diff --git
a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index e5c8943..e901d62 100644
---
a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++
b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -555,19 +555,19 @@ public class StageUtilsTest extends EasyMockSupport {
assertEquals(StageUtils.getHostName(), serverHost.iterator().next());
// check host role replacing by the projected topology
- assertTrue(getDecompressedSet(info.get("hbase_rs_hosts")).contains(9));
+
assertTrue(getDecompressedSet(info.get("hbase_regionserver_hosts")).contains(9));
// Validate substitutions...
info = StageUtils.substituteHostIndexes(info);
- checkServiceHostNames(info, "DATANODE", "slave_hosts", projectedTopology);
- checkServiceHostNames(info, "NAMENODE", "namenode_host",
projectedTopology);
- checkServiceHostNames(info, "SECONDARY_NAMENODE", "snamenode_host",
projectedTopology);
- checkServiceHostNames(info, "HBASE_MASTER", "hbase_master_hosts",
projectedTopology);
- checkServiceHostNames(info, "HBASE_REGIONSERVER", "hbase_rs_hosts",
projectedTopology);
- checkServiceHostNames(info, "JOBTRACKER", "jtnode_host",
projectedTopology);
- checkServiceHostNames(info, "TASKTRACKER", "mapred_tt_hosts",
projectedTopology);
- checkServiceHostNames(info, "NONAME_SERVER", "noname_server_hosts",
projectedTopology);
+ checkServiceHostNames(info, "DATANODE", projectedTopology);
+ checkServiceHostNames(info, "NAMENODE", projectedTopology);
+ checkServiceHostNames(info, "SECONDARY_NAMENODE", projectedTopology);
+ checkServiceHostNames(info, "HBASE_MASTER", projectedTopology);
+ checkServiceHostNames(info, "HBASE_REGIONSERVER", projectedTopology);
+ checkServiceHostNames(info, "JOBTRACKER", projectedTopology);
+ checkServiceHostNames(info, "TASKTRACKER", projectedTopology);
+ checkServiceHostNames(info, "NONAME_SERVER", projectedTopology);
}
private void insertTopology(Map<String, Collection<String>>
projectedTopology, String componentName, Set<String> hostNames) {
@@ -696,7 +696,7 @@ public class StageUtilsTest extends EasyMockSupport {
}
// Determine the actual hosts for a given component...
- Set<String> hosts = info.get(mappedComponentName);
+ Set<String> hosts =
info.get(StageUtils.getClusterHostInfoKey(componentName));
if (hosts != null) {
actualHostsList.addAll(getDecompressedSet(hosts));
}
@@ -704,7 +704,7 @@ public class StageUtilsTest extends EasyMockSupport {
assertEquals(expectedHostsList, actualHostsList);
}
- private void checkServiceHostNames(Map<String, Set<String>> info, String
componentName, String mappedComponentName,
+ private void checkServiceHostNames(Map<String, Set<String>> info, String
componentName,
Map<String, Collection<String>>
serviceTopology) {
Set<String> expectedHostsList = new HashSet<>();
Set<String> actualHostsList = new HashSet<>();
@@ -717,7 +717,7 @@ public class StageUtilsTest extends EasyMockSupport {
}
// Determine the actual hosts for a given component...
- Set<String> hosts = info.get(mappedComponentName);
+ Set<String> hosts =
info.get(StageUtils.getClusterHostInfoKey(componentName));
if (hosts != null) {
actualHostsList.addAll(hosts);
}
--
To stop receiving notification emails like this one, please contact
[email protected].