http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java deleted file mode 100644 index 0bffc90..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/timelineservice/package-info.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * ATS implementation - */ -@InterfaceAudience.Private -@InterfaceStability.Unstable -package org.apache.slider.server.appmaster.timelineservice; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java index 8dca4ed..c0a120d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/view/IndexBlock.java @@ -27,7 +27,7 @@ import org.apache.slider.common.tools.SliderUtils; import org.apache.slider.core.registry.docstore.ExportEntry; import org.apache.slider.core.registry.docstore.PublishedExports; import org.apache.slider.core.registry.docstore.PublishedExportsSet; -import org.apache.slider.server.appmaster.metrics.SliderMetrics; +import org.apache.hadoop.yarn.service.metrics.ServiceMetrics; import org.apache.slider.server.appmaster.state.RoleStatus; import org.apache.slider.server.appmaster.web.WebAppApi; import org.slf4j.Logger; @@ -160,7 +160,7 @@ public class IndexBlock extends SliderHamletBlock { roleWithOpenRequest ++; } } - SliderMetrics metrics = status.getComponentMetrics(); + ServiceMetrics metrics = status.getComponentMetrics(); table.tr() .td().a(nameUrl, roleName)._() .td(String.format("%d", metrics.containersDesired.value())) 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java index 49d8fb2..52553d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/avro/RoleHistoryWriter.java @@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.GlobFilter; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.slider.common.SliderKeys; +import org.apache.hadoop.yarn.service.conf.SliderKeys; import org.apache.slider.common.tools.SliderUtils; import org.apache.slider.core.exceptions.BadConfigException; import org.apache.slider.server.appmaster.state.NodeEntry; http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java index f6b03d0..5eba622 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/HttpProbe.java @@ -17,8 +17,11 @@ package org.apache.slider.server.servicemonitor; +import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.slider.server.appmaster.state.RoleInstance; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.service.compinstance.ComponentInstance; +import org.apache.slider.common.tools.SliderUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,16 +70,18 @@ public class HttpProbe extends Probe { connection.setConnectTimeout(timeout); return connection; } - + @Override - public ProbeStatus ping(RoleInstance roleInstance) { + public ProbeStatus ping(ComponentInstance instance) { ProbeStatus status = new ProbeStatus(); - String ip = roleInstance.ip; - if (ip == null) { + ContainerStatus containerStatus = instance.getContainerStatus(); + if (containerStatus == null || SliderUtils.isEmpty(containerStatus.getIPs()) + || StringUtils.isEmpty(containerStatus.getHost())) { status.fail(this, new IOException("IP is not available yet")); return status; } + String ip = containerStatus.getIPs().get(0); HttpURLConnection connection = null; try { URL url = new URL(urlString.replace(HOST_TOKEN, ip)); @@ -86,7 +91,7 @@ public class HttpProbe extends Probe { String error = "Probe " + url + " error code: " + rc; log.info(error); status.fail(this, - new IOException(error)); + new IOException(error)); } else { 
status.succeed(this); } @@ -94,7 +99,7 @@ public class HttpProbe extends Probe { String error = "Probe " + urlString + " failed for IP " + ip + ": " + e; log.info(error, e); status.fail(this, - new IOException(error, e)); + new IOException(error, e)); } finally { if (connection != null) { connection.disconnect(); @@ -102,5 +107,4 @@ public class HttpProbe extends Probe { } return status; } - } http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java index 252242f..da122da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/PortProbe.java @@ -18,6 +18,8 @@ package org.apache.slider.server.servicemonitor; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.yarn.service.compinstance.ComponentInstance; +import org.apache.slider.common.tools.SliderUtils; import org.apache.slider.server.appmaster.state.RoleInstance; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,30 +61,34 @@ public class PortProbe extends Probe { /** * Try to connect to the (host,port); a failure to connect within * the specified timeout is a failure. 
- * @param roleInstance role instance + * @param instance role instance * @return the outcome */ @Override - public ProbeStatus ping(RoleInstance roleInstance) { + public ProbeStatus ping(ComponentInstance instance) { ProbeStatus status = new ProbeStatus(); - String ip = roleInstance.ip; - if (ip == null) { - status.fail(this, new IOException("IP is not available yet")); + if (instance.getContainerStatus() == null || SliderUtils + .isEmpty(instance.getContainerStatus().getIPs())) { + status.fail(this, new IOException( + instance.getCompInstanceName() + ": IP is not available yet")); return status; } + String ip = instance.getContainerStatus().getIPs().get(0); InetSocketAddress sockAddr = new InetSocketAddress(ip, port); Socket socket = new Socket(); try { if (log.isDebugEnabled()) { - log.debug("Connecting to " + sockAddr.toString() + "timeout=" + - MonitorUtils.millisToHumanTime(timeout)); + log.debug(instance.getCompInstanceName() + ": Connecting " + sockAddr + .toString() + ", timeout=" + MonitorUtils + .millisToHumanTime(timeout)); } socket.connect(sockAddr, timeout); status.succeed(this); } catch (Throwable e) { - String error = "Probe " + sockAddr + " failed: " + e; + String error = + instance.getCompInstanceName() + ": Probe " + sockAddr + " failed"; log.debug(error, e); status.fail(this, new IOException(error, e)); } finally { http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java 
index e149442..4809b45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/servicemonitor/Probe.java @@ -19,6 +19,7 @@ package org.apache.slider.server.servicemonitor; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.service.compinstance.ComponentInstance; import org.apache.slider.server.appmaster.state.RoleInstance; import java.io.IOException; @@ -93,9 +94,8 @@ public abstract class Probe implements MonitorKeys { * Ping the endpoint. All exceptions must be caught and included in the * (failure) status. * - * @param roleInstance instance to ping + * @param instance instance to ping * @return the status */ - public abstract ProbeStatus ping(RoleInstance roleInstance); - + public abstract ProbeStatus ping(ComponentInstance instance); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java index 6defa2b..76ce7a5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java @@ -19,6 +19,8 @@ package org.apache.slider.server.services.yarnregistry; import com.google.common.base.Preconditions; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -28,6 +30,8 @@ import org.apache.hadoop.registry.client.binding.RegistryUtils; import org.apache.hadoop.registry.client.binding.RegistryPathUtils; import org.apache.hadoop.registry.client.types.ServiceRecord; +import org.apache.hadoop.yarn.service.compinstance.ComponentInstance; +import org.apache.hadoop.yarn.service.compinstance.ComponentInstanceId; import org.apache.slider.common.tools.SliderUtils; import java.io.IOException; @@ -40,14 +44,13 @@ import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.join; * is registered, offers access to the record and other things. */ public class YarnRegistryViewForProviders { + private static final Log LOG = + LogFactory.getLog(YarnRegistryViewForProviders.class); private final RegistryOperations registryOperations; - private final String user; - private final String sliderServiceClass; private final String instanceName; - private final ApplicationAttemptId applicationAttemptId; /** * Record used where the service registered itself. 
* Null until the service is registered @@ -78,32 +81,12 @@ public class YarnRegistryViewForProviders { this.user = user; this.sliderServiceClass = sliderServiceClass; this.instanceName = instanceName; - this.applicationAttemptId = applicationAttemptId; - } - - public ApplicationAttemptId getApplicationAttemptId() { - return applicationAttemptId; } public String getUser() { return user; } - public String getSliderServiceClass() { - return sliderServiceClass; - } - - public String getInstanceName() { - return instanceName; - } - - public RegistryOperations getRegistryOperations() { - return registryOperations; - } - - public ServiceRecord getSelfRegistration() { - return selfRegistration; - } private void setSelfRegistration(ServiceRecord selfRegistration) { this.selfRegistration = selfRegistration; @@ -192,24 +175,6 @@ public class YarnRegistryViewForProviders { /** * Add a service under a path for the current user - * @param serviceClass service class to use under ~user - * @param serviceName name of the service - * @param record service record - * @param deleteTreeFirst perform recursive delete of the path first - * @return the path the service was created at - * @throws IOException - */ - public String putService( - String serviceClass, - String serviceName, - ServiceRecord record, - boolean deleteTreeFirst) throws IOException { - return putService(user, serviceClass, serviceName, record, deleteTreeFirst); - } - - - /** - * Add a service under a path for the current user * @param record service record * @param deleteTreeFirst perform recursive delete of the path first * @return the path the service was created at @@ -225,23 +190,16 @@ public class YarnRegistryViewForProviders { } /** - * Update the self record by pushing out the latest version of the service - * registration record. - * @throws IOException any failure. 
- */ - public void updateSelf() throws IOException { - putService(user, sliderServiceClass, instanceName, selfRegistration, false); - } - - /** * Delete a component - * @param componentName component name + * @param containerId component name * @throws IOException */ - public void deleteComponent(String componentName) throws IOException { + public void deleteComponent(ComponentInstanceId instanceId, + String containerId) throws IOException { String path = RegistryUtils.componentPath( user, sliderServiceClass, instanceName, - componentName); + containerId); + LOG.info(instanceId + ": Deleting registry path " + path); registryOperations.delete(path, false); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java index 7e8cf5b..daaf0e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java @@ -27,7 +27,7 @@ public interface RestApiConstants { String COMPONENT_TYPE_YARN_DOCKER = "yarn_docker"; String DEFAULT_START_CMD = "/bootstrap/privileged-centos6-sshd"; - String DEFAULT_COMPONENT_NAME = "DEFAULT"; + String DEFAULT_COMPONENT_NAME = "default"; String DEFAULT_IMAGE = "centos:centos6"; String DEFAULT_NETWORK = "bridge"; String 
DEFAULT_COMMAND_PATH = "/usr/bin/docker"; @@ -52,7 +52,7 @@ public interface RestApiConstants { String PROPERTY_DNS_DEPENDENCY = "site.global.dns.dependency"; String COMMAND_ORDER_SUFFIX_START = "-START"; - String COMMAND_ORDER_SUFFIX_STARTED = "-STARTED"; + String COMMAND_ORDER_SUFFIX_STARTED = "-RUNNING_BUT_UNREADY"; String EXPORT_GROUP_NAME = "QuickLinks"; Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001; http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java deleted file mode 100644 index 3da6e15..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/ServiceApiUtil.java +++ /dev/null @@ -1,407 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.util; - -import com.google.common.annotations.VisibleForTesting; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.registry.client.api.RegistryConstants; -import org.apache.hadoop.registry.client.binding.RegistryUtils; -import org.apache.slider.api.resource.Application; -import org.apache.slider.api.resource.Artifact; -import org.apache.slider.api.resource.Component; -import org.apache.slider.api.resource.Configuration; -import org.apache.slider.api.resource.Resource; -import org.apache.slider.common.tools.SliderFileSystem; -import org.apache.slider.common.tools.SliderUtils; -import org.apache.slider.core.persist.JsonSerDeser; -import org.apache.slider.providers.AbstractClientProvider; -import org.apache.slider.providers.SliderProviderFactory; -import org.apache.slider.server.servicemonitor.MonitorUtils; -import org.codehaus.jackson.map.PropertyNamingStrategy; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class ServiceApiUtil { - private static final Logger LOG = - LoggerFactory.getLogger(ServiceApiUtil.class); - private static JsonSerDeser<Application> jsonSerDeser = - new JsonSerDeser<>(Application.class, - PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES); - - @VisibleForTesting - public static void setJsonSerDeser(JsonSerDeser jsd) { - jsonSerDeser = jsd; - } - - @VisibleForTesting - public static void validateAndResolveApplication(Application application, - SliderFileSystem fs, org.apache.hadoop.conf.Configuration conf) throws - IOException { - boolean dnsEnabled = 
conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED, - RegistryConstants.DEFAULT_DNS_ENABLED); - if (dnsEnabled && RegistryUtils.currentUser().length() > RegistryConstants - .MAX_FQDN_LABEL_LENGTH) { - throw new IllegalArgumentException(RestApiErrorMessages - .ERROR_USER_NAME_INVALID); - } - if (StringUtils.isEmpty(application.getName())) { - throw new IllegalArgumentException( - RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID); - } - if (!SliderUtils.isClusternameValid(application.getName()) || (dnsEnabled - && application.getName().length() > RegistryConstants - .MAX_FQDN_LABEL_LENGTH)) { - throw new IllegalArgumentException(String.format( - RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID_FORMAT, - application.getName())); - } - - // If the application has no components do top-level checks - if (!hasComponent(application)) { - // If artifact is of type APPLICATION, read other application components - if (application.getArtifact() != null && application.getArtifact() - .getType() == Artifact.TypeEnum.APPLICATION) { - if (StringUtils.isEmpty(application.getArtifact().getId())) { - throw new IllegalArgumentException( - RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); - } - Application otherApplication = loadApplication(fs, - application.getArtifact().getId()); - application.setComponents(otherApplication.getComponents()); - application.setArtifact(null); - SliderUtils.mergeMapsIgnoreDuplicateKeys(application.getQuicklinks(), - otherApplication.getQuicklinks()); - } else { - // Since it is a simple app with no components, create a default - // component - Component comp = createDefaultComponent(application); - validateComponent(comp, fs.getFileSystem()); - application.getComponents().add(comp); - if (application.getLifetime() == null) { - application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME); - } - return; - } - } - - // Validate there are no component name collisions (collisions are not - // currently supported) and add any components from 
external applications - // TODO allow name collisions? see AppState#roles - // TODO or add prefix to external component names? - Configuration globalConf = application.getConfiguration(); - Set<String> componentNames = new HashSet<>(); - List<Component> componentsToRemove = new ArrayList<>(); - List<Component> componentsToAdd = new ArrayList<>(); - for (Component comp : application.getComponents()) { - int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH; - if (comp.getUniqueComponentSupport()) { - maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length(); - } - if (dnsEnabled && comp.getName().length() > maxCompLength) { - throw new IllegalArgumentException(String.format(RestApiErrorMessages - .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName())); - } - if (componentNames.contains(comp.getName())) { - throw new IllegalArgumentException("Component name collision: " + - comp.getName()); - } - // If artifact is of type APPLICATION (which cannot be filled from - // global), read external application and add its components to this - // application - if (comp.getArtifact() != null && comp.getArtifact().getType() == - Artifact.TypeEnum.APPLICATION) { - if (StringUtils.isEmpty(comp.getArtifact().getId())) { - throw new IllegalArgumentException( - RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID); - } - LOG.info("Marking {} for removal", comp.getName()); - componentsToRemove.add(comp); - List<Component> externalComponents = getApplicationComponents(fs, - comp.getArtifact().getId()); - for (Component c : externalComponents) { - Component override = application.getComponent(c.getName()); - if (override != null && override.getArtifact() == null) { - // allow properties from external components to be overridden / - // augmented by properties in this component, except for artifact - // which must be read from external component - override.mergeFrom(c); - LOG.info("Merging external component {} from external {}", c - .getName(), comp.getName()); - } 
else { - if (componentNames.contains(c.getName())) { - throw new IllegalArgumentException("Component name collision: " + - c.getName()); - } - componentNames.add(c.getName()); - componentsToAdd.add(c); - LOG.info("Adding component {} from external {}", c.getName(), - comp.getName()); - } - } - } else { - // otherwise handle as a normal component - componentNames.add(comp.getName()); - // configuration - comp.getConfiguration().mergeFrom(globalConf); - } - } - application.getComponents().removeAll(componentsToRemove); - application.getComponents().addAll(componentsToAdd); - - // Validate components and let global values take effect if component level - // values are not provided - Artifact globalArtifact = application.getArtifact(); - Resource globalResource = application.getResource(); - Long globalNumberOfContainers = application.getNumberOfContainers(); - String globalLaunchCommand = application.getLaunchCommand(); - for (Component comp : application.getComponents()) { - // fill in global artifact unless it is type APPLICATION - if (comp.getArtifact() == null && application.getArtifact() != null - && application.getArtifact().getType() != Artifact.TypeEnum - .APPLICATION) { - comp.setArtifact(globalArtifact); - } - // fill in global resource - if (comp.getResource() == null) { - comp.setResource(globalResource); - } - // fill in global container count - if (comp.getNumberOfContainers() == null) { - comp.setNumberOfContainers(globalNumberOfContainers); - } - // fill in global launch command - if (comp.getLaunchCommand() == null) { - comp.setLaunchCommand(globalLaunchCommand); - } - // validate dependency existence - if (comp.getDependencies() != null) { - for (String dependency : comp.getDependencies()) { - if (!componentNames.contains(dependency)) { - throw new IllegalArgumentException(String.format( - RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency, - comp.getName())); - } - } - } - validateComponent(comp, fs.getFileSystem()); - } - - // validate 
dependency tree - sortByDependencies(application.getComponents()); - - // Application lifetime if not specified, is set to unlimited lifetime - if (application.getLifetime() == null) { - application.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME); - } - } - - public static void validateComponent(Component comp, FileSystem fs) - throws IOException { - AbstractClientProvider compClientProvider = SliderProviderFactory - .getClientProvider(comp.getArtifact()); - compClientProvider.validateArtifact(comp.getArtifact(), fs); - - if (comp.getLaunchCommand() == null && (comp.getArtifact() == null || comp - .getArtifact().getType() != Artifact.TypeEnum.DOCKER)) { - throw new IllegalArgumentException(RestApiErrorMessages - .ERROR_ABSENT_LAUNCH_COMMAND); - } - - validateApplicationResource(comp.getResource(), comp); - - if (comp.getNumberOfContainers() == null - || comp.getNumberOfContainers() < 0) { - throw new IllegalArgumentException(String.format( - RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID - + ": " + comp.getNumberOfContainers(), comp.getName())); - } - compClientProvider.validateConfigFiles(comp.getConfiguration() - .getFiles(), fs); - - MonitorUtils.getProbe(comp.getReadinessCheck()); - } - - @VisibleForTesting - public static List<Component> getApplicationComponents(SliderFileSystem - fs, String appName) throws IOException { - return loadApplication(fs, appName).getComponents(); - } - - public static Application loadApplication(SliderFileSystem fs, String - appName) throws IOException { - Path appJson = getAppJsonPath(fs, appName); - LOG.info("Loading application definition from " + appJson); - Application externalApplication = jsonSerDeser.load(fs.getFileSystem(), - appJson); - return externalApplication; - } - - public static Path getAppJsonPath(SliderFileSystem fs, String appName) { - Path appDir = fs.buildClusterDirPath(appName); - Path appJson = new Path(appDir, appName + ".json"); - return appJson; - } - - private static void 
validateApplicationResource(Resource resource, - Component comp) { - // Only apps/components of type APPLICATION can skip resource requirement - if (resource == null) { - throw new IllegalArgumentException( - comp == null ? RestApiErrorMessages.ERROR_RESOURCE_INVALID : String - .format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID, - comp.getName())); - } - // One and only one of profile OR cpus & memory can be specified. Specifying - // both raises validation error. - if (StringUtils.isNotEmpty(resource.getProfile()) && ( - resource.getCpus() != null || StringUtils - .isNotEmpty(resource.getMemory()))) { - throw new IllegalArgumentException(comp == null ? - RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED : - String.format( - RestApiErrorMessages.ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, - comp.getName())); - } - // Currently resource profile is not supported yet, so we will raise - // validation error if only resource profile is specified - if (StringUtils.isNotEmpty(resource.getProfile())) { - throw new IllegalArgumentException( - RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET); - } - - String memory = resource.getMemory(); - Integer cpus = resource.getCpus(); - if (StringUtils.isEmpty(memory)) { - throw new IllegalArgumentException( - comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID : - String.format( - RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, - comp.getName())); - } - if (cpus == null) { - throw new IllegalArgumentException( - comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID : - String.format( - RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID, - comp.getName())); - } - if (cpus <= 0) { - throw new IllegalArgumentException(comp == null ? 
- RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String - .format( - RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, - comp.getName())); - } - } - - public static boolean hasComponent(Application application) { - if (application.getComponents() == null || application.getComponents() - .isEmpty()) { - return false; - } - return true; - } - - public static Component createDefaultComponent(Application app) { - Component comp = new Component(); - comp.setName(RestApiConstants.DEFAULT_COMPONENT_NAME); - comp.setArtifact(app.getArtifact()); - comp.setResource(app.getResource()); - comp.setNumberOfContainers(app.getNumberOfContainers()); - comp.setLaunchCommand(app.getLaunchCommand()); - comp.setConfiguration(app.getConfiguration()); - return comp; - } - - public static Collection<Component> sortByDependencies(List<Component> - components) { - Map<String, Component> sortedComponents = - sortByDependencies(components, null); - return sortedComponents.values(); - } - - /** - * Each internal call of sortByDependencies will identify all of the - * components with the same dependency depth (the lowest depth that has not - * been processed yet) and add them to the sortedComponents list, preserving - * their original ordering in the components list. - * - * So the first time it is called, all components with no dependencies - * (depth 0) will be identified. The next time it is called, all components - * that have dependencies only on the the depth 0 components will be - * identified (depth 1). This will be repeated until all components have - * been added to the sortedComponents list. If no new components are - * identified but the sortedComponents list is not complete, an error is - * thrown. 
- */ - private static Map<String, Component> sortByDependencies(List<Component> - components, Map<String, Component> sortedComponents) { - if (sortedComponents == null) { - sortedComponents = new LinkedHashMap<>(); - } - - Map<String, Component> componentsToAdd = new LinkedHashMap<>(); - List<Component> componentsSkipped = new ArrayList<>(); - for (Component component : components) { - String name = component.getName(); - if (sortedComponents.containsKey(name)) { - continue; - } - boolean dependenciesAlreadySorted = true; - if (!SliderUtils.isEmpty(component.getDependencies())) { - for (String dependency : component.getDependencies()) { - if (!sortedComponents.containsKey(dependency)) { - dependenciesAlreadySorted = false; - break; - } - } - } - if (dependenciesAlreadySorted) { - componentsToAdd.put(name, component); - } else { - componentsSkipped.add(component); - } - } - - if (componentsToAdd.size() == 0) { - throw new IllegalArgumentException(String.format(RestApiErrorMessages - .ERROR_DEPENDENCY_CYCLE, componentsSkipped)); - } - sortedComponents.putAll(componentsToAdd); - if (sortedComponents.size() == components.size()) { - return sortedComponents; - } - return sortByDependencies(components, sortedComponents); - } - - public static String $(String s) { - return "${" + s +"}"; - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/ClientAMProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/ClientAMProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/ClientAMProtocol.proto new file mode 100644 index 0000000..0a21c24 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/ClientAMProtocol.proto @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.yarn.proto"; +option java_outer_classname = "ClientAMProtocol"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +package hadoop.yarn; + +service ClientAMProtocolService { + rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto); + rpc getStatus(GetStatusRequestProto) returns (GetStatusResponseProto); + rpc stop(StopRequestProto) returns (StopResponseProto); +} + +message FlexComponentsRequestProto { + repeated ComponentCountProto components = 1; +} + +message ComponentCountProto { + optional string name = 1; + optional int64 numberOfContainers = 2; +} + +message FlexComponentsResponseProto{ +} + +message GetStatusRequestProto { + +} +message GetStatusResponseProto { + optional string status = 1; +} + +message StopRequestProto { + +} + +message StopResponseProto { + +} \ No newline at end of file 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java new file mode 100644 index 0000000..d99e30e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceApiUtil.java @@ -0,0 +1,539 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.service; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.registry.client.api.RegistryConstants; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.slider.api.resource.Application; +import org.apache.slider.api.resource.Artifact; +import org.apache.slider.api.resource.Component; +import org.apache.slider.api.resource.Resource; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.core.persist.JsonSerDeser; +import org.apache.slider.util.RestApiConstants; +import org.apache.slider.util.RestApiErrorMessages; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static org.apache.slider.util.RestApiConstants.DEFAULT_COMPONENT_NAME; +import static org.apache.slider.util.RestApiConstants.DEFAULT_UNLIMITED_LIFETIME; +import static org.apache.slider.util.RestApiErrorMessages.*; +import static org.apache.slider.util.RestApiErrorMessages.ERROR_CONTAINERS_COUNT_INVALID; +import static org.apache.slider.util.RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET; +import static org.easymock.EasyMock.anyObject; +import static org.easymock.EasyMock.createNiceMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +/** + * Test for ServiceApiUtil helper methods. 
+ */ +public class TestServiceApiUtil { + private static final Logger LOG = LoggerFactory + .getLogger(TestServiceApiUtil.class); + private static final String EXCEPTION_PREFIX = "Should have thrown " + + "exception: "; + private static final String NO_EXCEPTION_PREFIX = "Should not have thrown " + + "exception: "; + + private static final String LEN_64_STR = + "abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz01"; + + private static final YarnConfiguration CONF_DEFAULT_DNS = new + YarnConfiguration(); + private static final YarnConfiguration CONF_DNS_ENABLED = new + YarnConfiguration(); + + @BeforeClass + public static void init() { + CONF_DNS_ENABLED.setBoolean(RegistryConstants.KEY_DNS_ENABLED, true); + } + + @Test(timeout = 90000) + public void testResourceValidation() throws Exception { + assertEquals(RegistryConstants.MAX_FQDN_LABEL_LENGTH + 1, LEN_64_STR + .length()); + + SliderFileSystem sfs = initMock(null); + + Application app = new Application(); + + // no name + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no name"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_APPLICATION_NAME_INVALID, e.getMessage()); + } + + // bad format name + String[] badNames = {"4finance", "Finance", "finance@home", LEN_64_STR}; + for (String badName : badNames) { + app.setName(badName); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with bad name " + badName); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + ERROR_APPLICATION_NAME_INVALID_FORMAT, badName), e.getMessage()); + } + } + + // launch command not specified + app.setName(LEN_64_STR); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "application with no launch command"); + } catch (IllegalArgumentException e) { + 
assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // launch command not specified + app.setName(LEN_64_STR.substring(0, RegistryConstants + .MAX_FQDN_LABEL_LENGTH)); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no launch command"); + } catch (IllegalArgumentException e) { + assertEquals(RestApiErrorMessages.ERROR_ABSENT_LAUNCH_COMMAND, + e.getMessage()); + } + + // resource not specified + app.setLaunchCommand("sleep 3600"); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no resource"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID, + RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // memory not specified + Resource res = new Resource(); + app.setResource(res); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no memory"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID, + RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // invalid no of cpus + res.setMemory("100mb"); + res.setCpus(-2); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail( + EXCEPTION_PREFIX + "application with invalid no of cpus"); + } catch (IllegalArgumentException e) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE, + RestApiConstants.DEFAULT_COMPONENT_NAME), e.getMessage()); + } + + // number of containers not specified + res.setCpus(2); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no container count"); + } catch 
(IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .contains(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // specifying profile along with cpus/memory raises exception + res.setProfile("hbase_finance_large"); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + + "application with resource profile along with cpus/memory"); + } catch (IllegalArgumentException e) { + assertEquals(String.format(RestApiErrorMessages + .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED, + RestApiConstants.DEFAULT_COMPONENT_NAME), + e.getMessage()); + } + + // currently resource profile alone is not supported. + // TODO: remove the next test once resource profile alone is supported. + res.setCpus(null); + res.setMemory(null); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with resource profile only"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET, + e.getMessage()); + } + + // unset profile here and add cpus/memory back + res.setProfile(null); + res.setCpus(2); + res.setMemory("2gb"); + + // null number of containers + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "null number of containers"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // negative number of containers + app.setNumberOfContainers(-1L); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "negative number of containers"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(e.getMessage() + .startsWith(ERROR_CONTAINERS_COUNT_INVALID)); + } + + // everything valid here + app.setNumberOfContainers(5L); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, 
CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + LOG.error("application attributes specified should be valid here", e); + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } + + @Test + public void testArtifacts() throws IOException { + SliderFileSystem sfs = initMock(null); + + Application app = new Application(); + app.setName("name"); + Resource res = new Resource(); + app.setResource(res); + res.setMemory("512M"); + app.setNumberOfContainers(3L); + + // no artifact id fails with default type + Artifact artifact = new Artifact(); + app.setArtifact(artifact); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with APPLICATION type + artifact.setType(Artifact.TypeEnum.APPLICATION); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // no artifact id fails with TARBALL type + artifact.setType(Artifact.TypeEnum.TARBALL); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with no artifact id"); + } catch (IllegalArgumentException e) { + assertEquals(ERROR_ARTIFACT_ID_INVALID, e.getMessage()); + } + + // everything valid here + artifact.setType(Artifact.TypeEnum.DOCKER); + artifact.setId("docker.io/centos:centos7"); + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + LOG.error("application attributes specified should be valid here", e); + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + // defaults assigned + 
assertEquals(app.getComponents().get(0).getName(), + DEFAULT_COMPONENT_NAME); + assertEquals(app.getLifetime(), DEFAULT_UNLIMITED_LIFETIME); + } + + private static Resource createValidResource() { + Resource res = new Resource(); + res.setMemory("512M"); + return res; + } + + private static Component createValidComponent(String compName) { + Component comp = new Component(); + comp.setName(compName); + comp.setResource(createValidResource()); + comp.setNumberOfContainers(1L); + return comp; + } + + private static Application createValidApplication(String compName) { + Application app = new Application(); + app.setLaunchCommand("sleep 3600"); + app.setName("name"); + app.setResource(createValidResource()); + app.setNumberOfContainers(1L); + if (compName != null) { + app.addComponent(createValidComponent(compName)); + } + return app; + } + + private static SliderFileSystem initMock(Application ext) throws IOException { + SliderFileSystem sfs = createNiceMock(SliderFileSystem.class); + FileSystem mockFs = createNiceMock(FileSystem.class); + JsonSerDeser<Application> jsonSerDeser = createNiceMock(JsonSerDeser + .class); + expect(sfs.getFileSystem()).andReturn(mockFs).anyTimes(); + expect(sfs.buildClusterDirPath(anyObject())).andReturn( + new Path("cluster_dir_path")).anyTimes(); + if (ext != null) { + expect(jsonSerDeser.load(anyObject(), anyObject())).andReturn(ext) + .anyTimes(); + } + replay(sfs, mockFs, jsonSerDeser); + ServiceApiUtil.setJsonSerDeser(jsonSerDeser); + return sfs; + } + + @Test + public void testExternalApplication() throws IOException { + Application ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Application app = createValidApplication(null); + + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.APPLICATION); + artifact.setId("id"); + app.setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + 
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + assertNotNull(app.getComponent("comp1")); + } + + @Test + public void testDuplicateComponents() throws IOException { + SliderFileSystem sfs = initMock(null); + + String compName = "comp1"; + Application app = createValidApplication(compName); + app.addComponent(createValidComponent(compName)); + + // duplicate component name fails + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with component collision"); + } catch (IllegalArgumentException e) { + assertEquals("Component name collision: " + compName, e.getMessage()); + } + } + + @Test + public void testExternalDuplicateComponent() throws IOException { + Application ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Application app = createValidApplication("comp1"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.APPLICATION); + artifact.setId("id"); + app.getComponent("comp1").setArtifact(artifact); + + // duplicate component name okay in the case of APPLICATION component + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } + + @Test + public void testExternalComponent() throws IOException { + Application ext = createValidApplication("comp1"); + SliderFileSystem sfs = initMock(ext); + + Application app = createValidApplication("comp2"); + Artifact artifact = new Artifact(); + artifact.setType(Artifact.TypeEnum.APPLICATION); + artifact.setId("id"); + app.setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // artifact ID not inherited from 
global + assertNotNull(app.getComponent("comp2")); + + // set APPLICATION artifact id on component + app.getComponent("comp2").setArtifact(artifact); + + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + assertEquals(1, app.getComponents().size()); + // original component replaced by external component + assertNotNull(app.getComponent("comp1")); + } + + public static void verifyDependencySorting(List<Component> components, + Component... expectedSorting) { + Collection<Component> actualSorting = ServiceApiUtil.sortByDependencies( + components); + assertEquals(expectedSorting.length, actualSorting.size()); + int i = 0; + for (Component component : actualSorting) { + assertEquals(expectedSorting[i++], component); + } + } + + @Test + public void testDependencySorting() throws IOException { + Component a = new Component().name("a"); + Component b = new Component().name("b"); + Component c = new Component().name("c"); + Component d = new Component().name("d").dependencies(Arrays.asList("c")); + Component e = new Component().name("e").dependencies(Arrays.asList("b", + "d")); + + verifyDependencySorting(Arrays.asList(a, b, c), a, b, c); + verifyDependencySorting(Arrays.asList(c, a, b), c, a, b); + verifyDependencySorting(Arrays.asList(a, b, c, d, e), a, b, c, d, e); + verifyDependencySorting(Arrays.asList(e, d, c, b, a), c, b, a, d, e); + + c.setDependencies(Arrays.asList("e")); + try { + verifyDependencySorting(Arrays.asList(a, b, c, d, e)); + Assert.fail(EXCEPTION_PREFIX + "components with dependency cycle"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_CYCLE, Arrays.asList(c, d, + e)), ex.getMessage()); + } + + SliderFileSystem sfs = initMock(null); + Application application = createValidApplication(null); + application.setComponents(Arrays.asList(c, d, e)); + try { + 
ServiceApiUtil.validateAndResolveApplication(application, sfs, + CONF_DEFAULT_DNS); + Assert.fail(EXCEPTION_PREFIX + "components with bad dependencies"); + } catch (IllegalArgumentException ex) { + assertEquals(String.format( + RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, "b", "e"), ex + .getMessage()); + } + } + + @Test + public void testInvalidComponent() throws IOException { + SliderFileSystem sfs = initMock(null); + testComponent(sfs, false); + testComponent(sfs, true); + } + + @Test + public void testValidateCompName() { + String[] invalidNames = { + "EXAMPLE", // UPPER case not allowed + "example_app" // underscore not allowed. + }; + for (String name : invalidNames) { + try { + ServiceApiUtil.validateCompName(name); + Assert.fail(); + } catch (IllegalArgumentException ex) { + ex.printStackTrace(); + } + } + } + + private static void testComponent(SliderFileSystem sfs, boolean unique) + throws IOException { + int maxLen = RegistryConstants.MAX_FQDN_LABEL_LENGTH; + if (unique) { + assertEquals(19, Long.toString(Long.MAX_VALUE).length()); + maxLen = maxLen - Long.toString(Long.MAX_VALUE).length(); + } + String compName = LEN_64_STR.substring(0, maxLen + 1); + Application app = createValidApplication(null); + app.addComponent(createValidComponent(compName).uniqueComponentSupport( + unique)); + + // invalid component name fails if dns is enabled + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + Assert.fail(EXCEPTION_PREFIX + "application with invalid component name"); + } catch (IllegalArgumentException e) { + assertEquals(String.format(RestApiErrorMessages + .ERROR_COMPONENT_NAME_INVALID, maxLen, compName), e.getMessage()); + } + + // does not fail if dns is disabled + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DEFAULT_DNS); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + + compName = LEN_64_STR.substring(0, maxLen); + app = 
createValidApplication(null); + app.addComponent(createValidComponent(compName).uniqueComponentSupport( + unique)); + + // does not fail + try { + ServiceApiUtil.validateAndResolveApplication(app, sfs, CONF_DNS_ENABLED); + } catch (IllegalArgumentException e) { + Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage()); + } + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java new file mode 100644 index 0000000..45be54d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/TestYarnNativeServices.java @@ -0,0 +1,496 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.service; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.curator.test.TestingCluster; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.LocalResource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.MiniYARNCluster; +import org.apache.hadoop.yarn.service.client.ServiceClient; +import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; +import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree; +import org.apache.slider.api.resource.Application; +import org.apache.slider.api.resource.Component; +import org.apache.slider.api.resource.Container; +import org.apache.slider.api.resource.ContainerState; +import org.apache.slider.api.resource.Resource; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.slider.core.exceptions.SliderException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.URL; +import java.util.ArrayList; +import java.util.Arrays; +import 
java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM; +import static org.apache.hadoop.yarn.api.records.YarnApplicationState.FINISHED; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC; +import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_ENABLED; +import static org.apache.hadoop.yarn.service.conf.SliderXmlConfKeys.KEY_AM_RESOURCE_MEM; +import static org.apache.hadoop.yarn.service.conf.SliderXmlConfKeys.KEY_SLIDER_BASE_PATH; + +/** + * End to end tests to test deploying services with MiniYarnCluster and a in-JVM + * ZK testing cluster. + */ +public class TestYarnNativeServices { + + private static final Log LOG = + LogFactory.getLog(TestYarnNativeServices.class); + + private MiniYARNCluster yarnCluster = null; + private MiniDFSCluster hdfsCluster = null; + private FileSystem fs = null; + protected Configuration conf = null; + private static final int NUM_NMS = 1; + private File basedir; + + @Rule + public TemporaryFolder tmpFolder = new TemporaryFolder(); + + @Before + public void setup() throws Exception { + setupInternal(NUM_NMS); + } + + private void setupInternal(int numNodeManager) + throws Exception { + LOG.info("Starting up YARN cluster"); +// Logger rootLogger = LogManager.getRootLogger(); +// rootLogger.setLevel(Level.DEBUG); + conf = new YarnConfiguration(); + conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); + // reduce the teardown waiting time + conf.setLong(YarnConfiguration.DISPATCHER_DRAIN_EVENTS_TIMEOUT, 1000); + conf.set("yarn.log.dir", "target"); + // mark if we need to launch the v1 timeline server + // disable aux-service based timeline aggregators + conf.set(YarnConfiguration.NM_AUX_SERVICES, ""); + 
conf.set(YarnConfiguration.NM_VMEM_PMEM_RATIO, "8"); + // Enable ContainersMonitorImpl + conf.set(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, + LinuxResourceCalculatorPlugin.class.getName()); + conf.set(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, + ProcfsBasedProcessTree.class.getName()); + conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true); + conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true); + conf.setBoolean( + YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true); + conf.setBoolean(TIMELINE_SERVICE_ENABLED, false); + conf.setInt(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, 100); + conf.setLong(DEBUG_NM_DELETE_DELAY_SEC, 60000); + conf.setLong(KEY_AM_RESOURCE_MEM, 128); + // setup zk cluster + TestingCluster zkCluster; + zkCluster = new TestingCluster(1); + zkCluster.start(); + conf.set(YarnConfiguration.RM_ZK_ADDRESS, zkCluster.getConnectString()); + conf.set(KEY_REGISTRY_ZK_QUORUM, zkCluster.getConnectString()); + LOG.info("ZK cluster: " + zkCluster.getConnectString()); + + fs = FileSystem.get(conf); + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + + conf.set(KEY_SLIDER_BASE_PATH, basedir.getAbsolutePath()); + + if (yarnCluster == null) { + yarnCluster = + new MiniYARNCluster(TestYarnNativeServices.class.getSimpleName(), 1, + numNodeManager, 1, 1); + yarnCluster.init(conf); + yarnCluster.start(); + + waitForNMsToRegister(); + + URL url = Thread.currentThread().getContextClassLoader() + .getResource("yarn-site.xml"); + if (url == null) { + throw new RuntimeException( + "Could not find 'yarn-site.xml' dummy file in classpath"); + } + Configuration yarnClusterConfig = yarnCluster.getConfig(); + yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, + new File(url.getPath()).getParent()); + //write the document to a buffer (not directly to the file, as that + //can cause the file being 
written to get read -which will then fail. + ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); + yarnClusterConfig.writeXml(bytesOut); + bytesOut.close(); + //write the bytes to the file in the classpath + OutputStream os = new FileOutputStream(new File(url.getPath())); + os.write(bytesOut.toByteArray()); + os.close(); + LOG.info("Write yarn-site.xml configs to: " + url); + } + if (hdfsCluster == null) { + HdfsConfiguration hdfsConfig = new HdfsConfiguration(); + hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig) + .numDataNodes(1).build(); + } + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + LOG.info("setup thread sleep interrupted. message=" + e.getMessage()); + } + + + } + + private void waitForNMsToRegister() throws Exception { + int sec = 60; + while (sec >= 0) { + if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size() + >= NUM_NMS) { + break; + } + Thread.sleep(1000); + sec--; + } + } + + @After + public void tearDown() throws IOException { + if (yarnCluster != null) { + try { + yarnCluster.stop(); + } finally { + yarnCluster = null; + } + } + if (hdfsCluster != null) { + try { + hdfsCluster.shutdown(); + } finally { + hdfsCluster = null; + } + } + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + SliderFileSystem sfs = new SliderFileSystem(conf); + Path appDir = sfs.getBaseApplicationPath(); + sfs.getFileSystem().delete(appDir, true); + } + + + + // End-to-end test to use ServiceClient to deploy a service. + // 1. Create a service with 2 components, each of which has 2 containers + // 2. Flex up each component to 3 containers and check the component instance names + // 3. Flex down each component to 1 container and check the component instance names + // 4. Flex up each component to 2 containers and check the component instance names + // 5. Stop the service + // 6. 
Destroy the service + @Test (timeout = 500000) + public void testCreateFlexStopDestroyService() throws Exception { + ServiceClient client = createClient(); + Application exampleApp = createExampleApplication(); + client.actionCreate(exampleApp); + waitForAllCompToBeReady(client, exampleApp); + + // Flex two components, each from 2 container to 3 containers. + flexComponents(client, exampleApp, 3L); + // wait for flex to be completed, increase from 2 to 3 containers. + waitForAllCompToBeReady(client, exampleApp); + // check all instances name for each component are in sequential order. + checkCompInstancesInOrder(client, exampleApp); + + // flex down to 1 + flexComponents(client, exampleApp, 1L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // check component dir and registry are cleaned up. + + // flex up again to 2 + flexComponents(client, exampleApp, 2L); + waitForAllCompToBeReady(client, exampleApp); + checkCompInstancesInOrder(client, exampleApp); + + // stop the service + LOG.info("Stop the service"); + client.actionStop(exampleApp.getName()); + ApplicationReport report = client.getYarnClient() + .getApplicationReport(ApplicationId.fromString(exampleApp.getId())); + // AM unregisters with RM successfully + Assert.assertEquals(FINISHED, report.getYarnApplicationState()); + Assert.assertEquals(FinalApplicationStatus.ENDED, + report.getFinalApplicationStatus()); + + LOG.info("Destroy the service"); + //destroy the service and check the app dir is deleted from fs. + client.actionDestroy(exampleApp.getName()); + SliderFileSystem fileSystem = new SliderFileSystem(conf); + Path appDir = fileSystem.buildClusterDirPath(exampleApp.getName()); + // check the application dir on hdfs (in this case, local fs) are deleted. 
+ Assert.assertFalse(fs.exists(appDir)); + } + + // Create compa with 2 containers + // Create compb with 2 containers which depends on compa + // Check containers for compa started before containers for compb + @Test (timeout = 500000) + public void testComponentStartOrder() throws Exception { + ServiceClient client = createClient(); + Application exampleApp = new Application(); + exampleApp.setName("teststartorder"); + exampleApp.addComponent(createComponent("compa", 2, "sleep 1000")); + Component compb = createComponent("compb", 2, "sleep 1000"); + + // Make compb depend on compa. + compb.setDependencies(Collections.singletonList("compa")); + exampleApp.addComponent(compb); + + client.actionCreate(exampleApp); + waitForAllCompToBeReady(client, exampleApp); + + // check that containers for compa are launched before containers for compb + checkContainerLaunchDependencies(client, exampleApp, "compa", "compb"); + + client.actionStop(exampleApp.getName()); + client.actionDestroy(exampleApp.getName()); + } + + // Check containers launched are in dependency order + // Get all containers into a list and sort based on container launch time e.g. + // compa-c1, compa-c2, compb-c1, compb-c2; + // check that the containers' launch times align with the dependencies. + private void checkContainerLaunchDependencies(ServiceClient client, + Application exampleApp, String... compOrder) + throws IOException, YarnException { + Application retrievedApp = client.getStatus(exampleApp.getName()); + List<Container> containerList = new ArrayList<>(); + for (Component component : retrievedApp.getComponents()) { + containerList.addAll(component.getContainers()); + } + // sort based on launchTime + containerList + .sort((o1, o2) -> o1.getLaunchTime().compareTo(o2.getLaunchTime())); + LOG.info("containerList: " + containerList); + // check the containers are in the dependency order. 
+ int index = 0; + for (String comp : compOrder) { + long num = retrievedApp.getComponent(comp).getNumberOfContainers(); + for (int i = 0; i < num; i++) { + String compInstanceName = containerList.get(index).getComponentName(); + String compName = + compInstanceName.substring(0, compInstanceName.lastIndexOf('-')); + Assert.assertEquals(comp, compName); + index++; + } + } + } + + + private Map<String, Long> flexComponents(ServiceClient client, + Application exampleApp, long count) throws YarnException, IOException { + Map<String, Long> compCounts = new HashMap<>(); + compCounts.put("compa", count); + compCounts.put("compb", count); + // flex will update the persisted conf to reflect latest number of containers. + exampleApp.getComponent("compa").setNumberOfContainers(count); + exampleApp.getComponent("compb").setNumberOfContainers(count); + client.flexByRestService(exampleApp.getName(), compCounts); + return compCounts; + } + + // Check each component's comp instances name are in sequential order. + // E.g. If there are two instances compA-1 and compA-2 + // When flex up to 4 instances, it should be compA-1 , compA-2, compA-3, compA-4 + // When flex down to 3 instances, it should be compA-1 , compA-2, compA-3. 
+ private void checkCompInstancesInOrder(ServiceClient client, + Application exampleApp) throws IOException, YarnException { + Application application = client.getStatus(exampleApp.getName()); + for (Component comp : application.getComponents()) { + checkEachCompInstancesInOrder(comp); + } + } + + private void checkRegistryAndCompDirDeleted() { + + } + + private void checkEachCompInstancesInOrder(Component component) { + long expectedNumInstances = component.getNumberOfContainers(); + Assert.assertEquals(expectedNumInstances, component.getContainers().size()); + TreeSet<String> instances = new TreeSet<>(); + for (Container container : component.getContainers()) { + instances.add(container.getComponentName()); + } + + int i = 0; + for (String s : instances) { + Assert.assertEquals(component.getName() + "-" + i, s); + i++; + } + } + + private void waitForOneCompToBeReady(ServiceClient client, + Application exampleApp, String readyComp) + throws TimeoutException, InterruptedException { + long numExpectedContainers = + exampleApp.getComponent(readyComp).getNumberOfContainers(); + GenericTestUtils.waitFor(() -> { + try { + Application retrievedApp = client.getStatus(exampleApp.getName()); + Component retrievedComp = retrievedApp.getComponent(readyComp); + + if (retrievedComp.getContainers() != null + && retrievedComp.getContainers().size() == numExpectedContainers) { + LOG.info(readyComp + " found " + numExpectedContainers + + " containers running"); + return true; + } else { + LOG.info(" Waiting for " + readyComp + "'s containers to be running"); + return false; + } + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 5000, 200000); + } + + // wait until all the containers for all components become ready state + private void waitForAllCompToBeReady(ServiceClient client, + Application exampleApp) throws TimeoutException, InterruptedException { + int expectedTotalContainers = countTotalContainers(exampleApp); + GenericTestUtils.waitFor(() -> { + try 
{ + Application retrievedApp = client.getStatus(exampleApp.getName()); + int totalReadyContainers = 0; + LOG.info("Num Components " + retrievedApp.getComponents().size()); + for (Component component : retrievedApp.getComponents()) { + LOG.info("looking for " + component.getName()); + LOG.info(component); + if (component.getContainers() != null) { + if (component.getContainers().size() == exampleApp + .getComponent(component.getName()).getNumberOfContainers()) { + for (Container container : component.getContainers()) { + LOG.info( + "Container state " + container.getState() + ", component " + + component.getName()); + if (container.getState() == ContainerState.READY) { + totalReadyContainers++; + LOG.info("Found 1 ready container " + container.getId()); + } + } + } else { + LOG.info(component.getName() + " Expected number of containers " + + exampleApp.getComponent(component.getName()) + .getNumberOfContainers() + ", current = " + component + .getContainers()); + } + } + } + LOG.info("Exit loop, totalReadyContainers= " + totalReadyContainers + + " expected = " + expectedTotalContainers); + return totalReadyContainers == expectedTotalContainers; + } catch (Exception e) { + e.printStackTrace(); + return false; + } + }, 5000, 900000); + } + + private ServiceClient createClient() throws Exception { + ServiceClient client = new ServiceClient() { + @Override protected Path addJarResource(String appName, + Map<String, LocalResource> localResources) + throws IOException, SliderException { + // do nothing, the Unit test will use local jars + return null; + } + }; + client.init(conf); + client.start(); + return client; + } + + + private int countTotalContainers(Application application) { + int totalContainers = 0; + for (Component component : application.getComponents()) { + totalContainers += component.getNumberOfContainers(); + } + return totalContainers; + } + // Example service definition + // 2 components, each of which has 2 containers. 
+ private Application createExampleApplication() { + Application exampleApp = new Application(); + exampleApp.setName("example-app"); + exampleApp.addComponent(createComponent("compa")); + exampleApp.addComponent(createComponent("compb")); + return exampleApp; + } + + private Component createComponent(String name) { + return createComponent(name, 2L, "sleep 1000"); + } + + private Component createComponent(String name, long numContainers, + String command) { + Component comp1 = new Component(); + comp1.setNumberOfContainers(numContainers); + comp1.setLaunchCommand(command); + comp1.setName(name); + Resource resource = new Resource(); + comp1.setResource(resource); + resource.setMemory("128"); + resource.setCpus(1); + return comp1; + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/164c0c4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java new file mode 100644 index 0000000..4bc9f26 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/hadoop/yarn/service/client/TestBuildExternalComponents.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.service.client; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.service.conf.ExampleAppJson; +import org.apache.slider.api.resource.Component; +import org.apache.hadoop.yarn.service.client.params.ClientArgs; +import org.apache.slider.common.tools.SliderFileSystem; +import org.apache.hadoop.yarn.service.utils.ServiceApiUtil; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.yarn.service.client.params.Arguments.ARG_APPDEF; +import static org.apache.hadoop.yarn.service.conf.SliderXmlConfKeys.KEY_SLIDER_BASE_PATH; + +/** + * Test for building / resolving components of type APPLICATION. 
+ */ +public class TestBuildExternalComponents { + + protected Configuration conf = new YarnConfiguration(); + private File basedir; + + // Check component names match with expected + private static void checkComponentNames(List<Component> components, + Set<String> expectedComponents) { + Assert.assertEquals(expectedComponents.size(), components.size()); + for (Component comp : components) { + Assert.assertTrue(expectedComponents.contains(comp.getName())); + } + } + + // 1. Build the appDef and store on fs + // 2. check component names + private void buildAndCheckComponents(String appName, String appDef, + SliderFileSystem sfs, Set<String> names) throws Throwable { + String[] args = + { "build", appName, ARG_APPDEF, ExampleAppJson.resourceName(appDef) }; + ClientArgs clientArgs = new ClientArgs(args); + clientArgs.parse(); + ServiceCLI cli = new ServiceCLI() { + @Override protected void createServiceClient() { + client = new ServiceClient(); + client.init(conf); + client.start(); + } + }; + cli.exec(clientArgs); + + // verify generated conf + List<Component> components = + ServiceApiUtil.getApplicationComponents(sfs, appName); + checkComponentNames(components, names); + } + + @Before + public void setup() throws IOException { + basedir = new File("target", "apps"); + if (basedir.exists()) { + FileUtils.deleteDirectory(basedir); + } else { + basedir.mkdirs(); + } + conf.set(KEY_SLIDER_BASE_PATH, basedir.getAbsolutePath()); + } + + @After + public void tearDown() throws IOException { + if (basedir != null) { + FileUtils.deleteDirectory(basedir); + } + } + + // Test applications defining external components(APPLICATION type) + // can be resolved correctly + @Test + public void testExternalComponentBuild() throws Throwable { + SliderFileSystem sfs = new SliderFileSystem(conf); + + Set<String> nameSet = new HashSet<>(); + nameSet.add("simple"); + nameSet.add("master"); + nameSet.add("worker"); + + // app-1 has 3 components: simple, master, worker + 
buildAndCheckComponents("app-1", ExampleAppJson.APP_JSON, sfs, nameSet); + buildAndCheckComponents("external-0", ExampleAppJson.EXTERNAL_JSON_0, sfs, + nameSet); + + nameSet.add("other"); + + // external1 has 3 components: simple(APPLICATION - app1), master and other + buildAndCheckComponents("external-1", ExampleAppJson.EXTERNAL_JSON_1, sfs, + nameSet); + + nameSet.add("another"); + + // external2 has 2 components: ext(APPLICATION - external1), another + buildAndCheckComponents("external-2", ExampleAppJson.EXTERNAL_JSON_2, sfs, + nameSet); + } +} --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org