goiri commented on a change in pull request #3779:
URL: https://github.com/apache/hadoop/pull/3779#discussion_r768021357
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
##########
@@ -1265,8 +1265,27 @@
</property>
<property>
- <description>Max number of OPPORTUNISTIC containers to queue at the
- nodemanager.</description>
+ <description>
+ At the NM, the policy to determine whether to queue an
+ OPPORTUNISTIC container or not.
+ If set to BY_QUEUE_LEN, uses the queue capacity, as set by
+ yarn.nodemanager.opportunistic-containers-max-queue-length
+ to limit how many containers to accept/queue.
+ If set to BY_RESOURCES, limits the number of containers
+ accepted based on the resource capacity of the node.
+ </description>
+ <name>yarn.nodemanager.opportunistic-containers-queue-policy</name>
+ <value>BY_QUEUE_LEN</value>
+ </property>
+
+ <property>
+ <description>
+ Max number of OPPORTUNISTIC containers to queue at the
+ nodemanager (NM). If the value is 0, NMs do not allow any
+ OPPORTUNISTIC containers.
+ If the value is positive, the NM caps the number of OPPORTUNISTIC
Review comment:
We may want to state the negative case explicitly too.
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
##########
@@ -106,9 +109,39 @@
private final AsyncDispatcher dispatcher;
private final NodeManagerMetrics metrics;
+ private final OpportunisticContainersQueuePolicy oppContainersQueuePolicy;
private Boolean usePauseEventForPreemption = false;
+ private static int getMaxOppQueueLengthFromConf(final Context context) {
+ if (context == null || context.getConf() == null) {
+ return YarnConfiguration
+ .DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH;
+ }
+
+ return context.getConf().getInt(
+ YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH,
+ YarnConfiguration.DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH
+ );
+ }
+
+ private static OpportunisticContainersQueuePolicy
+ getOppContainersQueuePolicyFromConf(final Context context) {
+ final String queuePolicy;
+ if (context == null || context.getConf() == null) {
+ queuePolicy = YarnConfiguration
+ .DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY;
+ } else {
+ queuePolicy = context.getConf().get(
Review comment:
I think Configuration#getEnum() could be an option here.
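Something along these lines might be simpler (untested sketch; assumes the
default here is BY_QUEUE_LEN, matching the yarn-default.xml value):

    final OpportunisticContainersQueuePolicy queuePolicy;
    if (context == null || context.getConf() == null) {
      queuePolicy = OpportunisticContainersQueuePolicy.BY_QUEUE_LEN;
    } else {
      queuePolicy = context.getConf().getEnum(
          YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
          OpportunisticContainersQueuePolicy.BY_QUEUE_LEN);
    }
    return queuePolicy;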
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
##########
@@ -92,29 +110,32 @@ private boolean hasResourcesAvailable(long pMemBytes, long vMemBytes,
if (LOG.isDebugEnabled()) {
LOG.debug("pMemCheck [current={} + asked={} > allowed={}]",
this.containersAllocation.getPhysicalMemory(),
- (pMemBytes >> 20),
- (getContainersMonitor().getPmemAllocatedForContainers() >> 20));
+ convertBytesToMB(pMemBytes),
+ convertBytesToMB(
+ getContainersMonitor().getPmemAllocatedForContainers()));
}
if (this.containersAllocation.getPhysicalMemory() +
- (int) (pMemBytes >> 20) >
- (int) (getContainersMonitor()
- .getPmemAllocatedForContainers() >> 20)) {
+ (int) convertBytesToMB(pMemBytes) >
Review comment:
Do we need to cast?
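If convertBytesToMB() returns a long, the int on the left-hand side should be
promoted automatically and the casts can go away, e.g. (untested sketch):

    if (this.containersAllocation.getPhysicalMemory()
        + convertBytesToMB(pMemBytes)
        > convertBytesToMB(
            getContainersMonitor().getPmemAllocatedForContainers())) {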
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerSchedulerTest.java
##########
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ConfigurationException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.NMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.TestContainerSchedulerQueuing;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.mockito.Mockito.spy;
+
+public class BaseContainerSchedulerTest extends BaseContainerManagerTest {
+ private static final long TWO_GB = 2048 * 1024 * 1024L;
+
+ public BaseContainerSchedulerTest() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ LOG = LoggerFactory.getLogger(TestContainerSchedulerQueuing.class);
+ }
+
+ public static class Listener implements ContainerStateTransitionListener {
+
+ public final Map<ContainerId, List<ContainerState>> states =
+ new HashMap<>();
+ public final Map<ContainerId, List<ContainerEventType>> events =
+ new HashMap<>();
+
+ @Override
+ public void init(Context context) {}
+
+ @Override
+ public void preTransition(ContainerImpl op,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
Review comment:
Didn't you import this? Plain ContainerState should be correct.
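i.e. the signature could just use the imported type:

    @Override
    public void preTransition(ContainerImpl op,
        ContainerState beforeState,
        ContainerEvent eventToBeProcessed) {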
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
##########
@@ -429,6 +482,21 @@ private boolean resourceAvailableToStartContainer(Container container) {
return this.utilizationTracker.hasResourcesAvailable(container);
}
+ private boolean resourceAvailableToQueueOppContainer(
+ Container newOppContainer) {
+ final Resource cumulativeResource = Resource.newInstance(Resources.none());
+ for (final Container container : queuedGuaranteedContainers.values()) {
+ Resources.addTo(cumulativeResource, container.getResource());
+ }
+
+ for (final Container container : queuedOpportunisticContainers.values()) {
+ Resources.addTo(cumulativeResource, container.getResource());
+ }
+
+ Resources.addTo(cumulativeResource, newOppContainer.getResource());
+ return this.utilizationTracker.hasResourcesAvailable(cumulativeResource);
Review comment:
Is it possible for utilizationTracker to be null?
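If it can be, a defensive guard might be worth adding here (sketch only):

    if (this.utilizationTracker == null) {
      // No tracker to reason about capacity; be conservative and do not queue.
      return false;
    }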
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+ extends BaseContainerSchedulerTest {
+ public TestContainerSchedulerOppContainersByResources()
+ throws UnsupportedFileSystemException {
+ }
+
+ @Override
+ public void setup() throws IOException {
+ conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+ OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+ super.setup();
+ }
+
+ private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+ return containerStatus.getContainerSubState() == ContainerSubState.RUNNING
Review comment:
Can we split this?
if (containerStatus.getContainerSubState() == ContainerSubState.RUNNING) {
return true;
}
...
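A fuller version of that split could look like this (untested sketch):

    private static boolean isSuccessfulRun(
        final ContainerStatus containerStatus) {
      final ContainerSubState subState = containerStatus.getContainerSubState();
      if (subState == ContainerSubState.RUNNING
          || subState == ContainerSubState.COMPLETING) {
        return true;
      }
      return subState == ContainerSubState.DONE
          && containerStatus.getState()
              == org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE;
    }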
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerSchedulerTest.java
##########
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ConfigurationException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.NMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.TestContainerSchedulerQueuing;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.mockito.Mockito.spy;
+
+public class BaseContainerSchedulerTest extends BaseContainerManagerTest {
+ private static final long TWO_GB = 2048 * 1024 * 1024L;
+
+ public BaseContainerSchedulerTest() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ LOG = LoggerFactory.getLogger(TestContainerSchedulerQueuing.class);
+ }
+
+ public static class Listener implements ContainerStateTransitionListener {
+
+ public final Map<ContainerId, List<ContainerState>> states =
+ new HashMap<>();
+ public final Map<ContainerId, List<ContainerEventType>> events =
+ new HashMap<>();
+
+ @Override
+ public void init(Context context) {}
+
+ @Override
+ public void preTransition(ContainerImpl op,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState beforeState,
+ ContainerEvent eventToBeProcessed) {
+ if (!states.containsKey(op.getContainerId())) {
+ states.put(op.getContainerId(), new ArrayList<>());
+ states.get(op.getContainerId()).add(beforeState);
+ events.put(op.getContainerId(), new ArrayList<>());
+ }
+ }
+
+ @Override
+ public void postTransition(ContainerImpl op,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState beforeState,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState afterState,
+ ContainerEvent processedEvent) {
+ states.get(op.getContainerId()).add(afterState);
+ events.get(op.getContainerId()).add(processedEvent.getType());
+ }
+ }
+
+ protected boolean delayContainers = true;
+
+ @Override
+ protected ContainerManagerImpl createContainerManager(
+ DeletionService delSrvc) {
+ return new ContainerManagerImpl(context, exec, delSrvc,
+ nodeStatusUpdater, metrics, dirsHandler) {
+
+ @Override
+ protected UserGroupInformation getRemoteUgi() throws YarnException {
+ ApplicationId appId = ApplicationId.newInstance(0, 0);
+ ApplicationAttemptId appAttemptId =
+ ApplicationAttemptId.newInstance(appId, 1);
+ UserGroupInformation ugi =
+ UserGroupInformation.createRemoteUser(appAttemptId.toString());
+ ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId, context
+ .getNodeId(), user, context.getNMTokenSecretManager().getCurrentKey()
+ .getKeyId()));
+ return ugi;
+ }
+
+ @Override
+ protected ContainersMonitor createContainersMonitor(
+ ContainerExecutor exec) {
+ return new ContainersMonitorImpl(exec, dispatcher, this.context) {
+ // Define resources available for containers to be executed.
+ @Override
+ public long getPmemAllocatedForContainers() {
+ return TWO_GB;
+ }
+
+ @Override
+ public long getVmemAllocatedForContainers() {
+ float pmemRatio = getConfig().getFloat(
+ YarnConfiguration.NM_VMEM_PMEM_RATIO,
+ YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
+ return (long) (pmemRatio * getPmemAllocatedForContainers());
+ }
+
+ @Override
+ public long getVCoresAllocatedForContainers() {
+ return 4;
+ }
+ };
+ }
+ };
+ }
+
+ @Override
+ protected ContainerExecutor createContainerExecutor() {
+ DefaultContainerExecutor exec = new DefaultContainerExecutor() {
+ ConcurrentMap<String, Boolean> oversleepMap =
+ new ConcurrentHashMap<String, Boolean>();
+
+ /**
+ * Launches the container.
+ * If delayContainers is turned on, then we sleep a while before
+ * starting the container.
+ */
+ @Override
+ public int launchContainer(ContainerStartContext ctx)
+ throws IOException, ConfigurationException {
+ final String containerId =
+ ctx.getContainer().getContainerId().toString();
+ oversleepMap.put(containerId, false);
+ if (delayContainers) {
+ try {
+ Thread.sleep(10000);
Review comment:
This is a long time. Do we even get to the following if?
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerSchedulerTest.java
##########
@@ -0,0 +1,195 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ConfigurationException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.NMTokenIdentifier;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerStateTransitionListener;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitor;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler.TestContainerSchedulerQueuing;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.mockito.Mockito.spy;
+
+public class BaseContainerSchedulerTest extends BaseContainerManagerTest {
+ private static final long TWO_GB = 2048 * 1024 * 1024L;
+
+ public BaseContainerSchedulerTest() throws UnsupportedFileSystemException {
+ super();
+ }
+
+ static {
+ LOG = LoggerFactory.getLogger(TestContainerSchedulerQueuing.class);
+ }
+
+ public static class Listener implements ContainerStateTransitionListener {
+
+ public final Map<ContainerId, List<ContainerState>> states =
+ new HashMap<>();
+ public final Map<ContainerId, List<ContainerEventType>> events =
+ new HashMap<>();
+
+ @Override
+ public void init(Context context) {}
+
+ @Override
+ public void preTransition(ContainerImpl op,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState beforeState,
+ ContainerEvent eventToBeProcessed) {
+ if (!states.containsKey(op.getContainerId())) {
+ states.put(op.getContainerId(), new ArrayList<>());
+ states.get(op.getContainerId()).add(beforeState);
+ events.put(op.getContainerId(), new ArrayList<>());
+ }
+ }
+
+ @Override
+ public void postTransition(ContainerImpl op,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState beforeState,
+ org.apache.hadoop.yarn.server.nodemanager.containermanager.container.
+ ContainerState afterState,
+ ContainerEvent processedEvent) {
+ states.get(op.getContainerId()).add(afterState);
+ events.get(op.getContainerId()).add(processedEvent.getType());
+ }
+ }
+
+ protected boolean delayContainers = true;
+
+ @Override
+ protected ContainerManagerImpl createContainerManager(
+ DeletionService delSrvc) {
+ return new ContainerManagerImpl(context, exec, delSrvc,
Review comment:
This is a lot of code for something simple. Should we extract it?
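For instance, the UGI construction alone could move into a small helper in the
base class (sketch; the helper name is just a suggestion):

    private UserGroupInformation createAppAttemptUgi() {
      ApplicationId appId = ApplicationId.newInstance(0, 0);
      ApplicationAttemptId appAttemptId =
          ApplicationAttemptId.newInstance(appId, 1);
      UserGroupInformation ugi =
          UserGroupInformation.createRemoteUser(appAttemptId.toString());
      ugi.addTokenIdentifier(new NMTokenIdentifier(appAttemptId,
          context.getNodeId(), user,
          context.getNMTokenSecretManager().getCurrentKey().getKeyId()));
      return ugi;
    }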
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/AllocationBasedResourceUtilizationTracker.java
##########
@@ -80,10 +84,24 @@ public void subtractContainerResource(Container container) {
*/
@Override
public boolean hasResourcesAvailable(Container container) {
- long pMemBytes = container.getResource().getMemorySize() * 1024 * 1024L;
- return hasResourcesAvailable(pMemBytes,
- (long) (getContainersMonitor().getVmemRatio()* pMemBytes),
- container.getResource().getVirtualCores());
+ return hasResourcesAvailable(container.getResource());
+ }
+
+ private static long convertMBToBytes(final long memMB) {
Review comment:
You may want to add a javadoc comment here.
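For example, something like:

    /**
     * Converts a memory size expressed in megabytes to bytes.
     *
     * @param memMB the memory size in MB.
     * @return the equivalent memory size in bytes.
     */
    private static long convertMBToBytes(final long memMB) {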
##########
File path:
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerOppContainersByResources.java
##########
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.scheduler;
+
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ContainerSubState;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerSchedulerTest;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Tests the behavior of {@link ContainerScheduler} when the max queue length
+ * is set to {@literal < 0} such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ */
+public class TestContainerSchedulerOppContainersByResources
+ extends BaseContainerSchedulerTest {
+ public TestContainerSchedulerOppContainersByResources()
+ throws UnsupportedFileSystemException {
+ }
+
+ @Override
+ public void setup() throws IOException {
+ conf.set(YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_QUEUE_POLICY,
+ OpportunisticContainersQueuePolicy.BY_RESOURCES.name());
+ super.setup();
+ }
+
+ private static boolean isSuccessfulRun(final ContainerStatus containerStatus) {
+ return containerStatus.getContainerSubState() == ContainerSubState.RUNNING
+ || containerStatus.getContainerSubState() == ContainerSubState.COMPLETING
+ || (
+ containerStatus.getContainerSubState() == ContainerSubState.DONE &&
+ containerStatus.getState() == org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE
+ );
+ }
+
+ private void verifyRunAndKilledContainers(
+ final List<ContainerId> statList,
+ final int numExpectedContainers, final Set<ContainerId> runContainers,
+ final Set<ContainerId> killedContainers)
+ throws TimeoutException, InterruptedException {
+ GenericTestUtils.waitFor(
+ () -> {
+ GetContainerStatusesRequest statRequest =
+ GetContainerStatusesRequest.newInstance(statList);
+ final List<ContainerStatus> containerStatuses;
+ try {
+ containerStatuses = containerManager
+ .getContainerStatuses(statRequest).getContainerStatuses();
+ } catch (final Exception e) {
+ return false;
+ }
+
+ if (numExpectedContainers != containerStatuses.size()) {
+ return false;
+ }
+
+ for (final ContainerStatus status : containerStatuses) {
+ if (runContainers.contains(status.getContainerId())) {
+ if (!isSuccessfulRun(status)) {
+ return false;
+ }
+ } else if (killedContainers.contains(status.getContainerId())) {
+ if (!status.getDiagnostics()
+ .contains("Opportunistic container queue is full")) {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ return true;
+ }, 1000, 10000);
+ }
+
+ /**
+ * Tests that newly arrived containers after the resources are filled up
+ * get killed and never run.
+ */
+ @Test
+ public void testOpportunisticRunsWhenResourcesAvailable() throws Exception {
+ containerManager.start();
+ List<StartContainerRequest> list = new ArrayList<>();
+ final int numContainers = 8;
+ final int numContainersQueued = 4;
+ final Set<ContainerId> runContainers = new HashSet<>();
+ final Set<ContainerId> killedContainers = new HashSet<>();
+
+ for (int i = 0; i < numContainers; i++) {
+ // OContainers that should be run
+ list.add(StartContainerRequest.newInstance(
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ createContainerToken(createContainerId(i), DUMMY_RM_IDENTIFIER,
+ context.getNodeId(),
+ user, BuilderUtils.newResource(512, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.OPPORTUNISTIC)));
+ }
+
+ StartContainersRequest allRequests =
+ StartContainersRequest.newInstance(list);
+ containerManager.startContainers(allRequests);
+
+ // Wait for containers to start
+ for (int i = 0; i < numContainersQueued; i++) {
+ final ContainerId containerId = createContainerId(i);
+ BaseContainerManagerTest
+ .waitForNMContainerState(containerManager, containerId,
+ ContainerState.RUNNING, 40);
+ runContainers.add(containerId);
+ }
+
+ // Wait for containers to be killed
+ for (int i = numContainersQueued; i < numContainers; i++) {
+ final ContainerId containerId = createContainerId(i);
+ BaseContainerManagerTest
+ .waitForNMContainerState(containerManager, createContainerId(i),
+ ContainerState.DONE, 40);
+ killedContainers.add(containerId);
+ }
+
+ Thread.sleep(5000);
+
+ // Get container statuses.
+ List<ContainerId> statList = new ArrayList<>();
+ for (int i = 0; i < numContainers; i++) {
+ statList.add(createContainerId(i));
+ }
+
+ verifyRunAndKilledContainers(
+ statList, numContainers, runContainers, killedContainers);
+
+ ContainerScheduler containerScheduler =
+ containerManager.getContainerScheduler();
+ Assert.assertEquals(0,
+ containerScheduler.getNumQueuedContainers());
+ Assert.assertEquals(0,
+ containerScheduler.getNumQueuedGuaranteedContainers());
+ Assert.assertEquals(0,
+ containerScheduler.getNumQueuedOpportunisticContainers());
+ Assert.assertEquals(0,
+ metrics.getQueuedOpportunisticContainers());
+ Assert.assertEquals(0, metrics.getQueuedGuaranteedContainers());
+ }
+
+ /**
+ * Sets the max queue length to negative such that the NM only queues
+ * containers if there's enough resources on the node to start
+ * all queued containers.
+ * Tests that newly arrived containers after the resources are filled up
+ * get killed and never run.
+ */
+ @Test
+ public void testKillOpportunisticWhenNoResourcesAvailable() throws Exception {
+ containerManager.start();
+ List<StartContainerRequest> list = new ArrayList<>();
+
+ // GContainer that takes up the whole node
+ list.add(StartContainerRequest.newInstance(
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+ context.getNodeId(),
+ user, BuilderUtils.newResource(2048, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.GUARANTEED)));
+
+ // OContainer that should be killed
+ list.add(StartContainerRequest.newInstance(
+ recordFactory.newRecordInstance(ContainerLaunchContext.class),
+ createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+ context.getNodeId(),
+ user, BuilderUtils.newResource(2048, 1),
+ context.getContainerTokenSecretManager(), null,
+ ExecutionType.OPPORTUNISTIC)));
+
+ StartContainersRequest allRequests =
+ StartContainersRequest.newInstance(list);
+ containerManager.startContainers(allRequests);
+
+ BaseContainerManagerTest.waitForNMContainerState(containerManager,
+ createContainerId(0), ContainerState.RUNNING, 40);
+
+ // Wait for the OContainer to get killed
+ BaseContainerManagerTest.waitForNMContainerState(containerManager,
+ createContainerId(1), ContainerState.DONE,
Review comment:
Make this a single line instead of dropping the 40 to the next line.
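i.e.:

    BaseContainerManagerTest.waitForNMContainerState(containerManager,
        createContainerId(1), ContainerState.DONE, 40);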