YARN-5994. TestCapacityScheduler.testAMLimitUsage fails intermittently. Contributed by Eric Badger
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b326822c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b326822c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b326822c

Branch: refs/heads/HDFS-10467
Commit: b326822cc2e74ed7bf8f14503fddad56ec775367
Parents: f592346
Author: Eric Payne <[email protected]>
Authored: Fri Apr 14 10:53:09 2017 -0500
Committer: Inigo <[email protected]>
Committed: Mon Apr 17 11:17:03 2017 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/TestCapacityScheduler.java | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b326822c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 447ee3d..bf1f6eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -40,6 +40,7 @@ import java.util.Set;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +49,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -3626,7 +3628,7 @@ public class TestCapacityScheduler {
     Assert.assertEquals(queueInfoB.getDefaultNodeLabelExpression(), "y");
   }
 
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testAMLimitUsage() throws Exception {
 
     CapacitySchedulerConfiguration config =
@@ -3754,7 +3756,8 @@ public class TestCapacityScheduler {
   private void verifyAMLimitForLeafQueue(
       CapacitySchedulerConfiguration config) throws Exception {
     MockRM rm = setUpMove(config);
-    rm.registerNode("127.0.0.1:1234", 2 * GB);
+    int nodeMemory = 4 * GB;
+    rm.registerNode("127.0.0.1:1234", nodeMemory);
 
     String queueName = "a1";
     String userName = "user_0";
@@ -3770,6 +3773,14 @@ public class TestCapacityScheduler {
         Resource.newInstance(amResourceLimit.getMemorySize() + 2048,
             amResourceLimit.getVirtualCores() + 1);
 
+    // Wait for the scheduler to be updated with new node capacity
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return scheduler.getMaximumResourceCapability().getMemorySize() == nodeMemory;
+      }
+    }, 100, 60 * 1000);
+
     rm.submitApp(amResource1, "app-1", userName, null, queueName);
 
     rm.submitApp(amResource2, "app-2", userName, null, queueName);
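For readers unfamiliar with the GenericTestUtils.waitFor idiom this patch relies on, the standalone sketch below (not part of the commit; the class name, the AtomicLong stand-in, and the timing values are illustrative only) shows the same polling pattern: block the test until an asynchronously updated value reaches the expected state, or fail with a TimeoutException. It assumes hadoop-common's test artifact and Guava are on the classpath, matching the com.google.common.base.Supplier signature used in the diff.

import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForPatternExample {
  public static void main(String[] args) throws Exception {
    // Stand-in for asynchronously updated state, e.g. the scheduler's
    // maximum resource capability after a node registers with the RM.
    final AtomicLong observedMemory = new AtomicLong(0);
    final long expectedMemory = 4096;

    // Background thread that applies the update "later", standing in for
    // the node-added event the scheduler processes asynchronously.
    Thread updater = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          Thread.sleep(500);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        observedMemory.set(expectedMemory);
      }
    });
    updater.start();

    // Poll the condition every 100 ms for up to 60 s; waitFor throws a
    // TimeoutException if the condition never becomes true.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return observedMemory.get() == expectedMemory;
      }
    }, 100, 60 * 1000);

    System.out.println("observed memory = " + observedMemory.get());
  }
}

Polling for the condition, rather than assuming the node capacity is visible immediately after registerNode, is what removes the race that made the test intermittent; the enlarged @Test timeout simply gives the wait room to complete on slow machines.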
