Repository: hadoop
Updated Branches:
  refs/heads/branch-2 895588b43 -> 1c601e492
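The diff below updates the Capacity Scheduler tests for a changed assignContainers signature: the explicit boolean "needToUnreserve" argument is dropped and callers now pass a ResourceLimits object, from which the queue works out its headroom and whether it would have to unreserve before allocating. As a rough orientation only, here is a toy, self-contained sketch of that decision; it is not CapacityScheduler code, and every name in it is made up for illustration:

public class ResourceLimitSketch {

  // Headroom as asserted in the updated tests: limit minus current usage.
  static int headroomMb(int limitMb, int usedMb) {
    return Math.max(0, limitMb - usedMb);
  }

  // A request fits only if current usage plus the request stays within the limit.
  static boolean fitsWithinLimit(int limitMb, int usedMb, int requestMb) {
    return usedMb + requestMb <= limitMb;
  }

  public static void main(String[] args) {
    final int GB = 1024;
    int limitMb = 10 * GB;   // the limit handed in by the caller
    int usedMb = 8 * GB;     // what the queue has already allocated
    int requestMb = 5 * GB;  // the pending reducer request in the test
    int reservedMb = 0;      // nothing is currently reserved

    // headroom = limit (10G) - used (8G) = 2G, matching the new assertion
    System.out.println("headroom MB: " + headroomMb(limitMb, usedMb));

    // The request does not fit, so the queue would have to unreserve first;
    // with nothing reserved, it neither allocates nor reserves.
    boolean needToUnreserve = !fitsWithinLimit(limitMb, usedMb, requestMb);
    boolean canProceed = !needToUnreserve || reservedMb > 0;
    System.out.println("need to unreserve first: " + needToUnreserve);
    System.out.println("can proceed on this heartbeat: " + canProceed);
  }
}

With a 10 GB limit, 8 GB already used and a 5 GB request, the sketch reproduces the numbers asserted at the end of the TestReservations hunk: 2 GB of headroom and no new reservation, because the request exceeds the limit and there is nothing to unreserve.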
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c601e49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index a5a2e5f..972cabb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -350,8 +350,8 @@ public class TestLeafQueue { // Start testing... // Only 1 container - a.assignContainers(clusterResource, node_0, false, - new ResourceLimits(clusterResource)); + a.assignContainers(clusterResource, node_0, new ResourceLimits( + clusterResource)); assertEquals( (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB), a.getMetrics().getAvailableMB()); @@ -486,7 +486,7 @@ public class TestLeafQueue { // Start testing... // Only 1 container - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -497,7 +497,7 @@ public class TestLeafQueue { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -506,7 +506,7 @@ public class TestLeafQueue { assertEquals(2*GB, a.getMetrics().getAllocatedMB()); // Can't allocate 3rd due to user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -516,7 +516,7 @@ public class TestLeafQueue { // Bump up user-limit-factor, now allocate should work a.setUserLimitFactor(10); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -525,7 +525,7 @@ public class TestLeafQueue { assertEquals(3*GB, a.getMetrics().getAllocatedMB()); // One more should work, for app_1, due to user-limit-factor - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -536,8 +536,8 @@ public class TestLeafQueue { // Test max-capacity // Now - no more allocs 
since we are at max-cap a.setMaxCapacity(0.5f); - a.assignContainers(clusterResource, node_0, false, - new ResourceLimits(clusterResource)); + a.assignContainers(clusterResource, node_0, new ResourceLimits( + clusterResource)); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); @@ -652,21 +652,21 @@ public class TestLeafQueue { // recordFactory))); // 1 container to user_0 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); // Again one to user_0 since he hasn't exceeded user limit yet - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); // One more to user_0 since he is the only active user - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -718,7 +718,7 @@ public class TestLeafQueue { assertEquals("There should only be 1 active user!", 1, qb.getActiveUsersManager().getNumActiveUsers()); //get headroom - qb.assignContainers(clusterResource, node_0, false, + qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, app_0 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), @@ -738,7 +738,7 @@ public class TestLeafQueue { TestUtils.createResourceRequest(ResourceRequest.ANY, 4*GB, 1, true, u1Priority, recordFactory))); qb.submitApplicationAttempt(app_2, user_1); - qb.assignContainers(clusterResource, node_1, false, + qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, app_0 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), @@ -781,9 +781,9 @@ public class TestLeafQueue { u1Priority, recordFactory))); qb.submitApplicationAttempt(app_1, user_0); qb.submitApplicationAttempt(app_3, user_1); - qb.assignContainers(clusterResource, node_0, false, + qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); - qb.assignContainers(clusterResource, node_0, false, + qb.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, app_3 .getResourceRequest(u1Priority, ResourceRequest.ANY).getCapability(), @@ -802,7 +802,7 @@ public class TestLeafQueue { app_4.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 6*GB, 1, true, u0Priority, recordFactory))); - qb.assignContainers(clusterResource, node_1, false, + qb.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); qb.computeUserLimitAndSetHeadroom(app_4, clusterResource, app_4 .getResourceRequest(u0Priority, ResourceRequest.ANY).getCapability(), @@ -875,7 +875,7 @@ public class TestLeafQueue { 
TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 1, true, priority, recordFactory))); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -892,7 +892,7 @@ public class TestLeafQueue { TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, priority, recordFactory))); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -981,7 +981,7 @@ public class TestLeafQueue { 1, a.getActiveUsersManager().getNumActiveUsers()); // 1 container to user_0 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -992,7 +992,7 @@ public class TestLeafQueue { // the application is not yet active // Again one to user_0 since he hasn't exceeded user limit yet - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1009,7 +1009,7 @@ public class TestLeafQueue { // No more to user_0 since he is already over user-limit // and no more containers to queue since it's already at max-cap - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(3*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1023,7 +1023,7 @@ public class TestLeafQueue { TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 0, true, priority, recordFactory))); assertEquals(1, a.getActiveUsersManager().getNumActiveUsers()); - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(0*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap } @@ -1094,7 +1094,7 @@ public class TestLeafQueue { */ // Only 1 container - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1102,7 +1102,7 @@ public class TestLeafQueue { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1110,7 +1110,7 @@ public class TestLeafQueue { // Can't allocate 3rd due to user-limit a.setUserLimit(25); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1129,7 +1129,7 @@ public class TestLeafQueue { // Now 
allocations should goto app_2 since // user_0 is at limit inspite of high user-limit-factor a.setUserLimitFactor(10); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1139,7 +1139,7 @@ public class TestLeafQueue { // Now allocations should goto app_0 since // user_0 is at user-limit not above it - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -1150,7 +1150,7 @@ public class TestLeafQueue { // Test max-capacity // Now - no more allocs since we are at max-cap a.setMaxCapacity(0.5f); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -1162,7 +1162,7 @@ public class TestLeafQueue { // Now, allocations should goto app_3 since it's under user-limit a.setMaxCapacity(1.0f); a.setUserLimitFactor(1); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(7*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -1171,7 +1171,7 @@ public class TestLeafQueue { assertEquals(1*GB, app_3.getCurrentConsumption().getMemory()); // Now we should assign to app_3 again since user_2 is under user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); @@ -1271,7 +1271,7 @@ public class TestLeafQueue { // Start testing... 
// Only 1 container - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1282,7 +1282,7 @@ public class TestLeafQueue { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1291,7 +1291,7 @@ public class TestLeafQueue { assertEquals(2*GB, a.getMetrics().getAllocatedMB()); // Now, reservation should kick in for app_1 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1308,7 +1308,7 @@ public class TestLeafQueue { ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1325,7 +1325,7 @@ public class TestLeafQueue { ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(4*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); @@ -1393,7 +1393,7 @@ public class TestLeafQueue { // Start testing... - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1403,7 +1403,7 @@ public class TestLeafQueue { assertEquals(0*GB, a.getMetrics().getAvailableMB()); // Now, reservation should kick in for app_1 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1417,7 +1417,7 @@ public class TestLeafQueue { // We do not need locality delay here doReturn(-1).when(a).getNodeLocalityDelay(); - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(10*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1434,7 +1434,7 @@ public class TestLeafQueue { ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); @@ -1503,7 +1503,7 @@ public class TestLeafQueue { // Start testing... 
// Only 1 container - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(1*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1511,14 +1511,14 @@ public class TestLeafQueue { // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); // Now, reservation should kick in for app_1 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(6*GB, a.getUsedResources().getMemory()); assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); @@ -1533,7 +1533,7 @@ public class TestLeafQueue { ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1543,7 +1543,7 @@ public class TestLeafQueue { assertEquals(1, app_1.getReReservations(priority)); // Re-reserve - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1553,7 +1553,7 @@ public class TestLeafQueue { assertEquals(2, app_1.getReReservations(priority)); // Try to schedule on node_1 now, should *move* the reservation - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(9*GB, a.getUsedResources().getMemory()); assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); @@ -1571,7 +1571,7 @@ public class TestLeafQueue { ContainerState.COMPLETE, "", ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - CSAssignment assignment = a.assignContainers(clusterResource, node_0, false, + CSAssignment assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); @@ -1643,7 +1643,7 @@ public class TestLeafQueue { CSAssignment assignment = null; // Start with off switch, shouldn't allocate due to delay scheduling - assignment = a.assignContainers(clusterResource, node_2, false, + assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1652,7 +1652,7 @@ public class TestLeafQueue { assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Another off switch, shouldn't allocate due to delay scheduling - assignment = a.assignContainers(clusterResource, node_2, false, + assignment = a.assignContainers(clusterResource, node_2, new 
ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1661,7 +1661,7 @@ public class TestLeafQueue { assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Another off switch, shouldn't allocate due to delay scheduling - assignment = a.assignContainers(clusterResource, node_2, false, + assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1671,7 +1671,7 @@ public class TestLeafQueue { // Another off switch, now we should allocate // since missedOpportunities=3 and reqdContainers=3 - assignment = a.assignContainers(clusterResource, node_2, false, + assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1680,7 +1680,7 @@ public class TestLeafQueue { assertEquals(NodeType.OFF_SWITCH, assignment.getType()); // NODE_LOCAL - node_0 - assignment = a.assignContainers(clusterResource, node_0, false, + assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1689,7 +1689,7 @@ public class TestLeafQueue { assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // NODE_LOCAL - node_1 - assignment = a.assignContainers(clusterResource, node_1, false, + assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1718,14 +1718,14 @@ public class TestLeafQueue { doReturn(1).when(a).getNodeLocalityDelay(); // Shouldn't assign RACK_LOCAL yet - assignment = a.assignContainers(clusterResource, node_3, false, + assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource)); assertEquals(1, app_0.getSchedulingOpportunities(priority)); assertEquals(2, app_0.getTotalRequiredResources(priority)); assertEquals(NodeType.NODE_LOCAL, assignment.getType()); // None->NODE_LOCAL // Should assign RACK_LOCAL now - assignment = a.assignContainers(clusterResource, node_3, false, + assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.RACK_LOCAL), eq(node_3), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1807,7 +1807,7 @@ public class TestLeafQueue { // Start with off switch, shouldn't allocate P1 due to delay scheduling // thus, no P2 either! - a.assignContainers(clusterResource, node_2, false, + a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); @@ -1820,7 +1820,7 @@ public class TestLeafQueue { // Another off-switch, shouldn't allocate P1 due to delay scheduling // thus, no P2 either! 
- a.assignContainers(clusterResource, node_2, false, + a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); @@ -1832,7 +1832,7 @@ public class TestLeafQueue { assertEquals(1, app_0.getTotalRequiredResources(priority_2)); // Another off-switch, shouldn't allocate OFF_SWITCH P1 - a.assignContainers(clusterResource, node_2, false, + a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); @@ -1844,7 +1844,7 @@ public class TestLeafQueue { assertEquals(1, app_0.getTotalRequiredResources(priority_2)); // Now, DATA_LOCAL for P1 - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0), eq(priority_1), any(ResourceRequest.class), any(Container.class)); @@ -1856,7 +1856,7 @@ public class TestLeafQueue { assertEquals(1, app_0.getTotalRequiredResources(priority_2)); // Now, OFF_SWITCH for P2 - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1), eq(priority_1), any(ResourceRequest.class), any(Container.class)); @@ -1933,7 +1933,7 @@ public class TestLeafQueue { app_0.updateResourceRequests(app_0_requests_0); // NODE_LOCAL - node_0_1 - a.assignContainers(clusterResource, node_0_0, false, + a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_0_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1942,7 +1942,7 @@ public class TestLeafQueue { // No allocation on node_1_0 even though it's node/rack local since // required(ANY) == 0 - a.assignContainers(clusterResource, node_1_0, false, + a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1959,7 +1959,7 @@ public class TestLeafQueue { // No allocation on node_0_1 even though it's node/rack local since // required(rack_1) == 0 - a.assignContainers(clusterResource, node_0_1, false, + a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -1967,7 +1967,7 @@ public class TestLeafQueue { assertEquals(1, app_0.getTotalRequiredResources(priority)); // NODE_LOCAL - node_1 - a.assignContainers(clusterResource, node_1_0, false, + a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2220,7 +2220,7 @@ public class TestLeafQueue { // node_0_1 // Shouldn't allocate since RR(rack_0) = null && RR(ANY) = relax: false - a.assignContainers(clusterResource, node_0_1, false, + a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_0_1), any(Priority.class), 
any(ResourceRequest.class), any(Container.class)); @@ -2243,7 +2243,7 @@ public class TestLeafQueue { // node_1_1 // Shouldn't allocate since RR(rack_1) = relax: false - a.assignContainers(clusterResource, node_1_1, false, + a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_0_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2274,7 +2274,7 @@ public class TestLeafQueue { // node_1_1 // Shouldn't allocate since node_1_1 is blacklisted - a.assignContainers(clusterResource, node_1_1, false, + a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2303,7 +2303,7 @@ public class TestLeafQueue { // node_1_1 // Shouldn't allocate since rack_1 is blacklisted - a.assignContainers(clusterResource, node_1_1, false, + a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2330,7 +2330,7 @@ public class TestLeafQueue { // Blacklist: < host_0_0 > <---- // Now, should allocate since RR(rack_1) = relax: true - a.assignContainers(clusterResource, node_1_1, false, + a.assignContainers(clusterResource, node_1_1, new ResourceLimits(clusterResource)); verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL), eq(node_1_1), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2361,7 +2361,7 @@ public class TestLeafQueue { // host_1_0: 8G // host_1_1: 7G - a.assignContainers(clusterResource, node_1_0, false, + a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource)); verify(app_0).allocate(eq(NodeType.NODE_LOCAL), eq(node_1_0), any(Priority.class), any(ResourceRequest.class), any(Container.class)); @@ -2444,7 +2444,7 @@ public class TestLeafQueue { recordFactory))); try { - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); } catch (NullPointerException e) { Assert.fail("NPE when allocating container on node but " http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c601e49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 4f89386..7da1c97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -156,7 +156,7 @@ public class TestParentQueue { // Next call - nothing if (allocation > 0) { doReturn(new 
CSAssignment(Resources.none(), type)).when(queue) - .assignContainers(eq(clusterResource), eq(node), eq(false), + .assignContainers(eq(clusterResource), eq(node), any(ResourceLimits.class)); // Mock the node's resource availability @@ -167,8 +167,7 @@ public class TestParentQueue { return new CSAssignment(allocatedResource, type); } - }). -when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), + }).when(queue).assignContainers(eq(clusterResource), eq(node), any(ResourceLimits.class)); } @@ -232,7 +231,7 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 1*GB); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 1*GB, clusterResource); @@ -240,13 +239,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G stubQueueAllocation(a, clusterResource, node_1, 2*GB); stubQueueAllocation(b, clusterResource, node_1, 1*GB); - root.assignContainers(clusterResource, node_1, false, + root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); InOrder allocationOrder = inOrder(a, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); @@ -254,13 +253,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // since A has 2/6G while B has 2/14G stubQueueAllocation(a, clusterResource, node_0, 1*GB); stubQueueAllocation(b, clusterResource, node_0, 2*GB); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); @@ -268,13 +267,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // since A has 3/6G while B has 4/14G stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 4*GB); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + 
any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 8*GB, clusterResource); @@ -282,13 +281,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // since A has 3/6G while B has 8/14G stubQueueAllocation(a, clusterResource, node_1, 1*GB); stubQueueAllocation(b, clusterResource, node_1, 1*GB); - root.assignContainers(clusterResource, node_1, false, + root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); allocationOrder = inOrder(a, b); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 4*GB, clusterResource); verifyQueueMetrics(b, 9*GB, clusterResource); } @@ -405,6 +404,22 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), @Test public void testMultiLevelQueues() throws Exception { + /* + * Structure of queue: + * Root + * ____________ + * / | \ \ + * A B C D + * / | / | \ \ + * A1 A2 B1 B2 B3 C1 + * \ + * C11 + * \ + * C111 + * \ + * C1111 + */ + // Setup queue configs setupMultiLevelQueues(csConf); @@ -449,7 +464,7 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), stubQueueAllocation(b, clusterResource, node_0, 0*GB); stubQueueAllocation(c, clusterResource, node_0, 1*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 0*GB, clusterResource); @@ -462,7 +477,7 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), stubQueueAllocation(a, clusterResource, node_1, 0*GB); stubQueueAllocation(b2, clusterResource, node_1, 4*GB); stubQueueAllocation(c, clusterResource, node_1, 0*GB); - root.assignContainers(clusterResource, node_1, false, + root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); @@ -474,15 +489,15 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), stubQueueAllocation(a1, clusterResource, node_0, 1*GB); stubQueueAllocation(b3, clusterResource, node_0, 2*GB); stubQueueAllocation(c, clusterResource, node_0, 2*GB); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); InOrder allocationOrder = inOrder(a, c, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(c).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 1*GB, clusterResource); verifyQueueMetrics(b, 6*GB, clusterResource); verifyQueueMetrics(c, 3*GB, clusterResource); @@ -501,17 +516,17 @@ 
when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), stubQueueAllocation(b3, clusterResource, node_2, 1*GB); stubQueueAllocation(b1, clusterResource, node_2, 1*GB); stubQueueAllocation(c, clusterResource, node_2, 1*GB); - root.assignContainers(clusterResource, node_2, false, + root.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); allocationOrder = inOrder(a, a2, a1, b, c); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(a2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(c).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 3*GB, clusterResource); verifyQueueMetrics(b, 8*GB, clusterResource); verifyQueueMetrics(c, 4*GB, clusterResource); @@ -611,7 +626,7 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB, NodeType.OFF_SWITCH); stubQueueAllocation(b, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verifyQueueMetrics(a, 0*GB, clusterResource); verifyQueueMetrics(b, 1*GB, clusterResource); @@ -620,13 +635,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // also, B gets a scheduling opportunity since A allocates RACK_LOCAL stubQueueAllocation(a, clusterResource, node_1, 2*GB, NodeType.RACK_LOCAL); stubQueueAllocation(b, clusterResource, node_1, 1*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_1, false, + root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); InOrder allocationOrder = inOrder(a, b); allocationOrder.verify(a).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 2*GB, clusterResource); @@ -635,13 +650,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // However, since B returns off-switch, A won't get an opportunity stubQueueAllocation(a, clusterResource, node_0, 1*GB, NodeType.NODE_LOCAL); stubQueueAllocation(b, clusterResource, node_0, 2*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); allocationOrder = inOrder(b, a); allocationOrder.verify(b).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(a).assignContainers(eq(clusterResource), - 
any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(a, 2*GB, clusterResource); verifyQueueMetrics(b, 4*GB, clusterResource); @@ -680,7 +695,7 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // Simulate B3 returning a container on node_0 stubQueueAllocation(b2, clusterResource, node_0, 0*GB, NodeType.OFF_SWITCH); stubQueueAllocation(b3, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); verifyQueueMetrics(b2, 0*GB, clusterResource); verifyQueueMetrics(b3, 1*GB, clusterResource); @@ -689,13 +704,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // also, B3 gets a scheduling opportunity since B2 allocates RACK_LOCAL stubQueueAllocation(b2, clusterResource, node_1, 1*GB, NodeType.RACK_LOCAL); stubQueueAllocation(b3, clusterResource, node_1, 1*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_1, false, + root.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); InOrder allocationOrder = inOrder(b2, b3); allocationOrder.verify(b2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b3).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(b2, 1*GB, clusterResource); verifyQueueMetrics(b3, 2*GB, clusterResource); @@ -704,13 +719,13 @@ when(queue).assignContainers(eq(clusterResource), eq(node), eq(false), // However, since B3 returns off-switch, B2 won't get an opportunity stubQueueAllocation(b2, clusterResource, node_0, 1*GB, NodeType.NODE_LOCAL); stubQueueAllocation(b3, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); - root.assignContainers(clusterResource, node_0, false, + root.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); allocationOrder = inOrder(b3, b2); allocationOrder.verify(b3).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); allocationOrder.verify(b2).assignContainers(eq(clusterResource), - any(FiCaSchedulerNode.class), anyBoolean(), anyResourceLimits()); + any(FiCaSchedulerNode.class), anyResourceLimits()); verifyQueueMetrics(b2, 1*GB, clusterResource); verifyQueueMetrics(b3, 3*GB, clusterResource); http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c601e49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index b3250e5..c5b7587 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -265,7 +265,7 @@ public class TestReservations { // Start testing... // Only AM - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); @@ -277,7 +277,7 @@ public class TestReservations { assertEquals(0 * GB, node_2.getUsedResource().getMemory()); // Only 1 map - simulating reduce - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); @@ -289,7 +289,7 @@ public class TestReservations { assertEquals(0 * GB, node_2.getUsedResource().getMemory()); // Only 1 map to other node - simulating reduce - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -304,7 +304,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -320,7 +320,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 - a.assignContainers(clusterResource, node_2, false, + a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); @@ -337,7 +337,7 @@ public class TestReservations { // node_1 heartbeat and unreserves from node_0 in order to allocate // on node_1 - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(18 * GB, app_0.getCurrentConsumption().getMemory()); @@ -421,7 +421,7 @@ public class TestReservations { // Start testing... 
// Only AM - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); @@ -433,7 +433,7 @@ public class TestReservations { assertEquals(0 * GB, node_2.getUsedResource().getMemory()); // Only 1 map - simulating reduce - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); @@ -445,7 +445,7 @@ public class TestReservations { assertEquals(0 * GB, node_2.getUsedResource().getMemory()); // Only 1 map to other node - simulating reduce - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -460,7 +460,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -476,7 +476,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 - a.assignContainers(clusterResource, node_2, false, + a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource)); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); @@ -493,7 +493,7 @@ public class TestReservations { // node_1 heartbeat and won't unreserve from node_0, potentially stuck // if AM doesn't handle - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(18 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); @@ -569,7 +569,7 @@ public class TestReservations { // Start testing... 
// Only AM - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); @@ -580,7 +580,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map - simulating reduce - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); @@ -591,7 +591,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map to other node - simulating reduce - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -605,7 +605,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -620,7 +620,7 @@ public class TestReservations { assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // could allocate but told need to unreserve first - a.assignContainers(clusterResource, node_1, true, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); @@ -652,6 +652,8 @@ public class TestReservations { String host_1 = "host_1"; FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8 * GB); + + Resource clusterResource = Resources.createResource(2 * 8 * GB); // Setup resource-requests Priority priorityMap = TestUtils.createMockPriority(5); @@ -681,23 +683,28 @@ public class TestReservations { node_0.getNodeID(), "user", rmContext); // no reserved containers - NodeId unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability); + NodeId unreserveId = + app_0.getNodeIdToUnreserve(priorityMap, capability, + cs.getResourceCalculator(), clusterResource); assertEquals(null, unreserveId); // no reserved containers - reserve then unreserve app_0.reserve(node_0, priorityMap, rmContainer_1, container_1); app_0.unreserve(node_0, priorityMap); - unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability); + unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, + cs.getResourceCalculator(), clusterResource); assertEquals(null, unreserveId); // no container large enough is reserved app_0.reserve(node_0, priorityMap, rmContainer_1, container_1); - unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability); + unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, + cs.getResourceCalculator(), clusterResource); assertEquals(null, unreserveId); // reserve one that is now large enough app_0.reserve(node_1, priorityMap, rmContainer, container); - unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability); + unreserveId = app_0.getNodeIdToUnreserve(priorityMap, capability, + 
cs.getResourceCalculator(), clusterResource); assertEquals(node_1.getNodeID(), unreserveId); } @@ -741,14 +748,14 @@ public class TestReservations { // nothing reserved boolean res = a.findNodeToUnreserve(csContext.getClusterResource(), - node_1, app_0, priorityMap, capability); + node_1, app_0, priorityMap, capability, capability); assertFalse(res); // reserved but scheduler doesn't know about that node. app_0.reserve(node_1, priorityMap, rmContainer, container); node_1.reserveResource(app_0, priorityMap, rmContainer); res = a.findNodeToUnreserve(csContext.getClusterResource(), node_1, app_0, - priorityMap, capability); + priorityMap, capability, capability); assertFalse(res); } @@ -815,7 +822,7 @@ public class TestReservations { // Start testing... // Only AM - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); @@ -826,7 +833,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map - simulating reduce - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); @@ -837,7 +844,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map to other node - simulating reduce - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -852,14 +859,15 @@ public class TestReservations { // absoluteMaxCapacity Resource capability = Resources.createResource(32 * GB, 0); boolean res = - a.canAssignToThisQueue(clusterResource, capability, - CommonNodeLabelsManager.EMPTY_STRING_SET, app_0, true); + a.canAssignToThisQueue(clusterResource, + CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( + clusterResource), capability, Resources.none()); assertFalse(res); // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -872,14 +880,17 @@ public class TestReservations { capability = Resources.createResource(5 * GB, 0); res = - a.canAssignToThisQueue(clusterResource, capability, - CommonNodeLabelsManager.EMPTY_STRING_SET, app_0, true); + a.canAssignToThisQueue(clusterResource, + CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( + clusterResource), capability, Resources + .createResource(5 * GB)); assertTrue(res); // tell to not check reservations res = - a.canAssignToThisQueue(clusterResource, capability, - CommonNodeLabelsManager.EMPTY_STRING_SET, app_0, false); + a.canAssignToThisQueue(clusterResource, + CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( + clusterResource), capability, Resources.none()); assertFalse(res); refreshQueuesTurnOffReservationsContLook(a, csConf); @@ -887,13 +898,16 @@ public class 
TestReservations { // should return false no matter what checkReservations is passed // in since feature is off res = - a.canAssignToThisQueue(clusterResource, capability, - CommonNodeLabelsManager.EMPTY_STRING_SET, app_0, false); + a.canAssignToThisQueue(clusterResource, + CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( + clusterResource), capability, Resources.none()); assertFalse(res); res = - a.canAssignToThisQueue(clusterResource, capability, - CommonNodeLabelsManager.EMPTY_STRING_SET, app_0, true); + a.canAssignToThisQueue(clusterResource, + CommonNodeLabelsManager.EMPTY_STRING_SET, new ResourceLimits( + clusterResource), capability, Resources + .createResource(5 * GB)); assertFalse(res); } @@ -985,15 +999,15 @@ public class TestReservations { .createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory))); app_0.updateResourceRequests(Collections.singletonList(TestUtils - .createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, - priorityReduce, recordFactory))); - app_0.updateResourceRequests(Collections.singletonList(TestUtils .createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory))); + app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 5 * GB, 2, true, + priorityReduce, recordFactory))); // Start testing... // Only AM - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(2 * GB, a.getUsedResources().getMemory()); assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); @@ -1004,7 +1018,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map - simulating reduce - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(5 * GB, a.getUsedResources().getMemory()); assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); @@ -1015,7 +1029,7 @@ public class TestReservations { assertEquals(0 * GB, node_1.getUsedResource().getMemory()); // Only 1 map to other node - simulating reduce - a.assignContainers(clusterResource, node_1, false, + a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource)); assertEquals(8 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -1029,7 +1043,7 @@ public class TestReservations { // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity - a.assignContainers(clusterResource, node_0, false, + a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource)); assertEquals(13 * GB, a.getUsedResources().getMemory()); assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); @@ -1117,18 +1131,18 @@ public class TestReservations { .createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priorityAM, recordFactory))); app_0.updateResourceRequests(Collections.singletonList(TestUtils - .createResourceRequest(ResourceRequest.ANY, 5 * GB, 1, true, - priorityReduce, recordFactory))); - app_0.updateResourceRequests(Collections.singletonList(TestUtils .createResourceRequest(ResourceRequest.ANY, 3 * GB, 2, true, priorityMap, recordFactory))); app_0.updateResourceRequests(Collections.singletonList(TestUtils + .createResourceRequest(ResourceRequest.ANY, 5 * GB, 1, true, + 
+            priorityReduce, recordFactory)));
+    app_0.updateResourceRequests(Collections.singletonList(TestUtils
         .createResourceRequest(ResourceRequest.ANY, 8 * GB, 2, true,
             priorityLast, recordFactory)));
 
     // Start testing...
     // Only AM
-    a.assignContainers(clusterResource, node_0, false,
+    a.assignContainers(clusterResource, node_0,
         new ResourceLimits(clusterResource));
     assertEquals(2 * GB, a.getUsedResources().getMemory());
     assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory());
@@ -1140,7 +1154,7 @@ public class TestReservations {
     assertEquals(0 * GB, node_2.getUsedResource().getMemory());
 
     // Only 1 map - simulating reduce
-    a.assignContainers(clusterResource, node_0, false,
+    a.assignContainers(clusterResource, node_0,
         new ResourceLimits(clusterResource));
     assertEquals(5 * GB, a.getUsedResources().getMemory());
     assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory());
@@ -1152,7 +1166,7 @@ public class TestReservations {
     assertEquals(0 * GB, node_2.getUsedResource().getMemory());
 
     // Only 1 map to other node - simulating reduce
-    a.assignContainers(clusterResource, node_1, false,
+    a.assignContainers(clusterResource, node_1,
         new ResourceLimits(clusterResource));
     assertEquals(8 * GB, a.getUsedResources().getMemory());
     assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
@@ -1164,38 +1178,41 @@ public class TestReservations {
     assertEquals(3 * GB, node_1.getUsedResource().getMemory());
     assertEquals(0 * GB, node_2.getUsedResource().getMemory());
 
-    // try to assign reducer (5G on node 0), but tell it
-    // it has to unreserve. No room to allocate and shouldn't reserve
-    // since nothing currently reserved.
-    a.assignContainers(clusterResource, node_0, true,
-        new ResourceLimits(clusterResource));
+    // try to assign reducer (5G on node 0), but tell it its resource limit is
+    // less than used (8G) + required (5G). It will not be reserved, since that
+    // would need an unreserve. Even with continuous reservation looking, we
+    // don't allow unreserving resources in order to reserve a container.
+    a.assignContainers(clusterResource, node_0,
+        new ResourceLimits(Resources.createResource(10 * GB)));
     assertEquals(8 * GB, a.getUsedResources().getMemory());
     assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
     assertEquals(0 * GB, a.getMetrics().getReservedMB());
     assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
     assertEquals(16 * GB, a.getMetrics().getAvailableMB());
-    assertEquals(16 * GB, app_0.getHeadroom().getMemory());
+    // app_0's headroom = limit (10G) - used (8G) = 2G
+    assertEquals(2 * GB, app_0.getHeadroom().getMemory());
     assertEquals(5 * GB, node_0.getUsedResource().getMemory());
     assertEquals(3 * GB, node_1.getUsedResource().getMemory());
     assertEquals(0 * GB, node_2.getUsedResource().getMemory());
 
-    // try to assign reducer (5G on node 2), but tell it
-    // it has to unreserve. Has room but shouldn't reserve
-    // since nothing currently reserved.
-    a.assignContainers(clusterResource, node_2, true,
-        new ResourceLimits(clusterResource));
+    // try to assign reducer (5G on node 2), but tell it its resource limit is
+    // less than used (8G) + required (5G). It will not be reserved: that would
+    // need an unreserve, and unfortunately there's nothing to unreserve.
+    a.assignContainers(clusterResource, node_2,
+        new ResourceLimits(Resources.createResource(10 * GB)));
     assertEquals(8 * GB, a.getUsedResources().getMemory());
     assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory());
     assertEquals(0 * GB, a.getMetrics().getReservedMB());
     assertEquals(8 * GB, a.getMetrics().getAllocatedMB());
     assertEquals(16 * GB, a.getMetrics().getAvailableMB());
-    assertEquals(16 * GB, app_0.getHeadroom().getMemory());
+    // app_0's headroom = limit (10G) - used (8G) = 2G
+    assertEquals(2 * GB, app_0.getHeadroom().getMemory());
     assertEquals(5 * GB, node_0.getUsedResource().getMemory());
     assertEquals(3 * GB, node_1.getUsedResource().getMemory());
     assertEquals(0 * GB, node_2.getUsedResource().getMemory());
 
     // let it assign 5G to node_2
-    a.assignContainers(clusterResource, node_2, false,
+    a.assignContainers(clusterResource, node_2,
         new ResourceLimits(clusterResource));
     assertEquals(13 * GB, a.getUsedResources().getMemory());
     assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
@@ -1208,7 +1225,7 @@ public class TestReservations {
     assertEquals(5 * GB, node_2.getUsedResource().getMemory());
 
     // reserve 8G node_0
-    a.assignContainers(clusterResource, node_0, false,
+    a.assignContainers(clusterResource, node_0,
         new ResourceLimits(clusterResource));
     assertEquals(21 * GB, a.getUsedResources().getMemory());
     assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
@@ -1223,7 +1240,7 @@ public class TestReservations {
     // try to assign (8G on node 2). No room to allocate,
     // continued to try due to having reservation above,
     // but hits queue limits so can't reserve anymore.
-    a.assignContainers(clusterResource, node_2, false,
+    a.assignContainers(clusterResource, node_2,
         new ResourceLimits(clusterResource));
     assertEquals(21 * GB, a.getUsedResources().getMemory());
     assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory());
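A quick note on the arithmetic the new assertions rely on: handing assignContainers a ResourceLimits of 10G while app_0 already uses 8G leaves 2G of headroom, which is why the 5G reducer can neither be allocated nor reserved without unreserving something first. The short sketch below is not part of the patch; the class name, main method, and GB constant are illustrative only, while Resources.createResource, Resources.subtract, and Resource.getMemory are the stock helpers from org.apache.hadoop.yarn.util.resource.Resources and the Resource record used throughout this test. It simply replays that calculation:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class HeadroomArithmeticSketch {
  private static final int GB = 1024;

  public static void main(String[] args) {
    // Limit handed down via new ResourceLimits(Resources.createResource(10 * GB)).
    Resource limit = Resources.createResource(10 * GB);
    // Memory app_0 has already consumed at this point in the test (8G).
    Resource used = Resources.createResource(8 * GB);
    // Headroom = limit - used = 2G, matching assertEquals(2 * GB, app_0.getHeadroom()...).
    // A 5G reducer does not fit, and the scheduler will not reserve it either,
    // because that would require unreserving resources first.
    Resource headroom = Resources.subtract(limit, used);
    System.out.println(headroom.getMemory() + " MB of headroom");  // prints 2048 MB of headroom
  }
}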
