Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java Mon Jan 14 03:44:35 2013 @@ -91,10 +91,10 @@ public class TestCapacityScheduler { private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(String hostName, int containerManagerPort, int httpPort, - String rackName, int memory) + String rackName, Resource capability) throws IOException { return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( - hostName, containerManagerPort, httpPort, rackName, memory, + hostName, containerManagerPort, httpPort, rackName, capability, resourceManager.getResourceTrackerService(), resourceManager .getRMContext()); } @@ -107,13 +107,15 @@ public class TestCapacityScheduler { // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB); + registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(4 * GB, 1)); nm_0.heartbeat(); // Register node2 String host_1 = "host_1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = - registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB); + registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(2 * GB, 1)); nm_1.heartbeat(); // ResourceRequest priorities @@ -129,10 +131,10 @@ public class TestCapacityScheduler { application_0.addNodeManager(host_0, 1234, nm_0); application_0.addNodeManager(host_1, 1234, nm_1); - Resource capability_0_0 = Resources.createResource(1 * GB); + Resource capability_0_0 = Resources.createResource(1 * GB, 1); application_0.addResourceRequestSpec(priority_1, capability_0_0); - Resource capability_0_1 = Resources.createResource(2 * GB); + Resource capability_0_1 = Resources.createResource(2 * GB, 1); application_0.addResourceRequestSpec(priority_0, capability_0_1); Task task_0_0 = new Task(application_0, priority_1, @@ -146,10 +148,10 @@ public class TestCapacityScheduler { application_1.addNodeManager(host_0, 1234, nm_0); application_1.addNodeManager(host_1, 1234, nm_1); - Resource capability_1_0 = Resources.createResource(3 * GB); + Resource capability_1_0 = Resources.createResource(3 * GB, 1); application_1.addResourceRequestSpec(priority_1, capability_1_0); - Resource capability_1_1 = Resources.createResource(2 * GB); + Resource capability_1_1 = Resources.createResource(2 * GB, 1); application_1.addResourceRequestSpec(priority_0, capability_1_1); Task task_1_0 = new 
Task(application_1, priority_1,
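The hunks above (and the matching TestFifoScheduler change later in this commit) replace the int memory parameter of the test helper registerNode with a full Resource capability, so registered nodes carry virtual cores as well as memory. A minimal sketch of the helper as it reads after the patch, with an illustrative call site; resourceManager and GB are existing fields of the test class:

// Helper inside TestCapacityScheduler; callers now build the capability
// themselves instead of passing a bare memory value.
private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(
    String hostName, int containerManagerPort, int httpPort,
    String rackName, Resource capability) throws IOException {
  return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
      hostName, containerManagerPort, httpPort, rackName, capability,
      resourceManager.getResourceTrackerService(),
      resourceManager.getRMContext());
}

// Example call: a 4 GB node with one virtual core.
// registerNode("host_0", 1234, 2345, NetworkTopology.DEFAULT_RACK,
//     Resources.createResource(4 * GB, 1));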
Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java Mon Jan 14 03:44:35 2013 @@ -55,6 +55,8 @@ import org.apache.hadoop.yarn.conf.YarnC import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceCalculator; +import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -90,6 +92,8 @@ public class TestLeafQueue { final static int GB = 1024; final static String DEFAULT_RACK = "/default"; + private final ResourceCalculator resourceCalculator = new DefaultResourceCalculator(); + @Before public void setUp() throws Exception { CapacityScheduler spyCs = new CapacityScheduler(); @@ -108,17 +112,22 @@ public class TestLeafQueue { when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getConf()).thenReturn(conf); when(csContext.getMinimumResourceCapability()). - thenReturn(Resources.createResource(GB)); + thenReturn(Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()). - thenReturn(Resources.createResource(16*GB)); + thenReturn(Resources.createResource(16*GB, 32)); when(csContext.getClusterResources()). - thenReturn(Resources.createResource(100 * 16 * GB)); + thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32)); + when(csContext.getApplicationComparator()). + thenReturn(CapacityScheduler.applicationComparator); + when(csContext.getQueueComparator()). + thenReturn(CapacityScheduler.queueComparator); + when(csContext.getResourceCalculator()). 
+ thenReturn(resourceCalculator); + root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); cs.reinitialize(csConf, rmContext); @@ -266,7 +275,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -387,7 +397,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -519,7 +530,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB); final int numNodes = 2; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -612,7 +624,7 @@ public class TestLeafQueue { FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 8*GB); final int numNodes = 2; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = Resources.createResource(numNodes * (8*GB), 1); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -728,7 +740,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8*GB); final int numNodes = 1; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -889,7 +902,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 4*GB); final int numNodes = 2; - Resource clusterResource = Resources.createResource(numNodes * (4*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (4*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -990,7 +1004,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); final int numNodes = 3; - Resource clusterResource = Resources.createResource(numNodes * (4*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (4*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests @@ -1090,11 +1105,13 @@ public class TestLeafQueue { FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, DEFAULT_RACK, 0, 4*GB); final int numNodes = 3; - Resource clusterResource = Resources.createResource(numNodes * (4*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (4*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); when(csContext.getMaximumResourceCapability()).thenReturn( - 
Resources.createResource(4*GB)); - when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4*GB)); + Resources.createResource(4*GB, 16)); + when(a.getMaximumAllocation()).thenReturn( + Resources.createResource(4*GB, 16)); when(a.getMinimumAllocationFactor()).thenReturn(0.25f); // 1G / 4G // Setup resource-requests @@ -1164,12 +1181,14 @@ public class TestLeafQueue { // Now finish another container from app_0 and see the reservation cancelled a.completedContainer(clusterResource, app_0, node_0, app_0.getLiveContainers().iterator().next(), null, RMContainerEventType.KILL); - a.assignContainers(clusterResource, node_0); - assertEquals(4*GB, a.getUsedResources().getMemory()); + CSAssignment assignment = a.assignContainers(clusterResource, node_0); + assertEquals(8*GB, a.getUsedResources().getMemory()); assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); assertEquals(0*GB, node_0.getUsedResource().getMemory()); + assertEquals(4*GB, + assignment.getExcessReservation().getContainer().getResource().getMemory()); } @@ -1204,7 +1223,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB); final int numNodes = 3; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests and submit @@ -1344,7 +1364,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8*GB); final int numNodes = 3; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = + Resources.createResource(numNodes * (8*GB), 1); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests and submit @@ -1473,7 +1494,8 @@ public class TestLeafQueue { FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8*GB); final int numNodes = 3; - Resource clusterResource = Resources.createResource(numNodes * (8*GB)); + Resource clusterResource = Resources.createResource( + numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Setup resource-requests and submit Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java Mon Jan 14 03:44:35 2013 
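The TestLeafQueue hunks above and the TestParentQueue hunks below share the same setup change: the mocked CapacitySchedulerContext now also supplies a ResourceCalculator and the queue/application comparators, CapacityScheduler.parseQueue no longer takes the comparators as arguments, and every Resources.createResource call gains a virtual-core dimension. A condensed sketch of that Mockito setup, using the csContext mock and DefaultResourceCalculator named in the diff (not a complete @Before method):

ResourceCalculator resourceCalculator = new DefaultResourceCalculator();

when(csContext.getMinimumResourceCapability())
    .thenReturn(Resources.createResource(GB, 1));            // memory MB, vcores
when(csContext.getMaximumResourceCapability())
    .thenReturn(Resources.createResource(16 * GB, 32));
when(csContext.getClusterResources())
    .thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32));
when(csContext.getApplicationComparator())
    .thenReturn(CapacityScheduler.applicationComparator);
when(csContext.getQueueComparator())
    .thenReturn(CapacityScheduler.queueComparator);
when(csContext.getResourceCalculator())
    .thenReturn(resourceCalculator);

// parseQueue now reads the comparators from the context instead of taking
// them as parameters:
CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null,
    CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);

The TestLeafQueue hunk at @@ -1164 also tightens the reservation test: assignContainers now returns a CSAssignment, and the test asserts the 4 GB excess reservation on that assignment explicitly instead of expecting the reservation to be silently cancelled.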
@@ -45,6 +45,8 @@ import org.apache.hadoop.yarn.api.record import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceCalculator; +import org.apache.hadoop.yarn.server.resourcemanager.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @@ -68,6 +70,9 @@ public class TestParentQueue { final static int GB = 1024; final static String DEFAULT_RACK = "/default"; + private final ResourceCalculator resourceComparator = + new DefaultResourceCalculator(); + @Before public void setUp() throws Exception { rmContext = TestUtils.getMockRMContext(); @@ -78,11 +83,17 @@ public class TestParentQueue { when(csContext.getConf()).thenReturn(conf); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getMinimumResourceCapability()).thenReturn( - Resources.createResource(GB)); + Resources.createResource(GB, 1)); when(csContext.getMaximumResourceCapability()).thenReturn( - Resources.createResource(16*GB)); + Resources.createResource(16*GB, 32)); when(csContext.getClusterResources()). - thenReturn(Resources.createResource(100 * 16 * GB)); + thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32)); + when(csContext.getApplicationComparator()). + thenReturn(CapacityScheduler.applicationComparator); + when(csContext.getQueueComparator()). + thenReturn(CapacityScheduler.queueComparator); + when(csContext.getResourceCalculator()). + thenReturn(resourceComparator); } private static final String A = "a"; @@ -104,7 +115,7 @@ public class TestParentQueue { private FiCaSchedulerApp getMockApplication(int appId, String user) { FiCaSchedulerApp application = mock(FiCaSchedulerApp.class); doReturn(user).when(application).getUser(); - doReturn(Resources.createResource(0)).when(application).getHeadroom(); + doReturn(Resources.createResource(0, 0)).when(application).getHeadroom(); return application; } @@ -192,12 +203,11 @@ public class TestParentQueue { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); // Setup some nodes final int memoryPerNode = 10; + final int coresPerNode = 16; final int numNodes = 2; FiCaSchedulerNode node_0 = @@ -206,7 +216,8 @@ public class TestParentQueue { TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB); final Resource clusterResource = - Resources.createResource(numNodes * (memoryPerNode*GB)); + Resources.createResource(numNodes * (memoryPerNode*GB), + numNodes * coresPerNode); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Start testing @@ -286,8 +297,7 @@ public class TestParentQueue { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); + TestUtils.spyHook); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -301,8 +311,7 @@ public class TestParentQueue { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - 
CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); + TestUtils.spyHook); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -316,8 +325,7 @@ public class TestParentQueue { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); + TestUtils.spyHook); } catch (IllegalArgumentException ie) { exceptionOccured = true; } @@ -394,12 +402,11 @@ public class TestParentQueue { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); // Setup some nodes final int memoryPerNode = 10; + final int coresPerNode = 16; final int numNodes = 3; FiCaSchedulerNode node_0 = @@ -410,7 +417,8 @@ public class TestParentQueue { TestUtils.getMockNode("host_2", DEFAULT_RACK, 0, memoryPerNode*GB); final Resource clusterResource = - Resources.createResource(numNodes * (memoryPerNode*GB)); + Resources.createResource(numNodes * (memoryPerNode*GB), + numNodes * coresPerNode); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Start testing @@ -510,8 +518,6 @@ public class TestParentQueue { Map<String, CSQueue> queues = new HashMap<String, CSQueue>(); CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); } @@ -529,8 +535,6 @@ public class TestParentQueue { Map<String, CSQueue> queues = new HashMap<String, CSQueue>(); CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); } @@ -553,8 +557,6 @@ public class TestParentQueue { try { CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); } catch (IllegalArgumentException e) { fail("Failed to create queues with 0 capacity: " + e); @@ -571,12 +573,11 @@ public class TestParentQueue { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); // Setup some nodes final int memoryPerNode = 10; + final int coresPerNode = 16; final int numNodes = 2; FiCaSchedulerNode node_0 = @@ -585,7 +586,8 @@ public class TestParentQueue { TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB); final Resource clusterResource = - Resources.createResource(numNodes * (memoryPerNode*GB)); + Resources.createResource(numNodes * (memoryPerNode*GB), + numNodes * coresPerNode); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Start testing @@ -637,12 +639,11 @@ public class TestParentQueue { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); // Setup some nodes final int memoryPerNode = 10; + final int coresPerNode = 10; final int numNodes = 2; FiCaSchedulerNode node_0 = @@ -651,7 +652,8 @@ public class 
TestParentQueue { TestUtils.getMockNode("host_1", DEFAULT_RACK, 0, memoryPerNode*GB); final Resource clusterResource = - Resources.createResource(numNodes * (memoryPerNode*GB)); + Resources.createResource(numNodes * (memoryPerNode*GB), + numNodes * coresPerNode); when(csContext.getNumClusterNodes()).thenReturn(numNodes); // Start testing @@ -721,8 +723,6 @@ public class TestParentQueue { CSQueue root = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, - CapacityScheduler.queueComparator, - CapacityScheduler.applicationComparator, TestUtils.spyHook); UserGroupInformation user = UserGroupInformation.getCurrentUser(); Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java Mon Jan 14 03:44:35 2013 @@ -118,7 +118,7 @@ public class TestUtils { RecordFactory recordFactory) { ResourceRequest request = recordFactory.newRecordInstance(ResourceRequest.class); - Resource capability = Resources.createResource(memory); + Resource capability = Resources.createResource(memory, 1); request.setNumContainers(numContainers); request.setHostName(hostName); @@ -153,7 +153,7 @@ public class TestUtils { RMNode rmNode = mock(RMNode.class); when(rmNode.getNodeID()).thenReturn(nodeId); when(rmNode.getTotalCapability()).thenReturn( - Resources.createResource(capability)); + Resources.createResource(capability, 1)); when(rmNode.getNodeAddress()).thenReturn(host+":"+port); when(rmNode.getHostName()).thenReturn(host); when(rmNode.getRackName()).thenReturn(rack); Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java Mon 
Jan 14 03:44:35 2013 @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import java.io.File; @@ -26,6 +28,7 @@ import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -53,6 +56,7 @@ import org.apache.hadoop.yarn.server.res import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; @@ -1187,4 +1191,88 @@ public class TestFairScheduler { // Request should be fulfilled assertEquals(2, scheduler.applications.get(attId1).getLiveContainers().size()); } + + @Test + public void testReservationWhileMultiplePriorities() { + // Add a node + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + ApplicationAttemptId attId = createSchedulingRequest(1024, "queue1", + "user1", 1, 2); + scheduler.update(); + NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1, + new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>()); + scheduler.handle(updateEvent); + + FSSchedulerApp app = scheduler.applications.get(attId); + assertEquals(1, app.getLiveContainers().size()); + + ContainerId containerId = scheduler.applications.get(attId) + .getLiveContainers().iterator().next().getContainerId(); + + // Cause reservation to be created + createSchedulingRequestExistingApplication(1024, 2, attId); + scheduler.update(); + scheduler.handle(updateEvent); + + assertEquals(1, app.getLiveContainers().size()); + + // Create request at higher priority + createSchedulingRequestExistingApplication(1024, 1, attId); + scheduler.update(); + scheduler.handle(updateEvent); + + assertEquals(1, app.getLiveContainers().size()); + // Reserved container should still be at lower priority + for (RMContainer container : app.getReservedContainers()) { + assertEquals(2, container.getReservedPriority().getPriority()); + } + + // Complete container + scheduler.allocate(attId, new ArrayList<ResourceRequest>(), + Arrays.asList(containerId)); + + // Schedule at opening + scheduler.update(); + scheduler.handle(updateEvent); + + // Reserved container (at lower priority) should be run + Collection<RMContainer> liveContainers = app.getLiveContainers(); + assertEquals(1, liveContainers.size()); + for (RMContainer liveContainer : liveContainers) { + Assert.assertEquals(2, liveContainer.getContainer().getPriority().getPriority()); + } + } + + @Test + public void testAclSubmitApplication() throws Exception { + // Set acl's + Configuration conf = createConfiguration(); + conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE); + scheduler.reinitialize(conf, resourceManager.getRMContext()); + + PrintWriter out = new PrintWriter(new 
FileWriter(ALLOC_FILE)); + out.println("<?xml version=\"1.0\"?>"); + out.println("<allocations>"); + out.println("<queue name=\"queue1\">"); + out.println("<aclSubmitApps>norealuserhasthisname</aclSubmitApps>"); + out.println("</queue>"); + out.println("</allocations>"); + out.close(); + + QueueManager queueManager = scheduler.getQueueManager(); + queueManager.initialize(); + + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", + "norealuserhasthisname", 1); + ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1", + "norealuserhasthisname2", 1); + + FSSchedulerApp app1 = scheduler.applications.get(attId1); + assertNotNull("The application was not allowed", app1); + FSSchedulerApp app2 = scheduler.applications.get(attId2); + assertNull("The application was allowed", app2); + } } Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java Mon Jan 14 03:44:35 2013 @@ -71,9 +71,9 @@ public class TestFifoScheduler { private org.apache.hadoop.yarn.server.resourcemanager.NodeManager registerNode(String hostName, int containerManagerPort, int nmHttpPort, - String rackName, int memory) throws IOException { + String rackName, Resource capability) throws IOException { return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager( - hostName, containerManagerPort, nmHttpPort, rackName, memory, + hostName, containerManagerPort, nmHttpPort, rackName, capability, resourceManager.getResourceTrackerService(), resourceManager .getRMContext()); } @@ -121,13 +121,15 @@ public class TestFifoScheduler { // Register node1 String host_0 = "host_0"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_0 = - registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, 4 * GB); + registerNode(host_0, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(4 * GB, 1)); nm_0.heartbeat(); // Register node2 String host_1 = "host_1"; org.apache.hadoop.yarn.server.resourcemanager.NodeManager nm_1 = - registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, 2 * GB); + registerNode(host_1, 1234, 2345, NetworkTopology.DEFAULT_RACK, + Resources.createResource(2 * GB, 1)); nm_1.heartbeat(); // ResourceRequest priorities Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java Mon Jan 14 03:44:35 2013 @@ -424,8 +424,8 @@ public class TestRMWebServicesCapacitySc assertEquals("absoluteUsedCapacity doesn't match", 0, info.absoluteUsedCapacity, 1e-3f); assertEquals("numApplications doesn't match", 0, info.numApplications); - assertTrue("usedResources doesn't match", - info.usedResources.matches("memory: 0")); + assertTrue("usedResources doesn't match ", + info.usedResources.matches("<memory:0, vCores:0>")); assertTrue("queueName doesn't match, got: " + info.queueName + " expected: " + q, qshortName.matches(info.queueName)); assertTrue("state doesn't match", Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java Mon Jan 14 03:44:35 2013 @@ -218,7 +218,7 @@ public class TestContainerManagerSecurit dummyIdentifier.readFields(di); // Malice user modifies the resource amount - Resource modifiedResource = BuilderUtils.newResource(2048); + Resource modifiedResource = BuilderUtils.newResource(2048, 1); ContainerTokenIdentifier modifiedIdentifier = new ContainerTokenIdentifier(dummyIdentifier.getContainerID(), dummyIdentifier.getNmHostAddress(), "testUser", modifiedResource, @@ -401,23 +401,9 @@ public class TestContainerManagerSecurit UnsupportedFileSystemException, YarnRemoteException, InterruptedException { - // TODO: Use a resource to work around bugs. Today NM doesn't create local - // app-dirs if there are no file to download!! 
- String fileName = "testFile-" + appID.toString(); - File testFile = new File(localDir.getAbsolutePath(), fileName); - FileWriter tmpFile = new FileWriter(testFile); - tmpFile.write("testing"); - tmpFile.close(); - URL testFileURL = ConverterUtils.getYarnUrlFromPath(FileContext - .getFileContext().makeQualified( - new Path(localDir.getAbsolutePath(), fileName))); - LocalResource rsrc = BuilderUtils.newLocalResource(testFileURL, - LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, testFile - .length(), testFile.lastModified()); - ContainerLaunchContext amContainer = BuilderUtils .newContainerLaunchContext(null, "testUser", BuilderUtils - .newResource(1024), Collections.singletonMap(fileName, rsrc), + .newResource(1024, 1), Collections.<String, LocalResource>emptyMap(), new HashMap<String, String>(), Arrays.asList("sleep", "100"), new HashMap<String, ByteBuffer>(), null, new HashMap<ApplicationAccessType, String>()); @@ -495,7 +481,7 @@ public class TestContainerManagerSecurit // Request a container allocation. List<ResourceRequest> ask = new ArrayList<ResourceRequest>(); ask.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0), "*", - BuilderUtils.newResource(1024), 1)); + BuilderUtils.newResource(1024, 1), 1)); AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest( BuilderUtils.newApplicationAttemptId(appID, 1), 0, 0F, ask, @@ -596,7 +582,9 @@ public class TestContainerManagerSecurit ContainerLaunchContext context = BuilderUtils.newContainerLaunchContext(tokenId.getContainerID(), "testUser", - BuilderUtils.newResource(tokenId.getResource().getMemory()), + BuilderUtils.newResource( + tokenId.getResource().getMemory(), + tokenId.getResource().getVirtualCores()), new HashMap<String, LocalResource>(), new HashMap<String, String>(), new ArrayList<String>(), new HashMap<String, ByteBuffer>(), null, Modified: hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java?rev=1432796&r1=1432795&r2=1432796&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java (original) +++ hadoop/common/branches/HDFS-2802/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java Mon Jan 14 03:44:35 2013 @@ -254,11 +254,14 @@ public class WebAppProxyServlet extends if(securityEnabled) { String cookieName = getCheckCookieName(id); - for(Cookie c: req.getCookies()) { - if(cookieName.equals(c.getName())) { - userWasWarned = true; - userApproved = userApproved || Boolean.valueOf(c.getValue()); - break; + Cookie[] cookies = req.getCookies(); + if (cookies != null) { + for (Cookie c : cookies) { + if (cookieName.equals(c.getName())) { + userWasWarned = true; + userApproved = userApproved || Boolean.valueOf(c.getValue()); + break; + } } } } @@ -283,9 +286,12 @@ public class WebAppProxyServlet extends "please try the history server"); return; } - URI trackingUri = ProxyUriUtils.getUriFromAMUrl( - 
applicationReport.getOriginalTrackingUrl()); - if(applicationReport.getOriginalTrackingUrl().equals("N/A")) { + String original = applicationReport.getOriginalTrackingUrl(); + URI trackingUri = null; + if (original != null) { + trackingUri = ProxyUriUtils.getUriFromAMUrl(original); + } + if(original == null || original.equals("N/A")) { String message; switch(applicationReport.getFinalApplicationStatus()) { case FAILED:
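The WebAppProxyServlet hunks close two null-handling gaps: HttpServletRequest.getCookies() returns null when the request carries no cookies, and an application's original tracking URL can be null rather than the literal "N/A". A minimal sketch of the resulting null-safe pattern, using the field and helper names that appear in the diff (req, cookieName, applicationReport, ProxyUriUtils):

// Cookie scan: guard against getCookies() returning null before iterating.
boolean userWasWarned = false;
boolean userApproved = false;
Cookie[] cookies = req.getCookies();
if (cookies != null) {
  for (Cookie c : cookies) {
    if (cookieName.equals(c.getName())) {
      userWasWarned = true;
      userApproved = userApproved || Boolean.valueOf(c.getValue());
      break;
    }
  }
}

// Tracking URL: only parse it when present, and treat null the same as "N/A".
String original = applicationReport.getOriginalTrackingUrl();
URI trackingUri = null;
if (original != null) {
  trackingUri = ProxyUriUtils.getUriFromAMUrl(original);
}
if (original == null || original.equals("N/A")) {
  // fall into the FAILED/KILLED/... messaging shown in the hunk above
}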
