http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java deleted file mode 100644 index cbe96ee..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java +++ /dev/null @@ -1,1144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import com.google.common.base.Supplier; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.test.PathUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; - -import java.io.File; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; -import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.core.StringStartsWith.startsWith; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -/** - * Test the Node Manager class. - */ -public class TestNodeManager { - - private File testDir; - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - @BeforeClass - public static void init() throws IOException { - } - - @Before - public void setup() { - testDir = PathUtils.getTestDir( - TestNodeManager.class); - } - - @After - public void cleanup() { - FileUtil.fullyDelete(testDir); - } - - /** - * Returns a new copy of Configuration. - * - * @return Config - */ - OzoneConfiguration getConf() { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, - testDir.getAbsolutePath()); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - TimeUnit.MILLISECONDS); - return conf; - } - - /** - * Creates a NodeManager. - * - * @param config - Config for the node manager. - * @return SCMNodeManager - * @throws IOException - */ - - SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException { - EventQueue eventQueue = new EventQueue(); - eventQueue.addHandler(SCMEvents.NEW_NODE, - Mockito.mock(NewNodeHandler.class)); - eventQueue.addHandler(SCMEvents.STALE_NODE, - Mockito.mock(StaleNodeHandler.class)); - eventQueue.addHandler(SCMEvents.DEAD_NODE, - Mockito.mock(DeadNodeHandler.class)); - SCMNodeManager nodeManager = new SCMNodeManager(config, - UUID.randomUUID().toString(), null, eventQueue); - assertFalse("Node manager should be in chill mode", - nodeManager.isOutOfChillMode()); - return nodeManager; - } - - /** - * Tests that Node manager handles heartbeats correctly, and comes out of - * chill mode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHeartbeat() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - // Send some heartbeats from different nodes. - for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertTrue("Heartbeat thread should have picked up the " + - "scheduled heartbeats and transitioned out of chill mode.", - nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that if we send no heartbeats, the node manager stays in chill mode.
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmNoHeartbeats() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("No heartbeats, Node manager should have been in" + - " chill mode.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that if we don't get enough unique nodes we stay in chill mode. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmNotEnoughHeartbeats() throws IOException, - InterruptedException, TimeoutException { - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - - // Need 100 nodes to come out of chill mode, only one node is sending HB. - nodeManager.setMinimumChillModeNodes(100); - nodeManager.processHeartbeat(TestUtils - .createRandomDatanodeAndRegister(nodeManager)); - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("Not enough heartbeats, Node manager should have" + - " been in chill mode.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that many heartbeats from the same node are counted as a single - * node. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmSameNodeHeartbeats() throws IOException, - InterruptedException, TimeoutException { - - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.setMinimumChillModeNodes(3); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - - // Send 10 heartbeats from the same node, and assert we never leave chill mode. - for (int x = 0; x < 10; x++) { - nodeManager.processHeartbeat(datanodeDetails); - } - - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertFalse("Not enough nodes have sent heartbeats to the node" + - " manager.", nodeManager.isOutOfChillMode()); - } - } - - /** - * Asserts that adding heartbeats after shutdown does not work. This implies - * that the heartbeat thread has been shut down safely by closing the node - * manager. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmShutdown() throws IOException, InterruptedException, - TimeoutException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - SCMNodeManager nodeManager = createNodeManager(conf); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.close(); - - // These should never be processed. - nodeManager.processHeartbeat(datanodeDetails); - - // Let us just wait for 2 seconds to prove that HBs are not processed. - Thread.sleep(2 * 1000); - - //TODO: add assertion - } - - /** - * Asserts SCM informs datanodes to re-register with the node manager - * on a restart.
- * - * @throws Exception - */ - @Test - public void testScmHeartbeatAfterRestart() throws Exception { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - UUID dnId = datanodeDetails.getUuid(); - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - nodemanager.register(datanodeDetails, - TestUtils.createNodeReport(report), - TestUtils.getRandomPipelineReports()); - List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails); - Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails)); - Assert.assertTrue("On regular HB calls, SCM responds to a " - + "datanode with an empty command list", command.isEmpty()); - } - - // Sends heartbeat without registering to SCM. - // This happens when SCM restarts. - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - Assert.assertFalse(nodemanager - .getAllNodes().contains(datanodeDetails)); - try { - // SCM handles heartbeat asynchronously. - // It may need more than one heartbeat processing to - // send the notification. - GenericTestUtils.waitFor(new Supplier<Boolean>() { - @Override public Boolean get() { - List<SCMCommand> command = - nodemanager.processHeartbeat(datanodeDetails); - return command.size() == 1 && command.get(0).getType() - .equals(SCMCommandProto.Type.reregisterCommand); - } - }, 100, 3 * 1000); - } catch (TimeoutException e) { - Assert.fail("Timed out verifying that SCM informs " - + "the datanode to re-register itself."); - } - } - } - - /** - * Asserts that we detect as many healthy nodes as we have generated heartbeats - * for. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmHealthyNodeCount() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int count = 10; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(count, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Asserts that if a user provides a value less than 5 times the heartbeat - * interval as the StaleNode value, we throw since that is a QoS that we - * cannot maintain.
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - - @Test - public void testScmSanityOfUserConfig1() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int interval = 100; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - - // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, interval, MILLISECONDS); - - thrown.expect(IllegalArgumentException.class); - - // This string is a multiple of the interval value - thrown.expectMessage( - startsWith("100 is not within min = 500 or max = 100000")); - createNodeManager(conf); - } - - /** - * Asserts that if Stale Interval value is more than 5 times the value of HB - * processing thread it is a sane value. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmSanityOfUserConfig2() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int interval = 100; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS); - - // This should be 5 times more than OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - // and 3 times more than OZONE_SCM_HEARTBEAT_INTERVAL - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, MILLISECONDS); - createNodeManager(conf).close(); - } - - /** - * Asserts that a single node moves from Healthy to stale node, then from - * stale node to dead node if it misses enough heartbeats. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmDetectStaleAndDeadNode() throws IOException, - InterruptedException, TimeoutException { - final int interval = 100; - final int nodeCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount); - - - DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister( - nodeManager); - - // Heartbeat once - nodeManager.processHeartbeat(staleNode); - - // Heartbeat all other nodes. - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds .. and heartbeat good nodes again. - Thread.sleep(2 * 1000); - - for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // Wait for 2 seconds, wait a total of 4 seconds to make sure that the - // node moves into stale state. - Thread.sleep(2 * 1000); - List<DatanodeDetails> staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 1 stale node", - 1, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 1 stale node", - 1, staleNodeList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleNodeList.get(0).getUuid()); - Thread.sleep(1000); - - // heartbeat good nodes again. 
- for (DatanodeDetails dn : nodeList) { - nodeManager.processHeartbeat(dn); - } - - // 6 seconds is the dead window for this test, so we wait a total of - // 7 seconds to make sure that the node moves into dead state. - Thread.sleep(2 * 1000); - - // the stale node has been removed from the stale list - staleNodeList = nodeManager.getNodes(STALE); - assertEquals("Expected to find 0 stale nodes", - 0, nodeManager.getNodeCount(STALE)); - assertEquals("Expected to find 0 stale nodes", - 0, staleNodeList.size()); - - // Check for the dead node now. - List<DatanodeDetails> deadNodeList = nodeManager.getNodes(DEAD); - assertEquals("Expected to find 1 dead node", 1, - nodeManager.getNodeCount(DEAD)); - assertEquals("Expected to find 1 dead node", - 1, deadNodeList.size()); - assertEquals("Dead node is not the expected ID", staleNode - .getUuid(), deadNodeList.get(0).getUuid()); - } - } - - /** - * Check for NPE when datanodeDetails is passed null for sendHeartbeat. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException, - InterruptedException, TimeoutException { - try (SCMNodeManager nodeManager = createNodeManager(getConf())) { - nodeManager.processHeartbeat(null); - } catch (NullPointerException npe) { - GenericTestUtils.assertExceptionContains("Heartbeat is missing " + - "DatanodeDetails.", npe); - } - } - - /** - * Asserts that a dead node, stale node and healthy nodes co-exist. The counts, - * lists and node IDs match the expected node state. - * <p/> - * This test is pretty complicated because it explores all states of Node - * manager in a single test. Please read through the comments to get an idea of - * the current state of the node Manager. - * <p/> - * This test is written like a state machine to avoid threads and concurrency - * issues. This test is replicated below with the use of threads. Avoiding - * threads makes it easy to debug the state machine. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - /** - * These values are very important. Here is what they mean so you don't - * have to look them up while reading this code. - * - * OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL - This is the frequency of the - * HB processing thread that is running in the SCM. This thread must run - * for the SCM to process the Heartbeats. - * - * OZONE_SCM_HEARTBEAT_INTERVAL - This is the frequency at which - * datanodes will send heartbeats to SCM. Please note: This is the only - * config value for node manager that is specified in seconds. We don't - * want SCM heartbeat resolution to be finer than seconds. - * In this test it is not used, but we are forced to set it because we - * have validation code that checks that the Stale Node interval and Dead Node - * interval are larger than the value of - * OZONE_SCM_HEARTBEAT_INTERVAL. - * - * OZONE_SCM_STALENODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for us to mark a node as stale. In this test - * we set that to 3. That is, if a node has not heartbeated SCM for the last 3 - * seconds we will mark it as stale. - * - * OZONE_SCM_DEADNODE_INTERVAL - This is the time that must elapse - * from the last heartbeat for a node to be marked dead. We have an - * additional constraint that this must be at least 2 times bigger than - * the Stale node interval. - * - * With these we are trying to explore the state of this cluster with - * various timeouts.
Each section is commented so that you can keep - * track of the state of the cluster nodes. - * - */ - - @Test - public void testScmClusterIsInExpectedState1() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - /** - * Cluster state: Healthy: All nodes are heartbeating normally. - */ - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails healthyNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails staleNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - DatanodeDetails deadNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - // Sleep so that heartbeat processing thread gets to run. - Thread.sleep(500); - - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - - /** - * Cluster state: Quiesced: We are going to sleep for 3 seconds, which - * means that no node is heartbeating. All nodes should move to Stale. - */ - Thread.sleep(3 * 1000); - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(STALE)); - - - /** - * Cluster State : Move the healthy node back to the healthy state, move the - * other 2 nodes to the Stale state. - * - * We heartbeat the healthy node after 1 second and let the other 2 nodes - * elapse the 3-second window. - */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - - - // 3.5 seconds from the last heartbeat for the stale and dead nodes. So those - // 2 nodes must move to the Stale state and the healthy node must - // remain in the healthy state. - List<DatanodeDetails> healthyList = nodeManager.getNodes(HEALTHY); - assertEquals("Expected one healthy node", 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals(2, nodeManager.getNodeCount(STALE)); - - /** - * Cluster State: Allow healthyNode to remain in healthy state and - * staleNode to move to stale state and deadNode to move to dead state. - */ - - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - Thread.sleep(1500); - nodeManager.processHeartbeat(healthyNode); - Thread.sleep(2 * 1000); - - // 3.5 seconds have elapsed for the stale node, so it moves into Stale. - // 7 seconds have elapsed for the dead node, so it moves into dead. - // 2 seconds have elapsed for the healthy node, so it stays in the healthy state.
- healthyList = nodeManager.getNodes(HEALTHY); - List<DatanodeDetails> staleList = nodeManager.getNodes(STALE); - List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD); - - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(1, nodeManager.getNodeCount(HEALTHY)); - assertEquals(1, nodeManager.getNodeCount(STALE)); - assertEquals(1, nodeManager.getNodeCount(DEAD)); - - assertEquals("Expected one healthy node", - 1, healthyList.size()); - assertEquals("Healthy node is not the expected ID", healthyNode - .getUuid(), healthyList.get(0).getUuid()); - - assertEquals("Expected one stale node", - 1, staleList.size()); - assertEquals("Stale node is not the expected ID", staleNode - .getUuid(), staleList.get(0).getUuid()); - - assertEquals("Expected one dead node", - 1, deadList.size()); - assertEquals("Dead node is not the expected ID", deadNode - .getUuid(), deadList.get(0).getUuid()); - /** - * Cluster State : let us heartbeat all the nodes and verify that we get - * back all the nodes in healthy state. - */ - nodeManager.processHeartbeat(healthyNode); - nodeManager.processHeartbeat(staleNode); - nodeManager.processHeartbeat(deadNode); - Thread.sleep(500); - //Assert all nodes are healthy. - assertEquals(3, nodeManager.getAllNodes().size()); - assertEquals(3, nodeManager.getNodeCount(HEALTHY)); - } - } - - /** - * Heartbeat a given set of nodes at a specified frequency. - * - * @param manager - Node Manager - * @param list - List of datanodeIDs - * @param sleepDuration - Duration to sleep between heartbeats. - * @throws InterruptedException - */ - private void heartbeatNodeSet(SCMNodeManager manager, - List<DatanodeDetails> list, - int sleepDuration) throws InterruptedException { - while (!Thread.currentThread().isInterrupted()) { - for (DatanodeDetails dn : list) { - manager.processHeartbeat(dn); - } - Thread.sleep(sleepDuration); - } - } - - /** - * Create a set of Nodes with a given prefix. - * - * @param count - number of nodes. - * @return List of Nodes. - */ - private List<DatanodeDetails> createNodeSet(SCMNodeManager nodeManager, int - count) { - List<DatanodeDetails> list = new LinkedList<>(); - for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - list.add(datanodeDetails); - } - return list; - } - - /** - * Function that tells us if we found the right number of stale nodes. - * - * @param nodeManager - node manager - * @param count - number of stale nodes to look for. - * @return true if we found the expected number. - */ - private boolean findNodes(NodeManager nodeManager, int count, - HddsProtos.NodeState state) { - return count == nodeManager.getNodeCount(state); - } - - /** - * Asserts that we can create a set of nodes that send its heartbeats from - * different threads and NodeManager behaves as expected. 
- * - * @throws IOException - * @throws InterruptedException - */ - @Test - public void testScmClusterIsInExpectedState2() throws IOException, - InterruptedException, TimeoutException { - final int healthyCount = 5000; - final int staleCount = 100; - final int deadCount = 10; - - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List<DatanodeDetails> healthyNodeList = createNodeSet(nodeManager, - healthyCount); - List<DatanodeDetails> staleNodeList = createNodeSet(nodeManager, - staleCount); - List<DatanodeDetails> deadNodeList = createNodeSet(nodeManager, - deadCount); - - Runnable healthyNodeTask = () -> { - try { - // 2 second heartbeat makes these nodes stay healthy. - heartbeatNodeSet(nodeManager, healthyNodeList, 2 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Runnable staleNodeTask = () -> { - try { - // 4 second heartbeat makes these nodes go to stale and back to - // healthy again. - heartbeatNodeSet(nodeManager, staleNodeList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - - // No Thread just one time HBs the node manager, so that these will be - // marked as dead nodes eventually. - for (DatanodeDetails dn : deadNodeList) { - nodeManager.processHeartbeat(dn); - } - - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - - Thread.sleep(10 * 1000); - - // Assert all healthy nodes are healthy now, this has to be a greater - // than check since Stale nodes can be healthy when we check the state. - - assertTrue(nodeManager.getNodeCount(HEALTHY) >= healthyCount); - - assertEquals(deadCount, nodeManager.getNodeCount(DEAD)); - - List<DatanodeDetails> deadList = nodeManager.getNodes(DEAD); - - for (DatanodeDetails node : deadList) { - assertTrue(deadNodeList.contains(node)); - } - - - - // Checking stale nodes is tricky since they have to move between - // healthy and stale to avoid becoming dead nodes. So we search for - // that state for a while, if we don't find that state waitfor will - // throw. - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 4 * 1000); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - /** - * Asserts that we can handle 6000+ nodes heartbeating SCM. 
- * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - public void testScmCanHandleScale() throws IOException, - InterruptedException, TimeoutException { - final int healthyCount = 3000; - final int staleCount = 3000; - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, - SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3 * 1000, - MILLISECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6 * 1000, - MILLISECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - List<DatanodeDetails> healthyList = createNodeSet(nodeManager, - healthyCount); - List<DatanodeDetails> staleList = createNodeSet(nodeManager, - staleCount); - - Runnable healthyNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, healthyList, 2 * 1000); - } catch (InterruptedException ignored) { - - } - }; - - Runnable staleNodeTask = () -> { - try { - heartbeatNodeSet(nodeManager, staleList, 4 * 1000); - } catch (InterruptedException ignored) { - } - }; - - Thread thread1 = new Thread(healthyNodeTask); - thread1.setDaemon(true); - thread1.start(); - - Thread thread2 = new Thread(staleNodeTask); - thread2.setDaemon(true); - thread2.start(); - Thread.sleep(3 * 1000); - - GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE), - 500, 20 * 1000); - assertEquals("Node count mismatch", - healthyCount + staleCount, nodeManager.getAllNodes().size()); - - thread1.interrupt(); - thread2.interrupt(); - } - } - - - @Test - public void testScmEnterAndExitChillMode() throws IOException, - InterruptedException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, - MILLISECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - nodeManager.setMinimumChillModeNodes(10); - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanodeDetails); - String status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, containsString("Still in chill " + - "mode, waiting on nodes to report in.")); - - // Should not exit chill mode since 10 nodes have not heartbeated yet. - assertFalse(nodeManager.isOutOfChillMode()); - - // Force exit chill mode. - nodeManager.forceExitChillMode(); - assertTrue(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of chill mode.")); - - - // Enter back into chill mode. - nodeManager.enterChillMode(); - assertFalse(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of startup chill mode," + " but in manual chill mode.")); - - // Assert that forced chill-mode entry cannot be overridden by node HBs. - for (int x = 0; x < 20; x++) { - DatanodeDetails datanode = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - nodeManager.processHeartbeat(datanode); - } - - Thread.sleep(500); - assertFalse(nodeManager.isOutOfChillMode()); - - // Make sure that once we exit out of manual chill mode, we fall back - // to the number of nodes needed to get out of chill mode.
- nodeManager.exitChillMode(); - assertTrue(nodeManager.isOutOfChillMode()); - status = nodeManager.getChillModeStatus(); - Assert.assertThat(status, - containsString("Out of chill mode.")); - } - } - - /** - * Test multiple nodes sending initial heartbeat with their node report. - * - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. - public void testScmStatsFromNodeReport() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, - MILLISECONDS); - final int nodeCount = 10; - final long capacity = 2000; - final long used = 100; - final long remaining = capacity - used; - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - for (int x = 0; x < nodeCount; x++) { - DatanodeDetails datanodeDetails = TestUtils - .createRandomDatanodeAndRegister(nodeManager); - UUID dnId = datanodeDetails.getUuid(); - long free = capacity - used; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.processHeartbeat(datanodeDetails); - } - //TODO: wait for heartbeat to be processed - Thread.sleep(4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY)); - assertEquals(capacity * nodeCount, (long) nodeManager.getStats() - .getCapacity().get()); - assertEquals(used * nodeCount, (long) nodeManager.getStats() - .getScmUsed().get()); - assertEquals(remaining * nodeCount, (long) nodeManager.getStats() - .getRemaining().get()); - } - } - - /** - * Test single node stat update based on nodereport from different heartbeat - * status (healthy, stale and dead). - * @throws IOException - * @throws InterruptedException - * @throws TimeoutException - */ - @Test - @Ignore - // TODO: Enable this after we implement NodeReportEvent handler. 
- public void testScmNodeReportUpdate() throws IOException, - InterruptedException, TimeoutException { - OzoneConfiguration conf = getConf(); - final int heartbeatCount = 5; - final int nodeCount = 1; - final int interval = 100; - - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, - MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); - conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); - conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); - - try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails datanodeDetails = - TestUtils.createRandomDatanodeAndRegister(nodeManager); - final long capacity = 2000; - final long usedPerHeartbeat = 100; - UUID dnId = datanodeDetails.getUuid(); - for (int x = 0; x < heartbeatCount; x++) { - long scmUsed = x * usedPerHeartbeat; - long remaining = capacity - scmUsed; - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils - .createStorageReport(dnId, storagePath, capacity, scmUsed, - remaining, null); - - nodeManager.processHeartbeat(datanodeDetails); - Thread.sleep(100); - } - - final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1); - final long expectedRemaining = capacity - expectedScmUsed; - - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - - long foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(capacity, foundCapacity); - - long foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - long foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Test NodeManager#getNodeStats - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, nodeCapacity); - - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Compare the result from - // NodeManager#getNodeStats and NodeManager#getNodeStat - SCMNodeStat stat1 = nodeManager.getNodeStats(). - get(datanodeDetails.getUuid()); - SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get(); - assertEquals(stat1, stat2); - - // Wait up to 4s so that the node becomes stale - // Verify the usage info should be unchanged. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(STALE) == 1, 100, - 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get() - .getScmUsed().get(); - assertEquals(expectedScmUsed, foundScmUsed); - - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get(). - getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - - // Wait up to 4 more seconds so the node becomes dead - // Verify usage info should be updated. 
- GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(DEAD) == 1, 100, - 4 * 1000); - - assertEquals(0, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getStats().getCapacity().get(); - assertEquals(0, foundCapacity); - - foundScmUsed = nodeManager.getStats().getScmUsed().get(); - assertEquals(0, foundScmUsed); - - foundRemaining = nodeManager.getStats().getRemaining().get(); - assertEquals(0, foundRemaining); - - nodeManager.processHeartbeat(datanodeDetails); - - // Wait up to 5 seconds so that the dead node becomes healthy - // Verify usage info should be updated. - GenericTestUtils.waitFor( - () -> nodeManager.getNodeCount(HEALTHY) == 1, - 100, 5 * 1000); - GenericTestUtils.waitFor( - () -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, - 100, 4 * 1000); - assertEquals(nodeCount, nodeManager.getNodeStats().size()); - foundCapacity = nodeManager.getNodeStat(datanodeDetails).get() - .getCapacity().get(); - assertEquals(capacity, foundCapacity); - foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed() - .get(); - assertEquals(expectedScmUsed, foundScmUsed); - foundRemaining = nodeManager.getNodeStat(datanodeDetails).get() - .getRemaining().get(); - assertEquals(expectedRemaining, foundRemaining); - } - } - - @Test - public void testHandlingSCMCommandEvent() { - OzoneConfiguration conf = getConf(); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - 100, TimeUnit.MILLISECONDS); - - DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails(); - UUID dnId = datanodeDetails.getUuid(); - String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); - - EventQueue eq = new EventQueue(); - try (SCMNodeManager nodemanager = createNodeManager(conf)) { - eq.addHandler(DATANODE_COMMAND, nodemanager); - - nodemanager - .register(datanodeDetails, TestUtils.createNodeReport(report), - TestUtils.getRandomPipelineReports()); - eq.fireEvent(DATANODE_COMMAND, - new CommandForDatanode<>(datanodeDetails.getUuid(), - new CloseContainerCommand(1L, ReplicationType.STAND_ALONE, - PipelineID.randomId()))); - - eq.processAll(1000L); - List<SCMCommand> command = - nodemanager.processHeartbeat(datanodeDetails); - Assert.assertEquals(1, command.size()); - Assert - .assertEquals(CloseContainerCommand.class, command.get(0).getClass()); - } catch (IOException e) { - Assert.fail("Unexpected IOException: " + e.getMessage()); - } - } - -}
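The deleted tests above all exercise one lifecycle: a registered node that stops heartbeating drifts from HEALTHY to STALE to DEAD, and a fresh heartbeat revives it. As a compact reference, a hedged sketch of that flow using only the helpers and static imports already present in TestNodeManager; the interval values are illustrative assumptions that satisfy the validation exercised by testScmSanityOfUserConfig1, and the waitFor timeouts are guesses:

// Sketch only, not part of this commit.
OzoneConfiguration conf = getConf();
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, MILLISECONDS);
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); // no HB for 3s -> STALE
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);  // no HB for 6s -> DEAD

try (SCMNodeManager nodeManager = createNodeManager(conf)) {
  DatanodeDetails dn = TestUtils.createRandomDatanodeAndRegister(nodeManager);
  nodeManager.processHeartbeat(dn); // the node starts out HEALTHY
  // Polling for each transition is less flaky than the fixed Thread.sleep()
  // calls flagged by the TODOs in the deleted tests.
  GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(STALE) == 1, 100, 5 * 1000);
  GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(DEAD) == 1, 100, 8 * 1000);
  nodeManager.processHeartbeat(dn); // a single heartbeat revives the node
  GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(HEALTHY) == 1, 100, 5 * 1000);
}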
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java deleted file mode 100644 index f9b1392..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * <p> - * http://www.apache.org/licenses/LICENSE-2.0 - * <p> - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import java.io.IOException; -import java.util.UUID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; -import org.apache.hadoop.hdds.server.events.Event; -import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TestNodeReportHandler implements EventPublisher { - - private static final Logger LOG = LoggerFactory - .getLogger(TestNodeReportHandler.class); - private NodeReportHandler nodeReportHandler; - private SCMNodeManager nodeManager; - private String storagePath = GenericTestUtils.getRandomizedTempPath() - .concat("/" + UUID.randomUUID().toString()); - - @Before - public void resetEventCollector() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - nodeManager = new SCMNodeManager(conf, "cluster1", null, new EventQueue()); - nodeReportHandler = new NodeReportHandler(nodeManager); - } - - @Test - public void testNodeReport() throws IOException { - DatanodeDetails dn = TestUtils.randomDatanodeDetails(); - StorageReportProto storageOne = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - - SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); - Assert.assertNull(nodeMetric); - - nodeReportHandler.onMessage( - getNodeReport(dn, storageOne), this); - nodeMetric = nodeManager.getNodeStat(dn); - - 
Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10); - - StorageReportProto storageTwo = TestUtils - .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - nodeReportHandler.onMessage( - getNodeReport(dn, storageOne, storageTwo), this); - nodeMetric = nodeManager.getNodeStat(dn); - - Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200); - Assert.assertTrue(nodeMetric.get().getRemaining().get() == 180); - Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 20); - - } - - private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, - StorageReportProto... reports) { - NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports); - return new NodeReportFromDatanode(dn, nodeReportProto); - } - - @Override - public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent( - EVENT_TYPE event, PAYLOAD payload) { - LOG.info("Event is published: {}", payload); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java deleted file mode 100644 index 623fc16..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java +++ /dev/null @@ -1,262 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto. 
- StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.*; -import org.junit.Rule; -import org.junit.rules.ExpectedException; - -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.Set; -import java.util.ArrayList; -import java.util.HashSet; -import java.io.IOException; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test Node Storage Map. - */ -public class TestSCMNodeStorageStatMap { - private final static int DATANODE_COUNT = 100; - private final long capacity = 10L * OzoneConsts.GB; - private final long used = 2L * OzoneConsts.GB; - private final long remaining = capacity - used; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private final Map<UUID, Set<StorageLocationReport>> testData = - new ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - UUID dnId = UUID.randomUUID(); - Set<StorageLocationReport> reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + - Integer.toString(dnIndex)); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(dnId.toString()) - .setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - testData.put(dnId, reportSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set<StorageLocationReport> report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID knownNode = getFirstKey(); - Set<StorageLocationReport> report = testData.get(knownNode); - map.insertNewDatanode(knownNode, report); - Assert.assertEquals(map.getStorageVolumes(knownNode), - testData.get(knownNode)); - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, report); - } - - @Test - public void testUpdateUnknownDatanode() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - UUID unknownNode = UUID.randomUUID(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode - .toString()); - Set<StorageLocationReport> reportSet = new HashSet<>(); - StorageLocationReport.Builder builder = StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK).setId(unknownNode.toString()) -
.setStorageLocation(path).setScmUsed(used).setRemaining(remaining) - .setCapacity(capacity).setFailed(false); - reportSet.add(builder.build()); - thrown.expect(SCMException.class); - thrown.expectMessage("No such datanode"); - map.updateDatanodeMap(unknownNode, reportSet); - } - - @Test - public void testProcessNodeReportCheckOneNode() throws IOException { - UUID key = getFirstKey(); - List<StorageReportProto> reportList = new ArrayList<>(); - Set<StorageLocationReport> reportSet = testData.get(key); - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - map.insertNewDatanode(key, reportSet); - Assert.assertTrue(map.isKnownDatanode(key)); - UUID storageId = UUID.randomUUID(); - String path = - GenericTestUtils.getRandomizedTempPath().concat("/" + storageId); - StorageLocationReport report = reportSet.iterator().next(); - long reportCapacity = report.getCapacity(); - long reportScmUsed = report.getScmUsed(); - long reportRemaining = report.getRemaining(); - StorageReportProto storageReport = TestUtils.createStorageReport(storageId, - path, reportCapacity, reportScmUsed, reportRemaining, null); - StorageReportResult result = - map.processNodeReport(key, TestUtils.createNodeReport(storageReport)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); - StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb = - NodeReportProto.newBuilder(); - StorageReportProto srb = reportSet.iterator().next().getProtoBufMessage(); - reportList.add(srb); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL); - - reportList.add(TestUtils - .createStorageReport(UUID.randomUUID(), path, reportCapacity, - reportCapacity, 0, null)); - result = map.processNodeReport(key, TestUtils.createNodeReport(reportList)); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE); - // Mark a disk failed - StorageReportProto srb2 = StorageReportProto.newBuilder() - .setStorageUuid(UUID.randomUUID().toString()) - .setStorageLocation(srb.getStorageLocation()).setScmUsed(reportCapacity) - .setCapacity(reportCapacity).setRemaining(0).setFailed(true).build(); - reportList.add(srb2); - nrb.addAllStorageReport(reportList); - result = map.processNodeReport(key, nrb.addStorageReport(srb).build()); - Assert.assertEquals(result.getStatus(), - SCMNodeStorageStatMap.ReportStatus.FAILED_AND_OUT_OF_SPACE_STORAGE); - - } - - @Test - public void testProcessMultipleNodeReports() throws SCMException { - SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf); - int counter = 1; - // Insert all testData into the SCMNodeStorageStatMap Map. 
- for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData - .entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity()); - Assert.assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace()); - Assert.assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed()); - - // Update 1/4th of the datanodes to be full - for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData - .entrySet()) { - Set<StorageLocationReport> reportSet = new HashSet<>(); - String path = GenericTestUtils.getTempPath( - TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry - .getKey().toString()); - StorageLocationReport.Builder builder = - StorageLocationReport.newBuilder(); - builder.setStorageType(StorageType.DISK) - .setId(keyEntry.getKey().toString()).setStorageLocation(path) - .setScmUsed(capacity).setRemaining(0).setCapacity(capacity) - .setFailed(false); - reportSet.add(builder.build()); - - map.updateDatanodeMap(keyEntry.getKey(), reportSet); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - Assert.assertEquals(DATANODE_COUNT / 4, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert.assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert.assertEquals( - 0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * capacity), - map.getTotalSpaceUsed(), 0); - counter = 1; - // Remove 1/4 of the DataNodes from the Map - for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData - .entrySet()) { - map.removeDatanode(keyEntry.getKey()); - counter++; - if (counter > DATANODE_COUNT / 4) { - break; - } - } - - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.CRITICAL) - .size()); - Assert.assertEquals(0, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.WARN) - .size()); - Assert.assertEquals(0.75 * DATANODE_COUNT, - map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL) - .size(), 0); - - Assert - .assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), - 0); - Assert.assertEquals(0.75 * DATANODE_COUNT * remaining, - map.getTotalFreeSpace(), 0); - Assert - .assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0); - - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java deleted file mode 100644 index dfd8397..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -/** - * Make CheckStyle Happy. - */ -package org.apache.hadoop.hdds.scm.node; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java deleted file mode 100644 index ec1d527..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ /dev/null @@ -1,327 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Test classes for Node2ContainerMap. 
- */ -public class TestNode2ContainerMap { - private final static int DATANODE_COUNT = 300; - private final static int CONTAINER_COUNT = 1000; - private final Map<UUID, TreeSet<ContainerID>> testData = new - ConcurrentHashMap<>(); - - @Rule - public ExpectedException thrown = ExpectedException.none(); - - private void generateData() { - for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) { - TreeSet<ContainerID> currentSet = new TreeSet<>(); - for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) { - long currentCnIndex = (dnIndex * CONTAINER_COUNT) + cnIndex; - currentSet.add(new ContainerID(currentCnIndex)); - } - testData.put(UUID.randomUUID(), currentSet); - } - } - - private UUID getFirstKey() { - return testData.keySet().iterator().next(); - } - - @Before - public void setUp() throws Exception { - generateData(); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testIsKnownDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - UUID unknownNode = UUID.randomUUID(); - Set<ContainerID> containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Assert.assertTrue("Not able to detect a known node", - map.isKnownDatanode(knownNode)); - Assert.assertFalse("Unknown node detected", - map.isKnownDatanode(unknownNode)); - } - - @Test - public void testInsertNewDatanode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID knownNode = getFirstKey(); - Set<ContainerID> containerIDs = testData.get(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - Set<ContainerID> readSet = map.getContainers(knownNode); - - // Assert that all elements are present in the set that we read back from - // node map. 
- Set<ContainerID> newSet = new TreeSet<>(readSet); - Assert.assertTrue(newSet.removeAll(containerIDs)); - Assert.assertTrue(newSet.isEmpty()); - - thrown.expect(SCMException.class); - thrown.expectMessage("already exists"); - map.insertNewDatanode(knownNode, containerIDs); - - map.removeDatanode(knownNode); - map.insertNewDatanode(knownNode, containerIDs); - - } - - @Test - public void testProcessReportCheckOneNode() throws SCMException { - UUID key = getFirstKey(); - Set<ContainerID> values = testData.get(key); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(key, values); - Assert.assertTrue(map.isKnownDatanode(key)); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(ReportResult.ReportStatus.ALL_IS_WELL, - result.getStatus()); - } - - @Test - public void testUpdateDatanodeMap() throws SCMException { - UUID datanodeId = getFirstKey(); - Set<ContainerID> values = testData.get(datanodeId); - Node2ContainerMap map = new Node2ContainerMap(); - map.insertNewDatanode(datanodeId, values); - Assert.assertTrue(map.isKnownDatanode(datanodeId)); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - //remove one container - values.remove(values.iterator().next()); - Assert.assertEquals(CONTAINER_COUNT - 1, values.size()); - Assert.assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size()); - - map.setContainersForDatanode(datanodeId, values); - - Assert.assertEquals(values.size(), map.getContainers(datanodeId).size()); - Assert.assertEquals(values, map.getContainers(datanodeId)); - } - - @Test - public void testProcessReportInsertAll() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - - for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) { - map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue()); - } - // Assert all Keys are known datanodes. - for (UUID key : testData.keySet()) { - Assert.assertTrue(map.isKnownDatanode(key)); - } - } - - /* - For processReport we have to test the following scenarios. - - 1. New Datanode - A new datanode appears and we have to add that to the - SCM's Node2Container Map. - - 2. New Container - A Datanode exists, but a new container is added to that - DN. We need to detect that and return a list of added containers. - - 3. Missing Container - A Datanode exists, but one of the expected containers - on that datanode is missing. We need to detect that. - - 4. We get a container report that has both the missing and new containers. - We need to return separate lists for these. - - (A standalone sketch of this report-diff logic appears after this file's - diff, below.) - */ - - /** - * Assert that we are able to detect the addition of a new datanode. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewDataNode() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - // If we attempt to process a node that is not present in the map, - // we get a result back that says NEW_DATANODE_FOUND. - UUID key = getFirstKey(); - TreeSet<ContainerID> values = testData.get(key); - ReportResult result = map.processReport(key, values); - Assert.assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND, - result.getStatus()); - Assert.assertEquals(values.size(), result.getNewEntries().size()); - } - - /** - * This test asserts that processReport is able to detect new containers - * when they are added to a datanode. For that we populate the DN with a - * list of containerIDs and then add a few more containers and make sure - * that we are able to detect them.
- * - * @throws SCMException - */ - @Test - public void testProcessReportDetectNewContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet<ContainerID> values = testData.get(key); - map.insertNewDatanode(key, values); - - final int newCount = 100; - ContainerID last = values.last(); - TreeSet<ContainerID> addedContainers = new TreeSet<>(); - for (int x = 1; x <= newCount; x++) { - long cTemp = last.getId() + x; - addedContainers.add(new ContainerID(cTemp)); - } - - // This set is the super set of existing containers and new containers. - TreeSet<ContainerID> newContainersSet = new TreeSet<>(values); - newContainersSet.addAll(addedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - //Assert that the expected number of new entries is the same as the size - //of addedContainers. - Assert.assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND, - result.getStatus()); - - Assert.assertEquals(addedContainers.size(), - result.getNewEntries().size()); - - // Assert that the container IDs are the same as the ones we added. - Assert.assertTrue("Not all new entries were found.", - result.getNewEntries().removeAll(addedContainers)); - } - - /** - * This test asserts that processReport is able to detect missing containers - * if they are missing from a report. - * - * @throws SCMException - */ - @Test - public void testProcessReportDetectMissingContainers() throws SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet<ContainerID> values = testData.get(key); - map.insertNewDatanode(key, values); - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet<ContainerID> removedContainers = new TreeSet<>(); - - // Pick random containers to remove; collisions are fine. - for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - // This set is a new set with some containers removed. - TreeSet<ContainerID> newContainersSet = new TreeSet<>(values); - newContainersSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newContainersSet); - - //Assert that the expected number of missing entries is the same as the - //size of removedContainers. - Assert.assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - // Assert that the container IDs are the same as the ones we removed. - Assert.assertTrue("Not all missing containers were found.", - result.getMissingEntries().removeAll(removedContainers)); - } - - @Test - public void testProcessReportDetectNewAndMissingContainers() throws - SCMException { - Node2ContainerMap map = new Node2ContainerMap(); - UUID key = getFirstKey(); - TreeSet<ContainerID> values = testData.get(key); - map.insertNewDatanode(key, values); - - Set<ContainerID> insertedSet = new TreeSet<>(); - // Insert containers with IDs 1..30. - for (int x = 1; x <= 30; x++) { - insertedSet.add(new ContainerID(x)); - } - - final int removeCount = 100; - Random r = new Random(); - - ContainerID first = values.first(); - TreeSet<ContainerID> removedContainers = new TreeSet<>(); - - // Pick random containers to remove; collisions are fine.
- for (int x = 0; x < removeCount; x++) { - int startBase = (int) first.getId(); - long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); - } - - Set<ContainerID> newSet = new TreeSet<>(values); - newSet.addAll(insertedSet); - newSet.removeAll(removedContainers); - - ReportResult result = map.processReport(key, newSet); - - Assert.assertEquals( - ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND, - result.getStatus()); - Assert.assertEquals(removedContainers.size(), - result.getMissingEntries().size()); - - // Assert that the container IDs are the same as the ones we removed. - Assert.assertTrue("Not all missing containers were found.", - result.getMissingEntries().removeAll(removedContainers)); - - Assert.assertEquals(insertedSet.size(), - result.getNewEntries().size()); - - // Assert that the container IDs are the same as the ones we inserted. - Assert.assertTrue("Not all inserted containers were found.", - result.getNewEntries().removeAll(insertedSet)); - } -} \ No newline at end of file
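The four processReport scenarios listed in the comment block of the test class above reduce to two set differences per report: reported-minus-known gives the new entries, known-minus-reported gives the missing entries. Below is a minimal, self-contained sketch of that logic, assuming only java.util types; the real Node2ContainerMap and ReportResult classes carry richer state, so this is illustrative rather than the SCM implementation:

    import java.util.Map;
    import java.util.Set;
    import java.util.TreeSet;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;

    public final class ReportDiffSketch {

      private final Map<UUID, Set<Long>> knownContainers =
          new ConcurrentHashMap<>();

      /** Registers a datanode with its initial container set. */
      public void insertNewDatanode(UUID datanode, Set<Long> containers) {
        knownContainers.put(datanode, new TreeSet<>(containers));
      }

      /** Diffs a report against known state; return values mirror the
          ReportStatus names used in the tests above. */
      public String processReport(UUID datanode, Set<Long> reported) {
        Set<Long> known = knownContainers.get(datanode);
        if (known == null) {
          return "NEW_DATANODE_FOUND";                // scenario 1
        }
        Set<Long> newEntries = new TreeSet<>(reported);
        newEntries.removeAll(known);                  // containers added on the DN
        Set<Long> missingEntries = new TreeSet<>(known);
        missingEntries.removeAll(reported);           // containers no longer reported
        if (!newEntries.isEmpty() && !missingEntries.isEmpty()) {
          return "MISSING_AND_NEW_ENTRIES_FOUND";     // scenario 4
        }
        if (!newEntries.isEmpty()) {
          return "NEW_ENTRIES_FOUND";                 // scenario 2
        }
        if (!missingEntries.isEmpty()) {
          return "MISSING_ENTRIES";                   // scenario 3
        }
        return "ALL_IS_WELL";
      }
    }

Using TreeSet for both result sets keeps them ordered by container ID, matching the ordered TreeSet collections the tests build their expectations from.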
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java deleted file mode 100644 index 6610fcd..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Test Node2Container Map. - */ -package org.apache.hadoop.hdds.scm.node.states; \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java deleted file mode 100644 index da05c59..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm; -/** - * SCM tests. - */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java deleted file mode 100644 index 486c604..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.server; - -import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; - -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertFalse; - -/** Test class for SCMChillModeManager.
- */ -public class TestSCMChillModeManager { - - private static EventQueue queue; - private SCMChillModeManager scmChillModeManager; - private static Configuration config; - private List<ContainerInfo> containers; - - @Rule - public Timeout timeout = new Timeout(1000 * 20); - - @BeforeClass - public static void setUp() { - queue = new EventQueue(); - config = new OzoneConfiguration(); - } - - @Test - public void testChillModeState() throws Exception { - // Test 1: test for 0 containers - testChillMode(0); - - // Test 2: test for 20 containers - testChillMode(20); - } - - @Test - public void testChillModeStateWithNullContainers() { - new SCMChillModeManager(config, null, queue); - } - - private void testChillMode(int numContainers) throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(numContainers)); - scmChillModeManager = new SCMChillModeManager(config, containers, queue); - queue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - scmChillModeManager); - assertTrue(scmChillModeManager.getInChillMode()); - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(containers)); - GenericTestUtils.waitFor(() -> { - return !scmChillModeManager.getInChillMode(); - }, 100, 1000 * 5); - } - - @Test - public void testChillModeExitRule() throws Exception { - containers = new ArrayList<>(); - containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); - scmChillModeManager = new SCMChillModeManager(config, containers, queue); - queue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - scmChillModeManager); - assertTrue(scmChillModeManager.getInChillMode()); - - testContainerThreshold(containers.subList(0, 25), 0.25); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(25, 50), 0.50); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(50, 75), 0.75); - assertTrue(scmChillModeManager.getInChillMode()); - testContainerThreshold(containers.subList(75, 100), 1.0); - - GenericTestUtils.waitFor(() -> { - return !scmChillModeManager.getInChillMode(); - }, 100, 1000 * 5); - } - - @Test - public void testDisableChillMode() { - OzoneConfiguration conf = new OzoneConfiguration(config); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_CHILLMODE_ENABLED, false); - scmChillModeManager = new SCMChillModeManager(conf, containers, queue); - assertFalse(scmChillModeManager.getInChillMode()); - } - - private void testContainerThreshold(List<ContainerInfo> dnContainers, - double expectedThreshold) - throws Exception { - queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, - HddsTestUtils.createNodeRegistrationContainerReport(dnContainers)); - GenericTestUtils.waitFor(() -> { - double threshold = scmChillModeManager.getCurrentContainerThreshold(); - return threshold == expectedThreshold; - }, 100, 2000 * 9); - } - -} \ No newline at end of file
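The exit rule exercised by testChillModeExitRule above is a plain ratio check: chill mode ends once the fraction of known containers seen in node-registration container reports reaches the cutoff, approached here in 0.25 increments up to 1.0. A hedged sketch of that bookkeeping follows; the names are hypothetical, since the real SCMChillModeManager receives these reports through the SCM event queue rather than a direct method call:

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative only; structure and names are assumptions, not SCM code.
    final class ChillModeThresholdSketch {
      private final int totalContainers;
      private final double cutoff;
      private final Set<Long> reportedContainers = new HashSet<>();
      private boolean inChillMode = true;

      ChillModeThresholdSketch(int totalContainers, double cutoff) {
        this.totalContainers = totalContainers;
        this.cutoff = cutoff;
      }

      /** Records one reported container and re-evaluates the exit rule. */
      void onContainerReported(long containerId) {
        reportedContainers.add(containerId);
        // e.g. 25 of 100 containers reported -> 0.25; exit at the cutoff.
        double threshold = totalContainers == 0
            ? 1.0 : (double) reportedContainers.size() / totalContainers;
        if (threshold >= cutoff) {
          inChillMode = false;
        }
      }

      boolean getInChillMode() {
        return inChillMode;
      }
    }

Tracking distinct container IDs in a set, rather than counting raw reports, makes repeated reports for the same container idempotent, which is consistent with the test firing whole sublists of containers at once.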
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java deleted file mode 100644 index 4b20018..0000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMClientProtocolServer.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * Test class for {@link SCMClientProtocolServer}. - */ -public class TestSCMClientProtocolServer { - private SCMClientProtocolServer scmClientProtocolServer; - private OzoneConfiguration config; - private EventQueue eventQueue; - - @Before - public void setUp() throws Exception { - config = new OzoneConfiguration(); - eventQueue = new EventQueue(); - scmClientProtocolServer = new SCMClientProtocolServer(config, null); - eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, scmClientProtocolServer); - } - - @After - public void tearDown() throws Exception { - } - - @Test - public void testAllocateContainerFailureInChillMode() throws Exception { - LambdaTestUtils.intercept(SCMException.class, - "hillModePrecheck failed for allocateContainer", () -> { - scmClientProtocolServer.allocateContainer( - ReplicationType.STAND_ALONE, ReplicationFactor.ONE, ""); - }); - } -} \ No newline at end of file
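The SCMException intercepted in this last test comes from a chill-mode precheck guarding allocateContainer; the matched substring "hillModePrecheck" corresponds to a "ChillModePrecheck failed for allocateContainer" message. A minimal sketch of that guard pattern is below; the ChillModePrecheck class itself is not part of this diff, so this stand-in is illustrative only and uses plain IOException in place of SCMException:

    import java.io.IOException;

    // Illustrative guard mirroring the behaviour the test asserts; not SCM code.
    final class ChillModeGuardSketch {
      private volatile boolean inChillMode = true;

      void setChillModeStatus(boolean inChillMode) {
        this.inChillMode = inChillMode;
      }

      /** Rejects the named operation while the cluster is still in chill mode. */
      void preCheck(String operation) throws IOException {
        if (inChillMode) {
          throw new IOException("ChillModePrecheck failed for " + operation);
        }
      }
    }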
