This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new ea1ff1e HDDS-5690. Speed up TestContainerReplication by removing
testSkipDemmissionAndMaintenanceNode (#2591)
ea1ff1e is described below
commit ea1ff1e34d3a9fad423a2b1db586d249c44c9fb0
Author: Stephen O'Donnell <[email protected]>
AuthorDate: Thu Sep 2 21:06:27 2021 +0100
HDDS-5690. Speed up TestContainerReplication by removing
testSkipDemmissionAndMaintenanceNode (#2591)
---
.../TestSCMContainerPlacementRackAware.java | 27 +++++++++++++
.../ozone/container/TestContainerReplication.java | 47 ----------------------
2 files changed, 27 insertions(+), 47 deletions(-)
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 376752b..dd004f8 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
+import java.util.Random;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
@@ -48,6 +49,8 @@ import org.mockito.Mockito;
import org.apache.commons.lang3.StringUtils;
+import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
+import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN;
import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
@@ -524,4 +527,28 @@ public class TestSCMContainerPlacementRackAware {
assertTrue(stat.isPolicySatisfied());
assertEquals(0, stat.misReplicationCount());
}
+
+ @Test
+ public void testOutOfServiceNodesNotSelected() {
+ // Set all the nodes to out of service
+ for (DatanodeInfo dn : dnInfos) {
+ dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
+ }
+
+ for (int i=0; i<10; i++) {
+ // Set a random DN to in_service and ensure it is always picked
+ int index = new Random().nextInt(dnInfos.size());
+ dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
+ try {
+ List<DatanodeDetails> datanodeDetails =
+ policy.chooseDatanodes(null, null, 1, 0, 0);
+ Assert.assertEquals(dnInfos.get(index), datanodeDetails.get(0));
+ } catch (SCMException e) {
+ // If we get SCMException: No satisfied datanode to meet the ... this is
+ // ok, as there is only 1 IN_SERVICE node and with the retry logic we
+ // may never find it.
+ }
+ dnInfos.get(index).setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
+ }
+ }
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 80fd44c..adb6a5d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -26,7 +26,6 @@ import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static
org.apache.hadoop.ozone.container.TestHelper.waitForContainerClose;
import static org.apache.hadoop.ozone.container.TestHelper.waitForReplicaCount;
import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.OutputStream;
@@ -34,17 +33,13 @@ import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import
org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
import
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
import
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware;
import
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.node.NodeDecommissionManager;
-import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -137,48 +132,6 @@ public class TestContainerReplication {
waitForReplicaCount(containerID, 3, cluster);
}
- @Test
- public void testSkipDecommissionAndMaintenanceNode() throws Exception {
- List<OmKeyLocationInfo> keyLocations = lookupKey(cluster);
- assertFalse(keyLocations.isEmpty());
-
- OmKeyLocationInfo keyLocation = keyLocations.get(0);
- long containerID = keyLocation.getContainerID();
- waitForContainerClose(cluster, containerID);
-
- // Mark other two DN Decommission and Maintenance
- NodeDecommissionManager decommissionManager =
- cluster.getStorageContainerManager().getScmDecommissionManager();
- boolean deCommission = true;
- for (HddsDatanodeService d1 : cluster.getHddsDatanodes()) {
- boolean match = false;
- for (DatanodeDetails d2 : keyLocations.get(0).getPipeline().getNodes()) {
- if (d1.getDatanodeDetails().equals(d2)) {
- match = true;
- break;
- }
- }
- if (!match) {
- if (deCommission) {
- decommissionManager.startDecommission(d1.getDatanodeDetails());
- deCommission = false;
- } else {
- decommissionManager.startMaintenance(d1.getDatanodeDetails(), 1);
- }
- }
- }
-
- cluster.shutdownHddsDatanode(keyLocation.getPipeline().getFirstNode());
-
- waitForReplicaCount(containerID, 2, cluster);
- try {
- waitForReplicaCount(containerID, 3, cluster);
- fail("Replication should not succeed without extra IN_SERVICE nodes");
- } catch (TimeoutException e) {
- Assert.assertTrue(TestHelper.countReplicas(containerID, cluster) == 2);
- }
- }
-
private static OzoneConfiguration createConfiguration() {
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]