This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 88287edc3a HDDS-6875. Migrate parameterized tests in hdds-common to JUnit5 (#3513)
88287edc3a is described below

commit 88287edc3a2b78f20a4796bd996d20a79ef97222
Author: Kaijie Chen <[email protected]>
AuthorDate: Tue Jun 21 19:46:34 2022 +0800

    HDDS-6875. Migrate parameterized tests in hdds-common to JUnit5 (#3513)
---
 hadoop-hdds/common/pom.xml                         |   5 +
 .../hdds/TestComponentVersionInvariants.java       |  76 ++++---
 .../hadoop/hdds/client/TestReplicationConfig.java  | 242 ++++++++++----------
 .../hdds/scm/net/TestNetworkTopologyImpl.java      | 243 +++++++++++----------
 .../hadoop/hdds/scm/net/TestNodeSchemaLoader.java  | 129 +++++------
 .../hadoop/hdds/scm/net/TestYamlSchemaLoader.java  |  95 ++++----
 6 files changed, 378 insertions(+), 412 deletions(-)
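
The recurring transformation in every file below replaces JUnit 4's
class-level Parameterized runner, where parameters are injected through a
constructor and every @Test runs once per row, with JUnit 5's per-method
model, where a @MethodSource feeds arguments directly into each
@ParameterizedTest. A minimal before/after sketch of the pattern (this
TestExample class is illustrative, not taken from the commit):

// JUnit 4 (before): the runner drives the class, one constructor call per row
@RunWith(Parameterized.class)
public class TestExample {
  @Parameterized.Parameters
  public static Object[][] values() {
    return new Object[][] {{1, "one"}, {2, "two"}};
  }

  private final int number;
  private final String word;

  public TestExample(int number, String word) {
    this.number = number;
    this.word = word;
  }

  @Test
  public void test() {
    // runs twice, reading the fields set by the constructor
  }
}

// JUnit 5 (after): arguments flow straight into the annotated method
import java.util.stream.Stream;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import static org.junit.jupiter.params.provider.Arguments.arguments;

public class TestExample {
  public static Stream<Arguments> values() {
    return Stream.of(arguments(1, "one"), arguments(2, "two"));
  }

  @ParameterizedTest
  @MethodSource("values")
  public void test(int number, String word) {
    // runs twice; plain @Test methods in the same class still run only once
  }
}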

diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index cf65d8b1d5..0c1273df0a 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -169,6 +169,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>junit-jupiter-api</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.junit.jupiter</groupId>
       <artifactId>junit-jupiter-engine</artifactId>
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
index 4ba9943c39..d5524688a6 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestComponentVersionInvariants.java
@@ -19,75 +19,73 @@ package org.apache.hadoop.hdds;
 
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneManagerVersion;
-import org.assertj.core.util.Arrays;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
-import static org.junit.Assert.assertEquals;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 /**
  * Test to ensure Component version instances conform with invariants relied
  * upon in other parts of the codebase.
  */
-@RunWith(Parameterized.class)
 public class TestComponentVersionInvariants {
 
-  @Parameterized.Parameters
-  public static Object[][] values() {
-    Object[][] values = new Object[3][];
-    values[0] =
-        Arrays.array(
+  public static Stream<Arguments> values() {
+    return Stream.of(
+        arguments(
             DatanodeVersion.values(),
             DatanodeVersion.DEFAULT_VERSION,
-            DatanodeVersion.FUTURE_VERSION);
-    values[1] =
-        Arrays.array(
+            DatanodeVersion.FUTURE_VERSION),
+        arguments(
             ClientVersion.values(),
             ClientVersion.DEFAULT_VERSION,
-            ClientVersion.FUTURE_VERSION);
-    values[2] =
-        Arrays.array(
+            ClientVersion.FUTURE_VERSION),
+        arguments(
             OzoneManagerVersion.values(),
             OzoneManagerVersion.DEFAULT_VERSION,
-            OzoneManagerVersion.FUTURE_VERSION);
-    return values;
-  }
-
-  private final ComponentVersion[] values;
-  private final ComponentVersion defaultValue;
-  private final ComponentVersion futureValue;
-
-  public TestComponentVersionInvariants(ComponentVersion[] values,
-      ComponentVersion defaultValue, ComponentVersion futureValue) {
-    this.values = new ComponentVersion[values.length];
-    System.arraycopy(values, 0, this.values, 0, values.length);
-    this.defaultValue = defaultValue;
-    this.futureValue = futureValue;
+            OzoneManagerVersion.FUTURE_VERSION)
+    );
   }
 
   // FUTURE_VERSION is the latest
-  @Test
-  public void testFutureVersionHasTheHighestOrdinal() {
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testFutureVersionHasTheHighestOrdinal(
+      ComponentVersion[] values, ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
+
     assertEquals(values[values.length - 1], futureValue);
   }
 
   // FUTURE_VERSION's internal version id is -1
-  @Test
-  public void testFuturVersionHasMinusOneAsProtoRepresentation() {
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testFuturVersionHasMinusOneAsProtoRepresentation(
+      ComponentVersion[] values, ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
     assertEquals(-1, futureValue.toProtoValue());
 
   }
 
   // DEFAULT_VERSION's internal version id is 0
-  @Test
-  public void testDefaultVersionHasZeroAsProtoRepresentation() {
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testDefaultVersionHasZeroAsProtoRepresentation(
+      ComponentVersion[] values, ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
     assertEquals(0, defaultValue.toProtoValue());
   }
 
   // versions are increasing monotonically by one
-  @Test
-  public void testAssignedProtoRepresentations() {
+  @ParameterizedTest
+  @MethodSource("values")
+  public void testAssignedProtoRepresentations(
+      ComponentVersion[] values, ComponentVersion defaultValue,
+      ComponentVersion futureValue) {
     int startValue = defaultValue.toProtoValue();
     // we skip the future version at the last position
     for (int i = 0; i < values.length - 1; i++) {
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
index ed8c8f764e..9f939904ca 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java
@@ -23,72 +23,48 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.stream.Stream;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 /**
  * Test replicationConfig.
  */
-@RunWith(Parameterized.class)
 public class TestReplicationConfig {
 
   private static final int MB = 1024 * 1024;
   private static final int KB = 1024;
 
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter
-  public String type;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(1)
-  public String factor;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(2)
-  public String codec;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(3)
-  public int data;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(4)
-  public int parity;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(5)
-  public int chunkSize;
-
-  @SuppressWarnings("checkstyle:VisibilityModifier")
-  @Parameterized.Parameter(6)
-  public Class<?> replicationConfigClass;
-
-  //NOTE: if a new cunckSize is used/added in the parameters other than KB or MB
+  //NOTE: if a new chunkSize is used/added in the parameters other than KB or MB
   // please revisit the method createECDescriptor, to handle the new chunkSize.
-  @Parameterized.Parameters(name = "{0}/{1}")
-  public static Object[][] parameters() {
-    return new Object[][] {
-        {"RATIS", "ONE", null, 0, 0, 0, RatisReplicationConfig.class },
-        {"RATIS", "THREE", null, 0, 0, 0, RatisReplicationConfig.class},
-        {"STAND_ALONE", "ONE", null, 0, 0, 0,
-            StandaloneReplicationConfig.class},
-        {"STAND_ALONE", "THREE", null, 0, 0, 0,
-            StandaloneReplicationConfig.class},
-        {"EC", "RS-3-2-1024K", "RS", 3, 2, MB, ECReplicationConfig.class},
-        {"EC", "RS-3-2-1024", "RS", 3, 2, KB, ECReplicationConfig.class},
-        {"EC", "RS-6-3-1024K", "RS", 6, 3, MB, ECReplicationConfig.class},
-        {"EC", "RS-6-3-1024", "RS", 6, 3, KB, ECReplicationConfig.class},
-        {"EC", "RS-10-4-1024K", "RS", 10, 4, MB, ECReplicationConfig.class},
-        {"EC", "RS-10-4-1024", "RS", 10, 4, KB, ECReplicationConfig.class},
-    };
+  public static Stream<Arguments> replicaType() {
+    return Stream.of(
+        arguments("RATIS", "ONE", RatisReplicationConfig.class),
+        arguments("RATIS", "THREE", RatisReplicationConfig.class),
+        arguments("STAND_ALONE", "ONE", StandaloneReplicationConfig.class),
+        arguments("STAND_ALONE", "THREE", StandaloneReplicationConfig.class)
+    );
+  }
+
+  public static Stream<Arguments> ecType() {
+    return Stream.of(
+        arguments("RS", 3, 2, MB),
+        arguments("RS", 3, 2, KB),
+        arguments("RS", 6, 3, MB),
+        arguments("RS", 6, 3, KB),
+        arguments("RS", 10, 4, MB),
+        arguments("RS", 10, 4, KB)
+    );
   }
 
   @Test
@@ -102,9 +78,10 @@ public class TestReplicationConfig {
         RatisReplicationConfig.class);
   }
 
-  @Test
-  public void testGetDefaultShouldCreateReplicationConfFromConfValues() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testGetDefaultShouldCreateReplicationConfFromConfValues(
+      String type, String factor, Class<?> replicationConfigClass) {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_REPLICATION_TYPE, type);
     conf.set(OZONE_REPLICATION, factor);
@@ -113,16 +90,17 @@ public class TestReplicationConfig {
 
     validate(replicationConfig,
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor),
+        replicationConfigClass);
   }
 
-  @Test
-  public void testGetDefaultShouldCreateECReplicationConfFromConfValues() {
-    assumeECType();
-
+  @ParameterizedTest
+  @MethodSource("ecType")
+  public void testGetDefaultShouldCreateECReplicationConfFromConfValues(
+      String codec, int data, int parity, int chunkSize) {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_REPLICATION_TYPE, type);
-    conf.set(OZONE_REPLICATION, ecDescriptor());
+    conf.set(OZONE_REPLICATION_TYPE, "EC");
+    conf.set(OZONE_REPLICATION, ecDescriptor(codec, data, parity, chunkSize));
 
     ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf);
 
@@ -130,9 +108,10 @@ public class TestReplicationConfig {
         EcCodec.valueOf(codec), data, parity, chunkSize);
   }
 
-  @Test
-  public void deserialize() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void deserialize(String type, String factor,
+      Class<?> replicationConfigClass) {
     final ReplicationConfig replicationConfig =
         ReplicationConfig.fromProtoTypeAndFactor(
             ReplicationType.valueOf(type),
@@ -140,12 +119,13 @@ public class TestReplicationConfig {
 
     validate(replicationConfig,
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor),
+        replicationConfigClass);
   }
 
-  @Test
-  public void deserializeEC() {
-    assumeECType();
+  @ParameterizedTest
+  @MethodSource("ecType")
+  public void deserializeEC(String codec, int data, int parity, int chunkSize) {
     HddsProtos.ECReplicationConfig proto =
         HddsProtos.ECReplicationConfig.newBuilder()
             .setCodec(codec)
@@ -160,9 +140,10 @@ public class TestReplicationConfig {
     validate(config, EcCodec.valueOf(codec), data, parity, chunkSize);
   }
 
-  @Test
-  public void testECReplicationConfigGetReplication() {
-    assumeECType();
+  @ParameterizedTest
+  @MethodSource("ecType")
+  public void testECReplicationConfigGetReplication(
+      String codec, int data, int parity, int chunkSize) {
     HddsProtos.ECReplicationConfig proto =
         HddsProtos.ECReplicationConfig.newBuilder().setCodec(codec)
             .setData(data).setParity(parity).setEcChunkSize(chunkSize).build();
@@ -170,27 +151,29 @@ public class TestReplicationConfig {
     ReplicationConfig config =
         ReplicationConfig.fromProto(ReplicationType.EC, null, proto);
 
-    Assert.assertEquals(EcCodec.valueOf(
+    assertEquals(EcCodec.valueOf(
         codec) + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
             + data + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
             + parity + ECReplicationConfig.EC_REPLICATION_PARAMS_DELIMITER
             + chunkSize, config.getReplication());
   }
 
-  @Test
-  public void testReplicationConfigGetReplication() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testReplicationConfigGetReplication(String type, String factor,
+      Class<?> replicationConfigClass) {
     final ReplicationConfig replicationConfig = ReplicationConfig
         .fromTypeAndFactor(
             org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
             org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
 
-    Assert.assertEquals(factor, replicationConfig.getReplication());
+    assertEquals(factor, replicationConfig.getReplication());
   }
 
-  @Test
-  public void fromJavaObjects() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void fromJavaObjects(String type, String factor,
+      Class<?> replicationConfigClass) {
     final ReplicationConfig replicationConfig =
         ReplicationConfig.fromTypeAndFactor(
             org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -198,12 +181,14 @@ public class TestReplicationConfig {
 
     validate(replicationConfig,
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor),
+        replicationConfigClass);
   }
 
-  @Test
-  public void testParseFromTypeAndFactorAsString() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testParseFromTypeAndFactorAsString(String type, String factor,
+      Class<?> replicationConfigClass) {
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -211,12 +196,14 @@ public class TestReplicationConfig {
 
     validate(replicationConfig,
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor));
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor),
+        replicationConfigClass);
   }
 
-  @Test
-  public void testParseFromTypeAndFactorAsStringifiedInteger() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testParseFromTypeAndFactorAsStringifiedInteger(
+      String type, String factor, Class<?> replicationConfigClass) {
     ConfigurationSource conf = new OzoneConfiguration();
     String f =
         factor.equals("ONE") ? "1"
@@ -229,16 +216,19 @@ public class TestReplicationConfig {
 
     validate(replicationConfig,
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(this.factor));
+        org.apache.hadoop.hdds.client.ReplicationFactor.valueOf(factor),
+        replicationConfigClass);
   }
 
-  @Test
-  public void testParseECReplicationConfigFromString() {
-    assumeECType();
+  @ParameterizedTest
+  @MethodSource("ecType")
+  public void testParseECReplicationConfigFromString(
+      String codec, int data, int parity, int chunkSize) {
 
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig repConfig = ReplicationConfig.parse(
-        org.apache.hadoop.hdds.client.ReplicationType.EC, ecDescriptor(), conf);
+        org.apache.hadoop.hdds.client.ReplicationType.EC,
+        ecDescriptor(codec, data, parity, chunkSize), conf);
 
     validate(repConfig, EcCodec.valueOf(codec), data, parity, chunkSize);
   }
@@ -251,9 +241,10 @@ public class TestReplicationConfig {
    * or STAND_ALONE, then replica count can be adjusted with the replication
    * factor.
    */
-  @Test
-  public void testAdjustReplication() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testAdjustReplication(String type, String factor,
+      Class<?> replicationConfigClass) {
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
@@ -262,12 +253,14 @@ public class TestReplicationConfig {
     validate(
         ReplicationConfig.adjustReplication(replicationConfig, (short) 3, conf),
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.THREE);
+        org.apache.hadoop.hdds.client.ReplicationFactor.THREE,
+        replicationConfigClass);
 
     validate(
         ReplicationConfig.adjustReplication(replicationConfig, (short) 1, conf),
         org.apache.hadoop.hdds.client.ReplicationType.valueOf(type),
-        org.apache.hadoop.hdds.client.ReplicationFactor.ONE);
+        org.apache.hadoop.hdds.client.ReplicationFactor.ONE,
+        replicationConfigClass);
   }
 
   /**
@@ -278,12 +271,14 @@ public class TestReplicationConfig {
    * then the client can not adjust the configuration via the replication
    * factor.
    */
-  @Test
-  public void testAdjustECReplication() {
-    assumeECType();
+  @ParameterizedTest
+  @MethodSource("ecType")
+  public void testAdjustECReplication(String codec, int data, int parity,
+      int chunkSize) {
     ConfigurationSource conf = new OzoneConfiguration();
     ReplicationConfig replicationConfig = ReplicationConfig.parse(
-        org.apache.hadoop.hdds.client.ReplicationType.EC, ecDescriptor(), conf);
+        org.apache.hadoop.hdds.client.ReplicationType.EC,
+        ecDescriptor(codec, data, parity, chunkSize), conf);
 
     validate(
         ReplicationConfig.adjustReplication(replicationConfig, (short) 3, conf),
@@ -304,9 +299,10 @@ public class TestReplicationConfig {
    * system there might exist some keys that were created with a now disallowed
    * ReplicationConfig.
    */
-  @Test
-  public void testValidationBasedOnConfig() {
-    assumeRatisOrStandaloneType();
+  @ParameterizedTest
+  @MethodSource("replicaType")
+  public void testValidationBasedOnConfig(String type, String factor,
+      Class<?> replicationConfigClass) {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_REPLICATION + ".allowed-configs",
         "^STANDALONE/ONE|RATIS/THREE$");
@@ -357,28 +353,18 @@ public class TestReplicationConfig {
             org.apache.hadoop.hdds.client.ReplicationType.CHAINED, "", conf));
   }
 
-  private void validate(ReplicationConfig replicationConfig,
-      org.apache.hadoop.hdds.client.ReplicationType expectedType,
-      org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor) {
-
-    validate(replicationConfig, expectedType, expectedFactor,
-        replicationConfigClass);
-  }
-
-
   private void validate(ReplicationConfig replicationConfig,
       org.apache.hadoop.hdds.client.ReplicationType expectedType,
       org.apache.hadoop.hdds.client.ReplicationFactor expectedFactor,
       Class<?> expectedReplicationConfigClass) {
 
-    assertEquals(expectedReplicationConfigClass, replicationConfig.getClass());
+    assertSame(expectedReplicationConfigClass, replicationConfig.getClass());
 
-    assertEquals(
-        expectedType.name(), replicationConfig.getReplicationType().name());
-    assertEquals(
-        expectedFactor.getValue(), replicationConfig.getRequiredNodes());
-    assertEquals(
-        expectedFactor.name(),
+    assertEquals(expectedType.name(),
+        replicationConfig.getReplicationType().name());
+    assertEquals(expectedFactor.getValue(),
+        replicationConfig.getRequiredNodes());
+    assertEquals(expectedFactor.name(),
         ((ReplicatedReplicationConfig) replicationConfig)
             .getReplicationFactor().name());
   }
@@ -387,7 +373,7 @@ public class TestReplicationConfig {
       EcCodec expectedCodec,
       int expectedData, int expectedParity, int expectedChunkSize) {
 
-    assertEquals(ECReplicationConfig.class, replicationConfig.getClass());
+    assertSame(ECReplicationConfig.class, replicationConfig.getClass());
     assertEquals(ReplicationType.EC, replicationConfig.getReplicationType());
 
     ECReplicationConfig ecReplicationConfig =
@@ -402,16 +388,10 @@ public class TestReplicationConfig {
         replicationConfig.getRequiredNodes());
   }
 
-  private String ecDescriptor() {
+  private String ecDescriptor(String codec, int data, int parity,
+      int chunkSize) {
     return codec.toUpperCase() + "-" + data + "-" + parity + "-" +
         (chunkSize == MB ? "1024K" : "1024");
   }
 
-  private void assumeRatisOrStandaloneType() {
-    Assume.assumeTrue(type.equals("RATIS") || type.equals("STAND_ALONE"));
-  }
-
-  private void assumeECType() {
-    Assume.assumeTrue(type.equals("EC"));
-  }
 }
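
One side effect worth noting in the file above: the JUnit 4 version ran all
ten parameter rows against every test and filtered mismatches at runtime with
Assume.assumeTrue (reported as skipped executions), while the JUnit 5 version
splits the data into the replicaType and ecType sources so each test only
receives rows it can use. Condensed from the diff, with bodies elided:

// Before: shared rows, filtered per test; mismatched rows show up as skipped
@Test
public void deserialize() {
  Assume.assumeTrue(type.equals("RATIS") || type.equals("STAND_ALONE"));
  // ...
}

// After: the source itself scopes the rows, so nothing is skipped
@ParameterizedTest
@MethodSource("replicaType")
public void deserialize(String type, String factor,
    Class<?> replicationConfigClass) {
  // ...
}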
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index 0a3251dd9b..53991f3382 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -25,6 +25,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -38,26 +39,25 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
 
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /** Test the network topology functions. */
-@RunWith(Parameterized.class)
+@Timeout(30)
 public class TestNetworkTopologyImpl {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestNetworkTopologyImpl.class);
@@ -65,7 +65,7 @@ public class TestNetworkTopologyImpl {
   private Node[] dataNodes;
   private Random random = new Random();
 
-  public TestNetworkTopologyImpl(NodeSchema[] schemas, Node[] nodeArray) {
+  public void initNetworkTopology(NodeSchema[] schemas, Node[] nodeArray) {
     NodeSchemaManager.getInstance().init(schemas, true);
     cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
     dataNodes = nodeArray.clone();
@@ -74,13 +74,9 @@ public class TestNetworkTopologyImpl {
     }
   }
 
-  @Rule
-  public Timeout testTimeout = Timeout.seconds(300);
-
-  @Parameters
-  public static Collection<Object[]> setupDatanodes() {
-    Object[][] topologies = new Object[][]{
-        {new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA},
+  public static Stream<Arguments> topologies() {
+    return Stream.of(
+        arguments(new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA},
             new Node[]{
                 createDatanode("1.1.1.1", "/"),
                 createDatanode("2.2.2.2", "/"),
@@ -90,8 +86,8 @@ public class TestNetworkTopologyImpl {
                 createDatanode("6.6.6.6", "/"),
                 createDatanode("7.7.7.7", "/"),
                 createDatanode("8.8.8.8", "/"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA},
+            }),
+        arguments(new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA},
             new Node[]{
                 createDatanode("1.1.1.1", "/r1"),
                 createDatanode("2.2.2.2", "/r1"),
@@ -101,9 +97,9 @@ public class TestNetworkTopologyImpl {
                 createDatanode("6.6.6.6", "/r3"),
                 createDatanode("7.7.7.7", "/r3"),
                 createDatanode("8.8.8.8", "/r3"),
-            }},
-        {new NodeSchema[]
-            {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA},
+            }),
+        arguments(new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA,
+            LEAF_SCHEMA},
             new Node[]{
                 createDatanode("1.1.1.1", "/d1/r1"),
                 createDatanode("2.2.2.2", "/d1/r1"),
@@ -113,8 +109,8 @@ public class TestNetworkTopologyImpl {
                 createDatanode("6.6.6.6", "/d2/r3"),
                 createDatanode("7.7.7.7", "/d2/r3"),
                 createDatanode("8.8.8.8", "/d2/r3"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA,
+            }),
+        arguments(new NodeSchema[] {ROOT_SCHEMA, DATACENTER_SCHEMA, RACK_SCHEMA,
             NODEGROUP_SCHEMA, LEAF_SCHEMA},
             new Node[]{
                 createDatanode("1.1.1.1", "/d1/r1/ng1"),
@@ -137,9 +133,9 @@ public class TestNetworkTopologyImpl {
                 createDatanode("18.18.18.18", "/d4/r1/ng2"),
                 createDatanode("19.19.19.19", "/d4/r1/ng3"),
                 createDatanode("20.20.20.20", "/d4/r1/ng3"),
-            }},
-        {new NodeSchema[] {ROOT_SCHEMA, REGION_SCHEMA, DATACENTER_SCHEMA,
-            RACK_SCHEMA, NODEGROUP_SCHEMA, LEAF_SCHEMA},
+            }),
+        arguments(new NodeSchema[] {ROOT_SCHEMA, REGION_SCHEMA,
+            DATACENTER_SCHEMA, RACK_SCHEMA, NODEGROUP_SCHEMA, LEAF_SCHEMA},
             new Node[]{
                 createDatanode("1.1.1.1", "/d1/rg1/r1/ng1"),
                 createDatanode("2.2.2.2", "/d1/rg1/r1/ng1"),
@@ -166,13 +162,14 @@ public class TestNetworkTopologyImpl {
                 createDatanode("23.23.23.23", "/d2/rg2/r2/ng1"),
                 createDatanode("24.24.24.24", "/d2/rg2/r2/ng1"),
                 createDatanode("25.25.25.25", "/d2/rg2/r2/ng1"),
-            }}
-    };
-    return Arrays.asList(topologies);
+            })
+    );
   }
 
-  @Test
-  public void testContains() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testContains(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4");
     for (int i = 0; i < dataNodes.length; i++) {
       assertTrue(cluster.contains(dataNodes[i]));
@@ -180,18 +177,22 @@ public class TestNetworkTopologyImpl {
     assertFalse(cluster.contains(nodeNotInMap));
   }
 
-  @Test
-  public void testNumOfChildren() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testNumOfChildren(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     assertEquals(dataNodes.length, cluster.getNumOfLeafNode(null));
     assertEquals(0, cluster.getNumOfLeafNode("/switch1/node1"));
   }
 
-  @Test
-  public void testGetNode() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testGetNode(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     assertEquals(cluster.getNode(""), cluster.getNode(null));
     assertEquals(cluster.getNode(""), cluster.getNode("/"));
-    assertEquals(null, cluster.getNode("/switch1/node1"));
-    assertEquals(null, cluster.getNode("/switch1"));
+    assertNull(cluster.getNode("/switch1/node1"));
+    assertNull(cluster.getNode("/switch1"));
 
     if (cluster.getNode("/d1") != null) {
       List<String> excludedScope = new ArrayList<>();
@@ -203,12 +204,9 @@ public class TestNetworkTopologyImpl {
 
   @Test
   public void testCreateInvalidTopology() {
-    List<NodeSchema> schemas = new ArrayList<NodeSchema>();
-    schemas.add(ROOT_SCHEMA);
-    schemas.add(RACK_SCHEMA);
-    schemas.add(LEAF_SCHEMA);
-    NodeSchemaManager.getInstance().init(schemas.toArray(new NodeSchema[0]),
-        true);
+    NodeSchema[] schemas =
+        new NodeSchema[]{ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
+    NodeSchemaManager.getInstance().init(schemas, true);
     NetworkTopology newCluster = new NetworkTopologyImpl(
         NodeSchemaManager.getInstance());
     Node[] invalidDataNodes = new Node[] {
@@ -243,8 +241,10 @@ public class TestNetworkTopologyImpl {
     }
   }
 
-  @Test
-  public void testAncestor() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testAncestor(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     assumeTrue(cluster.getMaxLevel() > 2);
     int maxLevel = cluster.getMaxLevel();
     assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1]));
@@ -270,8 +270,10 @@ public class TestNetworkTopologyImpl {
         maxLevel - 1));
   }
 
-  @Test
-  public void testAddRemove() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testAddRemove(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.remove(dataNodes[i]);
     }
@@ -305,8 +307,11 @@ public class TestNetworkTopologyImpl {
     }
   }
 
-  @Test
-  public void testGetNumOfNodesWithLevel() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testGetNumOfNodesWithLevel(NodeSchema[] schemas,
+      Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     int maxLevel = cluster.getMaxLevel();
     try {
       assertEquals(1, cluster.getNumOfNodes(0));
@@ -343,8 +348,10 @@ public class TestNetworkTopologyImpl {
     assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel));
   }
 
-  @Test
-  public void testGetNodesWithLevel() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testGetNodesWithLevel(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     int maxLevel = cluster.getMaxLevel();
     try {
       assertNotNull(cluster.getNodes(0));
@@ -367,8 +374,10 @@ public class TestNetworkTopologyImpl {
     assertEquals(dataNodes.length, cluster.getNodes(maxLevel).size());
   }
 
-  @Test
-  public void testChooseRandomSimple() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testChooseRandomSimple(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     String path =
         dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath();
     assertEquals(path, cluster.chooseRandom(path).getNetworkFullPath());
@@ -378,7 +387,7 @@ public class TestNetworkTopologyImpl {
       assertTrue(cluster.chooseRandom(path).getNetworkLocation()
           .startsWith(path));
       Node node = cluster.chooseRandom("~" + path);
-      assertTrue(!node.getNetworkLocation()
+      assertFalse(node.getNetworkLocation()
           .startsWith(path));
       path = path.substring(0,
           path.lastIndexOf(PATH_SEPARATOR_STR));
@@ -408,8 +417,11 @@ public class TestNetworkTopologyImpl {
   /**
    * Following test checks that chooseRandom works for an excluded scope.
    */
-  @Test
-  public void testChooseRandomExcludedScope() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testChooseRandomExcludedScope(NodeSchema[] schemas,
+      Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
         random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
     String scope;
@@ -421,7 +433,7 @@ public class TestNetworkTopologyImpl {
         frequency = pickNodesAtRandom(100, scope, null, 0);
         for (Node key : dataNodes) {
           if (key.isDescendant(path)) {
-            assertTrue(frequency.get(key) == 0);
+            assertEquals(0, (int) frequency.get(key));
           }
         }
         path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
@@ -435,18 +447,18 @@ public class TestNetworkTopologyImpl {
     }
 
     // "" excludedScope,  no node will ever be chosen
-    List<String> pathList = new ArrayList();
+    List<String> pathList = new ArrayList<>();
     pathList.add("");
     frequency = pickNodes(100, pathList, null, null, 0);
     for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) == 0);
+      assertEquals(0, frequency.get(key));
     }
 
     // "~" scope, no node will ever be chosen
     scope = "~";
     frequency = pickNodesAtRandom(100, scope, null, 0);
     for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) == 0);
+      assertEquals(0, frequency.get(key));
     }
     // out network topology excluded scope, every node should be chosen
     pathList.clear();
@@ -461,8 +473,11 @@ public class TestNetworkTopologyImpl {
   /**
    * Following test checks that chooseRandom works for an excluded nodes.
    */
-  @Test
-  public void testChooseRandomExcludedNode() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testChooseRandomExcludedNode(NodeSchema[] schemas,
+      Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     Node[][] excludedNodeLists = {
         {},
         {dataNodes[0]},
@@ -489,9 +504,8 @@ public class TestNetworkTopologyImpl {
               (ancestorList.size() > 0 &&
                   ancestorList.stream()
                       .map(a -> (InnerNode) a)
-                      .filter(a -> a.isAncestor(key))
-                      .collect(Collectors.toList()).size() > 0)) {
-            assertTrue(frequency.get(key) == 0);
+                      .anyMatch(a -> a.isAncestor(key)))) {
+            assertEquals(0, frequency.get(key));
           }
         }
         ancestorGen++;
@@ -503,7 +517,7 @@ public class TestNetworkTopologyImpl {
     while (ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen);
       for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) == 0);
+        assertEquals(0, frequency.get(key));
       }
       ancestorGen++;
     }
@@ -522,8 +536,11 @@ public class TestNetworkTopologyImpl {
   /**
   * Following test checks that chooseRandom works for excluded nodes and scope.
    */
-  @Test
-  public void testChooseRandomExcludedNodeAndScope() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testChooseRandomExcludedNodeAndScope(NodeSchema[] schemas,
+      Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
         random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
     Node[][] excludedNodeLists = {
@@ -558,9 +575,8 @@ public class TestNetworkTopologyImpl {
                   (ancestorList.size() > 0 &&
                       ancestorList.stream()
                           .map(a -> (InnerNode) a)
-                          .filter(a -> a.isAncestor(key))
-                          .collect(Collectors.toList()).size() > 0)) {
-                assertTrue(frequency.get(key) == 0);
+                          .anyMatch(a -> a.isAncestor(key)))) {
+                assertEquals(0, frequency.get(key));
               }
             }
           }
@@ -580,7 +596,7 @@ public class TestNetworkTopologyImpl {
           frequency =
               pickNodesAtRandom(leafNum, scope, excludedList, ancestorGen);
           for (Node key : dataNodes) {
-            assertTrue(frequency.get(key) == 0);
+            assertEquals(0, frequency.get(key));
           }
           ancestorGen++;
         }
@@ -603,8 +619,11 @@ public class TestNetworkTopologyImpl {
    * Following test checks that chooseRandom works for excluded nodes, scope
    * and ancestor generation.
    */
-  @Test
-  public void testChooseRandomWithAffinityNode() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testChooseRandomWithAffinityNode(NodeSchema[] schemas,
+      Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
         random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)};
     Node[][] excludedNodeLists = {
@@ -669,8 +688,8 @@ public class TestNetworkTopologyImpl {
                         "through ancestor node's leaf nodes. node:" +
                         key.getNetworkFullPath() + ", ancestor node:" +
                         affinityAncestor.getNetworkFullPath() +
-                        ", excludedScope: " + pathList.toString() + ", " +
-                        "excludedList:" + excludedList.toString());
+                        ", excludedScope: " + pathList + ", " +
+                        "excludedList:" + excludedList);
                   }
                 }
               }
@@ -678,7 +697,7 @@ public class TestNetworkTopologyImpl {
             ancestorGen--;
           }
           pathList = pathList.stream().map(path ->
-              path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)))
+                  path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)))
               .collect(Collectors.toList());
         }
       }
@@ -697,7 +716,7 @@ public class TestNetworkTopologyImpl {
             frequency = pickNodesAtRandom(leafNum, scope, excludedList,
                 dataNodes[k], ancestorGen);
             for (Node key : dataNodes) {
-              assertTrue(frequency.get(key) == 0);
+              assertEquals(0, frequency.get(key));
             }
             ancestorGen++;
           }
@@ -809,8 +828,10 @@ public class TestNetworkTopologyImpl {
     assertEquals(18, newCluster.getDistanceCost(nodeList[0], nodeList[3]));
   }
 
-  @Test
-  public void testSortByDistanceCost() {
+  @ParameterizedTest
+  @MethodSource("topologies")
+  public void testSortByDistanceCost(NodeSchema[] schemas, Node[] nodeArray) {
+    initNetworkTopology(schemas, nodeArray);
     Node[][] nodes = {
         {},
         {dataNodes[0]},
@@ -849,12 +870,12 @@ public class TestNetworkTopologyImpl {
             if ((i + 1) < ret.size()) {
               int cost1 = cluster.getDistanceCost(reader, ret.get(i));
               int cost2 = cluster.getDistanceCost(reader, ret.get(i + 1));
-              assertTrue("reader:" + (reader != null ?
-                  reader.getNetworkFullPath() : "null") +
-                  ",node1:" + ret.get(i).getNetworkFullPath() +
-                  ",node2:" + ret.get(i + 1).getNetworkFullPath() +
-                  ",cost1:" + cost1 + ",cost2:" + cost2,
-                  cost1 == Integer.MAX_VALUE || cost1 <= cost2);
+              assertTrue(cost1 == Integer.MAX_VALUE || cost1 <= cost2,
+                  "reader:" + (reader != null ?
+                      reader.getNetworkFullPath() : "null") +
+                      ",node1:" + ret.get(i).getNetworkFullPath() +
+                      ",node2:" + ret.get(i + 1).getNetworkFullPath() +
+                      ",cost1:" + cost1 + ",cost2:" + cost2);
             }
           }
           length--;
@@ -875,12 +896,12 @@ public class TestNetworkTopologyImpl {
             int cost2 = cluster.getDistanceCost(
                 reader, sortedNodeList.get(i + 1));
             // node can be removed when called in testConcurrentAccess
-            assertTrue("reader:" + (reader != null ?
-                reader.getNetworkFullPath() : "null") +
-                ",node1:" + sortedNodeList.get(i).getNetworkFullPath() +
-                ",node2:" + sortedNodeList.get(i + 1).getNetworkFullPath() +
-                ",cost1:" + cost1 + ",cost2:" + cost2,
-                cost1 == Integer.MAX_VALUE || cost1 <= cost2);
+            assertTrue(cost1 == Integer.MAX_VALUE || cost1 <= cost2,
+                "reader:" + (reader != null ?
+                    reader.getNetworkFullPath() : "null") +
+                    ",node1:" + sortedNodeList.get(i).getNetworkFullPath() +
+                    ",node2:" + sortedNodeList.get(i + 1).getNetworkFullPath() 
+
+                    ",cost1:" + cost1 + ",cost2:" + cost2);
           }
         }
         length--;
@@ -918,15 +939,15 @@ public class TestNetworkTopologyImpl {
     NodeImpl r1 = new NodeImpl("r1", "/", NODE_COST_DEFAULT);
     NodeImpl r12 = new NodeImpl("r12", "/", NODE_COST_DEFAULT);
     NodeImpl dc = new NodeImpl("dc", "/r12", NODE_COST_DEFAULT);
-    Assert.assertFalse(r1.isAncestor(dc));
-    Assert.assertFalse(r1.isAncestor("/r12/dc2"));
-    Assert.assertFalse(dc.isDescendant(r1));
-    Assert.assertFalse(dc.isDescendant("/r1"));
+    assertFalse(r1.isAncestor(dc));
+    assertFalse(r1.isAncestor("/r12/dc2"));
+    assertFalse(dc.isDescendant(r1));
+    assertFalse(dc.isDescendant("/r1"));
 
-    Assert.assertTrue(r12.isAncestor(dc));
-    Assert.assertTrue(r12.isAncestor("/r12/dc2"));
-    Assert.assertTrue(dc.isDescendant(r12));
-    Assert.assertTrue(dc.isDescendant("/r12"));
+    assertTrue(r12.isAncestor(dc));
+    assertTrue(r12.isAncestor("/r12/dc2"));
+    assertTrue(dc.isDescendant(r12));
+    assertTrue(dc.isDescendant("/r12"));
   }
 
   @Test
@@ -939,8 +960,8 @@ public class TestNetworkTopologyImpl {
 
     List<String> excludedScope = new ArrayList<>();
     excludedScope.add("/r1");
-    Assert.assertFalse(n1.isDescendant("/r1"));
-    Assert.assertEquals(n1, dc.getLeaf(0, excludedScope, null, 0));
+    assertFalse(n1.isDescendant("/r1"));
+    assertEquals(n1, dc.getLeaf(0, excludedScope, null, 0));
   }
 
   private static Node createDatanode(String name, String path) {
@@ -959,7 +980,7 @@ public class TestNetworkTopologyImpl {
    */
   private Map<Node, Integer> pickNodesAtRandom(int numNodes,
       String excludedScope, Collection<Node> excludedNodes, int ancestorGen) {
-    Map<Node, Integer> frequency = new HashMap<Node, Integer>();
+    Map<Node, Integer> frequency = new HashMap<>();
     for (Node dnd : dataNodes) {
       frequency.put(dnd, 0);
     }
@@ -989,7 +1010,7 @@ public class TestNetworkTopologyImpl {
   private Map<Node, Integer> pickNodesAtRandom(int numNodes,
       String excludedScope, Collection<Node> excludedNodes, Node affinityNode,
       int ancestorGen) {
-    Map<Node, Integer> frequency = new HashMap<Node, Integer>();
+    Map<Node, Integer> frequency = new HashMap<>();
     for (Node dnd : dataNodes) {
       frequency.put(dnd, 0);
     }
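
TestNetworkTopologyImpl above also shows the recipe for JUnit 4 classes whose
parameterized constructor did real setup work: the constructor body becomes a
plain init method that each @ParameterizedTest invokes as its first statement,
and the per-instance "@Rule Timeout" becomes a class-level @Timeout annotation
(tightened here from 300 to 30 seconds). Sketched minimally, bodies elided:

@Timeout(30)  // class-level, replaces "@Rule public Timeout testTimeout"
public class TestNetworkTopologyImpl {
  private NetworkTopology cluster;
  private Node[] dataNodes;

  // formerly the @RunWith(Parameterized.class) constructor
  public void initNetworkTopology(NodeSchema[] schemas, Node[] nodeArray) {
    NodeSchemaManager.getInstance().init(schemas, true);
    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    dataNodes = nodeArray.clone();
  }

  @ParameterizedTest
  @MethodSource("topologies")
  public void testContains(NodeSchema[] schemas, Node[] nodeArray) {
    initNetworkTopology(schemas, nodeArray);  // each test re-runs the setup
    // ...
  }
}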
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
index c00aae98cd..d960097d88 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaLoader.java
@@ -17,103 +17,80 @@
  */
 package org.apache.hadoop.hdds.scm.net;
 
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.runners.Enclosed;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 import java.io.FileNotFoundException;
 import java.net.URL;
-import java.util.Arrays;
-import java.util.Collection;
+import java.util.stream.Stream;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 /** Test the node schema loader. */
-@RunWith(Enclosed.class)
+@Timeout(2)
 public class TestNodeSchemaLoader {
 
   /**
    * Parameterized test cases for various error conditions.
    */
-  @RunWith(Parameterized.class)
-  public static class ParameterizedTests {
-
-    private final String schemaFile;
-    private final String errMsg;
-
-    @Rule
-    public Timeout testTimeout = Timeout.seconds(2);
-
-    @Parameters
-    public static Collection<Object[]> getSchemaFiles() {
-      Object[][] schemaFiles = new Object[][]{
-          {"enforce-error.xml", "layer without prefix defined"},
-          {"invalid-cost.xml", "Cost should be positive number or 0"},
-          {"multiple-leaf.xml", "Multiple LEAF layers are found"},
-          {"multiple-root.xml", "Multiple ROOT layers are found"},
-          {"no-leaf.xml", "No LEAF layer is found"},
-          {"no-root.xml", "No ROOT layer is found"},
-          {"path-layers-size-mismatch.xml",
-              "Topology path depth doesn't match layer element numbers"},
-          {"path-with-id-reference-failure.xml",
-              "No layer found for id"},
-          {"unknown-layer-type.xml", "Unsupported layer type"},
-          {"wrong-path-order-1.xml",
-              "Topology path doesn't start with ROOT layer"},
-          {"wrong-path-order-2.xml",
-              "Topology path doesn't end with LEAF layer"},
-          {"no-topology.xml", "no or multiple <topology> element"},
-          {"multiple-topology.xml", "no or multiple <topology> element"},
-          {"invalid-version.xml", "Bad layoutversion value"},
-          {"external-entity.xml", "accessExternalDTD"},
-      };
-      return Arrays.asList(schemaFiles);
-    }
-
-    public ParameterizedTests(String schemaFile, String errMsg) {
-      this.schemaFile = schemaFile;
-      this.errMsg = errMsg;
-    }
+  public static Stream<Arguments> getSchemaFiles() {
+    return Stream.of(
+        arguments("enforce-error.xml", "layer without prefix defined"),
+        arguments("invalid-cost.xml", "Cost should be positive number or 0"),
+        arguments("multiple-leaf.xml", "Multiple LEAF layers are found"),
+        arguments("multiple-root.xml", "Multiple ROOT layers are found"),
+        arguments("no-leaf.xml", "No LEAF layer is found"),
+        arguments("no-root.xml", "No ROOT layer is found"),
+        arguments("path-layers-size-mismatch.xml",
+            "Topology path depth doesn't match layer element numbers"),
+        arguments("path-with-id-reference-failure.xml",
+            "No layer found for id"),
+        arguments("unknown-layer-type.xml", "Unsupported layer type"),
+        arguments("wrong-path-order-1.xml",
+            "Topology path doesn't start with ROOT layer"),
+        arguments("wrong-path-order-2.xml",
+            "Topology path doesn't end with LEAF layer"),
+        arguments("no-topology.xml", "no or multiple <topology> element"),
+        arguments("multiple-topology.xml", "no or multiple <topology> 
element"),
+        arguments("invalid-version.xml", "Bad layoutversion value"),
+        arguments("external-entity.xml", "accessExternalDTD")
+    );
+  }
 
-    @Test
-    public void testInvalid() {
-      String filePath = getClassloaderResourcePath(schemaFile);
-      Exception e = assertThrows(IllegalArgumentException.class,
-          () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
-      assertMessageContains(e.getMessage(), errMsg, schemaFile);
-    }
+  @ParameterizedTest
+  @MethodSource("getSchemaFiles")
+  public void testInvalid(String schemaFile, String errMsg) {
+    String filePath = getClassloaderResourcePath(schemaFile);
+    Exception e = assertThrows(IllegalArgumentException.class,
+        () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+    assertMessageContains(e.getMessage(), errMsg, schemaFile);
   }
 
   /**
    * Test cases that do not use the parameters, should be executed only once.
    */
-  public static class NonParameterizedTests {
-
-    private static final String VALID_SCHEMA_FILE = "good.xml";
 
-    @Rule
-    public Timeout testTimeout = Timeout.seconds(2);
+  private static final String VALID_SCHEMA_FILE = "good.xml";
 
-    @Test
-    public void testGood() throws Exception {
-      String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE);
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-    }
+  @Test
+  public void testGood() throws Exception {
+    String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE);
+    NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
+  }
 
-    @Test
-    public void testNotExist() {
-      String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE)
-          .replace(VALID_SCHEMA_FILE, "non-existent.xml");
-      Exception e = assertThrows(FileNotFoundException.class,
-          () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
-      assertMessageContains(e.getMessage(), "not found", "non-existent.xml");
-    }
+  @Test
+  public void testNotExist() {
+    String filePath = getClassloaderResourcePath(VALID_SCHEMA_FILE)
+        .replace(VALID_SCHEMA_FILE, "non-existent.xml");
+    Exception e = assertThrows(FileNotFoundException.class,
+        () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+    assertMessageContains(e.getMessage(), "not found", "non-existent.xml");
   }
 
   private static void assertMessageContains(
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
index bb99a8916a..6756485926 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
@@ -17,87 +17,72 @@
  */
 package org.apache.hadoop.hdds.scm.net;
 
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Arrays;
-import java.util.Collection;
+import java.io.FileNotFoundException;
+import java.util.stream.Stream;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 /** Test the node schema loader. */
-@RunWith(Parameterized.class)
+@Timeout(30)
 public class TestYamlSchemaLoader {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestYamlSchemaLoader.class);
-  private ClassLoader classLoader =
+  private final ClassLoader classLoader =
       Thread.currentThread().getContextClassLoader();
 
-  public TestYamlSchemaLoader(String schemaFile, String errMsg) {
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/" + schemaFile).getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("expect exceptions");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains(errMsg));
-    }
+  public static Stream<Arguments> getSchemaFiles() {
+    return Stream.of(
+        arguments("multiple-root.yaml", "Multiple root"),
+        arguments("middle-leaf.yaml", "Leaf node in the middle")
+    );
   }
 
-  @Rule
-  public Timeout testTimeout = Timeout.seconds(30);
-
-  @Parameters
-  public static Collection<Object[]> getSchemaFiles() {
-    Object[][] schemaFiles = new Object[][]{
-        {"multiple-root.yaml", "Multiple root"},
-        {"middle-leaf.yaml", "Leaf node in the middle"},
-    };
-    return Arrays.asList(schemaFiles);
+  @ParameterizedTest
+  @MethodSource("getSchemaFiles")
+  public void loadSchemaFromFile(String schemaFile, String errMsg) {
+    String filePath = classLoader.getResource(
+        "./networkTopologyTestFiles/" + schemaFile).getPath();
+    Throwable e = assertThrows(IllegalArgumentException.class, () ->
+        NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+    assertTrue(e.getMessage().contains(errMsg));
   }
 
-
   @Test
   public void testGood() {
-    try {
-      String filePath = classLoader.getResource(
-              "./networkTopologyTestFiles/good.yaml").getPath();
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
+    String filePath = classLoader.getResource(
+        "./networkTopologyTestFiles/good.yaml").getPath();
+    assertDoesNotThrow(() ->
+        NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
   }
 
   @Test
   public void testNotExist() {
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.yaml").getPath() + ".backup";
-    try {
-      NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      fail("should fail");
-    } catch (Throwable e) {
-      assertTrue(e.getMessage().contains("not found"));
-    }
+    Throwable e = assertThrows(FileNotFoundException.class, () ->
+        NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+    assertTrue(e.getMessage().contains("not found"));
   }
 
   @Test
   public void testDefaultYaml() {
-    try {
-      String filePath = classLoader.getResource(
-          "network-topology-default.yaml").getPath();
-      NodeSchemaLoader.NodeSchemaLoadResult result =
-          NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
-      Assert.assertEquals(3, result.getSchemaList().size());
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
+    String filePath = classLoader.getResource(
+        "network-topology-default.yaml").getPath();
+    NodeSchemaLoader.NodeSchemaLoadResult result =
+        assertDoesNotThrow(() ->
+            NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
+    assertEquals(3, result.getSchemaList().size());
   }
 }
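
Beyond parameterization, the last two files carry a companion cleanup of
exception testing: try/fail/catch blocks give way to assertThrows, which makes
the expected exception type explicit and returns the exception for message
checks, and assertDoesNotThrow, which returns the computed value on the happy
path. The shape, condensed from the TestYamlSchemaLoader hunks above:

// Before (JUnit 4 idiom)
try {
  NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
  fail("expect exceptions");
} catch (Throwable e) {
  assertTrue(e.getMessage().contains(errMsg));
}

// After (JUnit 5)
Throwable e = assertThrows(IllegalArgumentException.class,
    () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
assertTrue(e.getMessage().contains(errMsg));

// Happy path: assertDoesNotThrow also yields the result for further asserts
NodeSchemaLoader.NodeSchemaLoadResult result = assertDoesNotThrow(
    () -> NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
assertEquals(3, result.getSchemaList().size());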


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
