This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 39ce464628 HDDS-9980. Improve assertTrue assertions in hdds-common 
(#5910)
39ce464628 is described below

commit 39ce464628e1417fad91f61d337891960d7bde5f
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Fri Jan 5 20:00:06 2024 +0100

    HDDS-9980. Improve assertTrue assertions in hdds-common (#5910)
---
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |  17 +-
 .../java/org/apache/hadoop/hdds/fs/TestDU.java     |  16 +-
 .../hdds/fs/TestDedicatedDiskSpaceUsage.java       |  11 +-
 .../hadoop/hdds/fs/TestSpaceUsageFactory.java      |  11 +-
 .../hadoop/hdds/protocol/TestDatanodeDetails.java  |   6 +-
 .../org/apache/hadoop/hdds/scm/TestSCMHAUtils.java |   4 +-
 .../container/TestReplicationManagerReport.java    |  24 +--
 .../hdds/scm/net/TestNetworkTopologyImpl.java      | 196 +++++++++------------
 .../hadoop/hdds/scm/net/TestNodeSchemaManager.java |  37 ++--
 .../hadoop/hdds/scm/net/TestYamlSchemaLoader.java  |  22 +--
 .../hadoop/hdds/utils/db/TestLeakDetector.java     |   8 +-
 .../apache/hadoop/ozone/lock/TestLockManager.java  |  25 ++-
 .../upgrade/TestLayoutVersionInstanceFactory.java  |  36 ++--
 .../ozone/upgrade/TestUpgradeFinalizerActions.java |   5 +-
 14 files changed, 187 insertions(+), 231 deletions(-)

diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index fca23cf8ae..0143f341fa 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -39,6 +39,7 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -55,7 +56,7 @@ public class TestHddsUtils {
           CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
 
   @Test
-  public void testGetHostName() {
+  void testGetHostName() {
     assertEquals(Optional.of("localhost"),
         HddsUtils.getHostName("localhost:1234"));
 
@@ -67,7 +68,7 @@ public class TestHddsUtils {
   }
 
   @Test
-  public void validatePath() {
+  void validatePath() {
     HddsUtils.validatePath(Paths.get("/"), Paths.get("/"));
     HddsUtils.validatePath(Paths.get("/a"), Paths.get("/"));
     HddsUtils.validatePath(Paths.get("/a"), Paths.get("/a"));
@@ -86,7 +87,7 @@ public class TestHddsUtils {
   }
 
   @Test
-  public void testGetSCMAddresses() {
+  void testGetSCMAddresses() {
     final OzoneConfiguration conf = new OzoneConfiguration();
     Collection<InetSocketAddress> addresses;
     InetSocketAddress addr;
@@ -133,7 +134,7 @@ public class TestHddsUtils {
       assertTrue(expected1.remove(current.getHostName(),
           current.getPort()));
     }
-    assertTrue(expected1.isEmpty());
+    assertThat(expected1).isEmpty();
 
     // Verify names with spaces
     conf.setStrings(
@@ -147,7 +148,7 @@ public class TestHddsUtils {
       assertTrue(expected2.remove(current.getHostName(),
           current.getPort()));
     }
-    assertTrue(expected2.isEmpty());
+    assertThat(expected2).isEmpty();
 
     // Verify empty value
     conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "");
@@ -176,7 +177,7 @@ public class TestHddsUtils {
 
 
   @Test
-  public void testGetSCMAddressesWithHAConfig() {
+  void testGetSCMAddressesWithHAConfig() {
     OzoneConfiguration conf = new OzoneConfiguration();
     String scmServiceId = "scmserviceId";
     String[] nodes = new String[]{"scm1", "scm2", "scm3"};
@@ -209,7 +210,7 @@ public class TestHddsUtils {
   }
 
   @Test
-  public void testGetNumberFromConfigKeys() {
+  void testGetNumberFromConfigKeys() {
     final String testnum1 = "8";
     final String testnum2 = "7";
     final String serviceId = "id1";
@@ -242,7 +243,7 @@ public class TestHddsUtils {
   }
 
   @Test
-  public void testRedactSensitivePropsForLogging() {
+  void testRedactSensitivePropsForLogging() {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(SENSITIVE_CONFIG_KEYS, String.join("\n",
             "password$",
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index 0d956a18ee..a87f3fad25 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -25,6 +25,7 @@ import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.apache.ozone.test.GenericTestUtils.getTestDir;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assumptions.assumeFalse;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
@@ -42,14 +43,14 @@ public class TestDU {
   private static final File DIR = getTestDir(TestDU.class.getSimpleName());
 
   @BeforeEach
-  public void setUp() {
+  void setUp() {
     assumeFalse(Shell.WINDOWS);
     FileUtil.fullyDelete(DIR);
     assertTrue(DIR.mkdirs());
   }
 
   @AfterEach
-  public void tearDown() throws IOException {
+  void tearDown() throws IOException {
     FileUtil.fullyDelete(DIR);
   }
 
@@ -77,7 +78,7 @@ public class TestDU {
    * This is true for most file systems.
    */
   @Test
-  public void testGetUsed() throws Exception {
+  void testGetUsed() throws Exception {
     final long writtenSize = 32 * KB;
     File file = new File(DIR, "data");
     createFile(file, (int) writtenSize);
@@ -89,7 +90,7 @@ public class TestDU {
   }
 
   @Test
-  public void testExcludePattern() throws IOException {
+  void testExcludePattern() throws IOException {
     createFile(new File(DIR, "include.txt"), (int) (4 * KB));
     createFile(new File(DIR, "exclude.tmp"), (int) (100 * KB));
     SpaceUsageSource du = new DU(DIR, "*.tmp");
@@ -103,10 +104,9 @@ public class TestDU {
     // Allow for extra 8K on-disk slack for local file systems
     // that may store additional file metadata (eg ext attrs).
     final long max = expected + 8 * KB;
-    assertTrue(expected <= actual && actual <= max, () ->
-        String.format(
-            "Invalid on-disk size: %d, expected to be in [%d, %d]",
-            actual, expected, max));
+    assertThat(actual)
+        .isGreaterThanOrEqualTo(expected)
+        .isLessThanOrEqualTo(max);
   }
 
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
index eb68dbdad6..85b21df86b 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
@@ -27,12 +27,13 @@ import java.io.IOException;
 
 import static org.apache.hadoop.hdds.fs.TestDU.createFile;
 import static org.apache.ozone.test.GenericTestUtils.getTestDir;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests for {@link DedicatedDiskSpaceUsage}.
  */
-public class TestDedicatedDiskSpaceUsage {
+class TestDedicatedDiskSpaceUsage {
 
   private static final File DIR =
       getTestDir(TestDedicatedDiskSpaceUsage.class.getSimpleName());
@@ -40,24 +41,24 @@ public class TestDedicatedDiskSpaceUsage {
   private static final int FILE_SIZE = 1024;
 
   @BeforeEach
-  public void setUp() {
+  void setUp() {
     FileUtil.fullyDelete(DIR);
     assertTrue(DIR.mkdirs());
   }
 
   @AfterEach
-  public void tearDown() throws IOException {
+  void tearDown() {
     FileUtil.fullyDelete(DIR);
   }
 
   @Test
-  public void testGetUsed() throws IOException {
+  void testGetUsed() throws IOException {
     File file = new File(DIR, "data");
     createFile(file, FILE_SIZE);
     SpaceUsageSource subject = new DedicatedDiskSpaceUsage(DIR);
 
     // condition comes from TestDFCachingGetSpaceUsed in Hadoop Common
-    assertTrue(subject.getUsedSpace() >= FILE_SIZE - 20);
+    assertThat(subject.getUsedSpace()).isGreaterThanOrEqualTo(FILE_SIZE - 20);
   }
 
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
index 5a6e98e229..496d11e0a4 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
@@ -23,14 +23,15 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 
-import static 
org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory.Conf.configKeyForClassName;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.slf4j.LoggerFactory;
+
+import static 
org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory.Conf.configKeyForClassName;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import org.slf4j.LoggerFactory;
 
 /**
  * Tests for {@link SpaceUsageCheckFactory}.
@@ -101,9 +102,7 @@ public class TestSpaceUsageFactory {
   }
 
   private void assertLogged(String substring) {
-    String output = capturer.getOutput();
-    assertTrue(output.contains(substring), () -> "Expected " + substring + " " +
-        "in log output, but only got: " + output);
+    assertThat(capturer.getOutput()).contains(substring);
   }
 
   private static <T extends SpaceUsageCheckFactory> OzoneConfiguration
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
index e77b97a854..4030f6e46d 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
@@ -27,8 +27,8 @@ import static 
org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.ALL_PORT
 import static 
org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name.V0_PORTS;
 import static org.apache.hadoop.ozone.ClientVersion.DEFAULT_VERSION;
 import static 
org.apache.hadoop.ozone.ClientVersion.VERSION_HANDLES_UNKNOWN_DN_PORTS;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 
 /**
@@ -37,7 +37,7 @@ import static org.junit.jupiter.api.Assertions.fail;
 public class TestDatanodeDetails {
 
   @Test
-  public void protoIncludesNewPortsOnlyForV1() {
+  void protoIncludesNewPortsOnlyForV1() {
     DatanodeDetails subject = MockDatanodeDetails.randomDatanodeDetails();
 
     HddsProtos.DatanodeDetailsProto proto =
@@ -54,7 +54,7 @@ public class TestDatanodeDetails {
     assertEquals(expectedPorts.size(), dn.getPortsCount());
     for (HddsProtos.Port port : dn.getPortsList()) {
       try {
-        assertTrue(expectedPorts.contains(Port.Name.valueOf(port.getName())));
+        assertThat(expectedPorts).contains(Port.Name.valueOf(port.getName()));
       } catch (IllegalArgumentException e) {
         fail("Unknown port: " + port.getName());
       }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestSCMHAUtils.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestSCMHAUtils.java
index e913a639ee..151e7fca67 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestSCMHAUtils.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestSCMHAUtils.java
@@ -26,8 +26,8 @@ import java.util.Collection;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NODES_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NODE_ID_KEY;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
 
 /**
  * Tests for {@code SCMHAUtils}.
@@ -49,6 +49,6 @@ class TestSCMHAUtils {
         SCMHAUtils.getSCMNodeIds(output, service);
 
     assertEquals(2, nodesWithoutSelf.size());
-    assertFalse(nodesWithoutSelf.contains(selfId));
+    assertThat(nodesWithoutSelf).doesNotContain(selfId);
   }
 }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
index ada60397ef..3bf2ef4023 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java
@@ -31,24 +31,24 @@ import java.util.Random;
 import java.util.concurrent.ThreadLocalRandom;
 
 import static com.fasterxml.jackson.databind.node.JsonNodeType.ARRAY;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests for the ReplicationManagerReport class.
  */
-public class TestReplicationManagerReport {
+class TestReplicationManagerReport {
 
   private ReplicationManagerReport report;
 
   @BeforeEach
-  public void setup() {
+  void setup() {
     report = new ReplicationManagerReport();
   }
 
   @Test
-  public void testMetricCanBeIncremented() {
+  void testMetricCanBeIncremented() {
     report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
     report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
     report.increment(ReplicationManagerReport.HealthState.OVER_REPLICATED);
@@ -74,7 +74,7 @@ public class TestReplicationManagerReport {
 
 
   @Test
-  public void testJsonOutput() throws IOException {
+  void testJsonOutput() throws IOException {
     report.increment(HddsProtos.LifeCycleState.OPEN);
     report.increment(HddsProtos.LifeCycleState.CLOSED);
     report.increment(HddsProtos.LifeCycleState.CLOSED);
@@ -95,7 +95,7 @@ public class TestReplicationManagerReport {
     ObjectMapper mapper = new ObjectMapper();
     JsonNode json = mapper.readTree(jsonString);
 
-    assertTrue(json.get("reportTimeStamp").longValue() > 0);
+    assertThat(json.get("reportTimeStamp").longValue()).isPositive();
     JsonNode stats = json.get("stats");
     assertEquals(1, stats.get("OPEN").longValue());
     assertEquals(0, stats.get("CLOSING").longValue());
@@ -121,7 +121,7 @@ public class TestReplicationManagerReport {
   }
 
   @Test
-  public void testContainerIDsCanBeSampled() {
+  void testContainerIDsCanBeSampled() {
     report.incrementAndSample(
         ReplicationManagerReport.HealthState.UNDER_REPLICATED,
         new ContainerID(1));
@@ -156,7 +156,7 @@ public class TestReplicationManagerReport {
   }
 
   @Test
-  public void testSamplesAreLimited() {
+  void testSamplesAreLimited() {
     for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT * 2; i++) {
       report.incrementAndSample(
           ReplicationManagerReport.HealthState.UNDER_REPLICATED,
@@ -171,7 +171,7 @@ public class TestReplicationManagerReport {
   }
 
   @Test
-  public void testSerializeToProtoAndBack() {
+  void testSerializeToProtoAndBack() {
     report.setTimestamp(12345);
     Random rand = ThreadLocalRandom.current();
     for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) {
@@ -203,7 +203,7 @@ public class TestReplicationManagerReport {
   }
 
   @Test
-  public void testDeSerializeCanHandleUnknownMetric() {
+  void testDeSerializeCanHandleUnknownMetric() {
     HddsProtos.ReplicationManagerReportProto.Builder proto =
         HddsProtos.ReplicationManagerReportProto.newBuilder();
     proto.setTimestamp(12345);
@@ -232,14 +232,14 @@ public class TestReplicationManagerReport {
   }
 
   @Test
-  public void testStatCannotBeSetTwice() {
+  void testStatCannotBeSetTwice() {
     report.setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10);
     assertThrows(IllegalStateException.class, () -> report
         .setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10));
   }
 
   @Test
-  public void testSampleCannotBeSetTwice() {
+  void testSampleCannotBeSetTwice() {
     List<ContainerID> containers = new ArrayList<>();
     containers.add(ContainerID.valueOf(1));
     report.setSample(HddsProtos.LifeCycleState.CLOSED.toString(), containers);
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index f0534ae801..67b210a05e 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -41,10 +41,13 @@ import static 
org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
@@ -68,12 +71,12 @@ import org.slf4j.LoggerFactory;
 
 /** Test the network topology functions. */
 @Timeout(30)
-public class TestNetworkTopologyImpl {
+class TestNetworkTopologyImpl {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestNetworkTopologyImpl.class);
   private NetworkTopology cluster;
   private Node[] dataNodes;
-  private Random random = new Random();
+  private final Random random = new Random();
   private Consumer<List<? extends Node>> mockedShuffleOperation;
 
   @BeforeEach
@@ -87,7 +90,7 @@ public class TestNetworkTopologyImpl {
     ).when(mockedShuffleOperation).accept(any());
   }
 
-  public void initNetworkTopology(NodeSchema[] schemas, Node[] nodeArray) {
+  void initNetworkTopology(NodeSchema[] schemas, Node[] nodeArray) {
     NodeSchemaManager.getInstance().init(schemas, true);
     cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance(),
         mockedShuffleOperation);
@@ -97,7 +100,7 @@ public class TestNetworkTopologyImpl {
     }
   }
 
-  public static Stream<Arguments> topologies() {
+  static Stream<Arguments> topologies() {
     return Stream.of(
         arguments(new NodeSchema[] {ROOT_SCHEMA, LEAF_SCHEMA},
             new Node[]{
@@ -191,7 +194,7 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testContains(NodeSchema[] schemas, Node[] nodeArray) {
+  void testContains(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4");
     for (int i = 0; i < dataNodes.length; i++) {
@@ -202,7 +205,7 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testNumOfChildren(NodeSchema[] schemas, Node[] nodeArray) {
+  void testNumOfChildren(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     assertEquals(dataNodes.length, cluster.getNumOfLeafNode(null));
     assertEquals(0, cluster.getNumOfLeafNode("/switch1/node1"));
@@ -210,7 +213,7 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testGetNode(NodeSchema[] schemas, Node[] nodeArray) {
+  void testGetNode(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     assertEquals(cluster.getNode(""), cluster.getNode(null));
     assertEquals(cluster.getNode(""), cluster.getNode("/"));
@@ -226,7 +229,7 @@ public class TestNetworkTopologyImpl {
   }
 
   @Test
-  public void testCreateInvalidTopology() {
+  void testCreateInvalidTopology() {
     NodeSchema[] schemas =
         new NodeSchema[]{ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
     NodeSchemaManager.getInstance().init(schemas, true);
@@ -239,34 +242,29 @@ public class TestNetworkTopologyImpl {
     };
     newCluster.add(invalidDataNodes[0]);
     newCluster.add(invalidDataNodes[1]);
-    try {
-      newCluster.add(invalidDataNodes[2]);
-      fail("expected InvalidTopologyException");
-    } catch (NetworkTopology.InvalidTopologyException e) {
-      assertTrue(e.getMessage().contains("Failed to add"));
-      assertTrue(e.getMessage().contains("Its path depth is not " +
-          newCluster.getMaxLevel()));
-    }
+    Exception e = assertThrows(NetworkTopology.InvalidTopologyException.class,
+        () -> newCluster.add(invalidDataNodes[2]));
+    assertThat(e)
+        .hasMessageContaining("Failed to add")
+        .hasMessageContaining("Its path depth is not " + newCluster.getMaxLevel());
   }
 
   @Test
-  public void testInitWithConfigFile() {
+  void testInitWithConfigFile() {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     OzoneConfiguration conf = new OzoneConfiguration();
-    try {
-      String filePath = classLoader.getResource(
-          "./networkTopologyTestFiles/good.xml").getPath();
-      conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
-      NetworkTopology newCluster = new NetworkTopologyImpl(conf);
-      LOG.info("network topology max level = {}", newCluster.getMaxLevel());
-    } catch (Throwable e) {
-      fail("should succeed");
-    }
+    String filePath = classLoader
+        .getResource("./networkTopologyTestFiles/good.xml")
+        .getPath();
+    conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
+    NetworkTopology newCluster = new NetworkTopologyImpl(conf);
+    LOG.info("network topology max level = {}", newCluster.getMaxLevel());
+    assertEquals(4, newCluster.getMaxLevel());
   }
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testAncestor(NodeSchema[] schemas, Node[] nodeArray) {
+  void testAncestor(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     assumeTrue(cluster.getMaxLevel() > 2);
     int maxLevel = cluster.getMaxLevel();
@@ -295,7 +293,7 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testAddRemove(NodeSchema[] schemas, Node[] nodeArray) {
+  void testAddRemove(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.remove(dataNodes[i]);
@@ -311,95 +309,60 @@ public class TestNetworkTopologyImpl {
       cluster.add(dataNodes[i]);
     }
     // Inner nodes are created automatically
-    assertTrue(cluster.getNumOfNodes(2) > 0);
-
-    try {
-      cluster.add(cluster.chooseRandom(null).getParent());
-      fail("Inner node can not be added manually");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith(
-          "Not allowed to add an inner node"));
-    }
+    assertThat(cluster.getNumOfNodes(2)).isPositive();
 
-    try {
-      cluster.remove(cluster.chooseRandom(null).getParent());
-      fail("Inner node can not be removed manually");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith(
-          "Not allowed to remove an inner node"));
-    }
+    Exception e = assertThrows(IllegalArgumentException.class,
+        () -> cluster.add(cluster.chooseRandom(null).getParent()));
+    assertThat(e).hasMessageStartingWith("Not allowed to add an inner node");
+
+    Exception e2 = assertThrows(IllegalArgumentException.class,
+        () -> cluster.remove(cluster.chooseRandom(null).getParent()));
+    assertThat(e2).hasMessageStartingWith("Not allowed to remove an inner node");
   }
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testGetNumOfNodesWithLevel(NodeSchema[] schemas,
-      Node[] nodeArray) {
+  void testGetNumOfNodesWithLevel(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     int maxLevel = cluster.getMaxLevel();
-    try {
-      assertEquals(1, cluster.getNumOfNodes(0));
-      fail("level 0 is not supported");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
 
-    try {
-      assertEquals(1, cluster.getNumOfNodes(0));
-      fail("level 0 is not supported");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
+    Exception e = assertThrows(IllegalArgumentException.class,
+        () -> cluster.getNumOfNodes(0));
+    assertThat(e).hasMessageStartingWith("Invalid level");
 
-    try {
-      assertEquals(1, cluster.getNumOfNodes(maxLevel + 1));
-      fail("level out of scope");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
+    Exception e2 = assertThrows(IllegalArgumentException.class,
+        () -> cluster.getNumOfNodes(maxLevel + 1));
+    assertThat(e2).hasMessageStartingWith("Invalid level");
 
-    try {
-      assertEquals(1, cluster.getNumOfNodes(maxLevel + 1));
-      fail("level out of scope");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
     // root node
     assertEquals(1, cluster.getNumOfNodes(1));
-    assertEquals(1, cluster.getNumOfNodes(1));
     // leaf nodes
     assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel));
-    assertEquals(dataNodes.length, cluster.getNumOfNodes(maxLevel));
   }
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testGetNodesWithLevel(NodeSchema[] schemas, Node[] nodeArray) {
+  void testGetNodesWithLevel(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     int maxLevel = cluster.getMaxLevel();
-    try {
-      assertNotNull(cluster.getNodes(0));
-      fail("level 0 is not supported");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
 
-    try {
-      assertNotNull(cluster.getNodes(maxLevel + 1));
-      fail("level out of scope");
-    } catch (IllegalArgumentException e) {
-      assertTrue(e.getMessage().startsWith("Invalid level"));
-    }
+    Exception e = assertThrows(IllegalArgumentException.class,
+        () -> cluster.getNodes(0));
+    assertThat(e).hasMessageStartingWith("Invalid level");
+
+    Exception e2 = assertThrows(IllegalArgumentException.class,
+        () -> cluster.getNodes(maxLevel + 1));
+    assertThat(e2).hasMessageStartingWith("Invalid level");
 
     // root node
     assertEquals(1, cluster.getNodes(1).size());
     // leaf nodes
     assertEquals(dataNodes.length, cluster.getNodes(maxLevel).size());
-    assertEquals(dataNodes.length, cluster.getNodes(maxLevel).size());
   }
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testChooseRandomSimple(NodeSchema[] schemas, Node[] nodeArray) {
+  void testChooseRandomSimple(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     String path =
         dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath();
@@ -407,11 +370,11 @@ public class TestNetworkTopologyImpl {
     path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR));
     // test chooseRandom(String scope)
     while (!path.equals(ROOT)) {
-      assertTrue(cluster.chooseRandom(path).getNetworkLocation()
-          .startsWith(path));
+      assertThat(cluster.chooseRandom(path).getNetworkLocation())
+          .startsWith(path);
       Node node = cluster.chooseRandom("~" + path);
-      assertFalse(node.getNetworkLocation()
-          .startsWith(path));
+      assertThat(node.getNetworkLocation())
+          .doesNotStartWith(path);
       path = path.substring(0,
           path.lastIndexOf(PATH_SEPARATOR_STR));
     }
@@ -442,7 +405,7 @@ public class TestNetworkTopologyImpl {
    */
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testChooseRandomExcludedScope(NodeSchema[] schemas,
+  void testChooseRandomExcludedScope(NodeSchema[] schemas,
       Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
@@ -466,7 +429,7 @@ public class TestNetworkTopologyImpl {
     // null excludedScope, every node should be chosen
     frequency = pickNodes(100, null, null, null, 0);
     for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) != 0);
+      assertNotEquals(0, frequency.get(key));
     }
 
     // "" excludedScope,  no node will ever be chosen
@@ -489,7 +452,7 @@ public class TestNetworkTopologyImpl {
     frequency = pickNodes(
         cluster.getNumOfLeafNode(null), pathList, null, null, 0);
     for (Node key : dataNodes) {
-      assertTrue(frequency.get(key) != 0);
+      assertNotEquals(0, frequency.get(key));
     }
   }
 
@@ -498,7 +461,7 @@ public class TestNetworkTopologyImpl {
    */
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testChooseRandomExcludedNode(NodeSchema[] schemas,
+  void testChooseRandomExcludedNode(NodeSchema[] schemas,
       Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     Node[][] excludedNodeLists = {
@@ -550,7 +513,7 @@ public class TestNetworkTopologyImpl {
     while (ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen);
       for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) != 0);
+        assertNotEquals(0, frequency.get(key));
       }
       ancestorGen++;
     }
@@ -561,7 +524,7 @@ public class TestNetworkTopologyImpl {
    */
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testChooseRandomExcludedNodeAndScope(NodeSchema[] schemas,
+  void testChooseRandomExcludedNodeAndScope(NodeSchema[] schemas,
       Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
@@ -632,7 +595,7 @@ public class TestNetworkTopologyImpl {
     while (ancestorGen < cluster.getMaxLevel()) {
       frequency = pickNodes(leafNum, null, null, null, ancestorGen);
       for (Node key : dataNodes) {
-        assertTrue(frequency.get(key) != 0);
+        assertNotEquals(0, frequency.get(key));
       }
       ancestorGen++;
     }
@@ -644,7 +607,7 @@ public class TestNetworkTopologyImpl {
    */
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testChooseRandomWithAffinityNode(NodeSchema[] schemas,
+  void testChooseRandomWithAffinityNode(NodeSchema[] schemas,
       Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     int[] excludedNodeIndexs = {0, dataNodes.length - 1,
@@ -764,20 +727,17 @@ public class TestNetworkTopologyImpl {
         ancestorGen--;
       }
     }
+
     // check invalid ancestor generation
-    try {
-      cluster.chooseRandom(null, null, null, dataNodes[0],
-          cluster.getMaxLevel());
-      fail("ancestor generation exceeds max level, should fail");
-    } catch (Exception e) {
-      assertTrue(e.getMessage().startsWith("ancestorGen " +
-          cluster.getMaxLevel() +
-          " exceeds this network topology acceptable level"));
-    }
+    Exception e = assertThrows(IllegalArgumentException.class,
+        () -> cluster.chooseRandom(null, null, null, dataNodes[0],
+            cluster.getMaxLevel()));
+    assertThat(e.getMessage()).startsWith("ancestorGen " + cluster.getMaxLevel() +
+        " exceeds this network topology acceptable level");
   }
 
   @Test
-  public void testCost() {
+  void testCost() {
     // network topology with default cost
     List<NodeSchema> schemas = new ArrayList<>();
     schemas.add(ROOT_SCHEMA);
@@ -853,7 +813,7 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testSortByDistanceCost(NodeSchema[] schemas, Node[] nodeArray) {
+  void testSortByDistanceCost(NodeSchema[] schemas, Node[] nodeArray) {
     initNetworkTopology(schemas, nodeArray);
     Node[][] nodes = {
         {},
@@ -936,8 +896,8 @@ public class TestNetworkTopologyImpl {
 
   @ParameterizedTest
   @MethodSource("topologies")
-  public void testSortByDistanceCostNullReader(NodeSchema[] schemas,
-                                               Node[] nodeArray) {
+  void testSortByDistanceCostNullReader(NodeSchema[] schemas,
+      Node[] nodeArray) {
     // GIVEN
     // various cluster topologies with null reader
     initNetworkTopology(schemas, nodeArray);
@@ -955,14 +915,14 @@ public class TestNetworkTopologyImpl {
       verify(mockedShuffleOperation).accept(any());
       verify(spyCluster, never()).getDistanceCost(any(), any());
       assertEquals(length, ret.size());
-      assertTrue(nodeList.containsAll(ret));
+      assertThat(nodeList).containsAll(ret);
       reset(mockedShuffleOperation);
       length--;
     }
   }
 
   @Test
-  public void testSingleNodeRackWithAffinityNode() {
+  void testSingleNodeRackWithAffinityNode() {
     // network topology with default cost
     List<NodeSchema> schemas = new ArrayList<>();
     schemas.add(ROOT_SCHEMA);
@@ -987,7 +947,7 @@ public class TestNetworkTopologyImpl {
   }
 
   @Test
-  public void testUpdateNode() {
+  void testUpdateNode() {
     List<NodeSchema> schemas = new ArrayList<>();
     schemas.add(ROOT_SCHEMA);
     schemas.add(DATACENTER_SCHEMA);
@@ -1025,7 +985,9 @@ public class TestNetworkTopologyImpl {
     newCluster.update(null, newNode3);
     assertTrue(newCluster.contains(newNode3));
   }
-  public void testIsAncestor() {
+
+  @Test
+  void testIsAncestor() {
     NodeImpl r1 = new NodeImpl("r1", "/", NODE_COST_DEFAULT);
     NodeImpl r12 = new NodeImpl("r12", "/", NODE_COST_DEFAULT);
     NodeImpl dc = new NodeImpl("dc", "/r12", NODE_COST_DEFAULT);
@@ -1041,7 +1003,7 @@ public class TestNetworkTopologyImpl {
   }
 
   @Test
-  public void testGetLeafOnLeafParent() {
+  void testGetLeafOnLeafParent() {
     InnerNodeImpl root = new InnerNodeImpl("", "", null, 0, 0);
     InnerNodeImpl r12 = new InnerNodeImpl("r12", "/", root, 1, 0);
     InnerNodeImpl dc = new InnerNodeImpl("dc", "/r12", r12, 2, 0);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
index baa24cd917..fb4835da20 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
@@ -20,29 +20,25 @@ package org.apache.hadoop.hdds.scm.net;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
 import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /** Test the node schema loader. */
 @Timeout(30)
-public class TestNodeSchemaManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestNodeSchemaManager.class);
-  private ClassLoader classLoader =
+class TestNodeSchemaManager {
+  private final ClassLoader classLoader =
       Thread.currentThread().getContextClassLoader();
-  private NodeSchemaManager manager;
-  private OzoneConfiguration conf;
+  private final NodeSchemaManager manager;
+  private final OzoneConfiguration conf;
 
-  public TestNodeSchemaManager() {
+  TestNodeSchemaManager() {
     conf = new OzoneConfiguration();
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.xml").getPath();
@@ -52,38 +48,37 @@ public class TestNodeSchemaManager {
   }
 
   @Test
-  public void testFailure1() {
+  void testFailure1() {
     assertThrows(IllegalArgumentException.class,
         () -> manager.getCost(0));
   }
 
   @Test
-  public void testFailure2() {
+  void testFailure2() {
     assertThrows(IllegalArgumentException.class,
         () -> manager.getCost(manager.getMaxLevel() + 1));
   }
 
   @Test
-  public void testPass() {
+  void testPass() {
     assertEquals(4, manager.getMaxLevel());
     for (int i  = 1; i <= manager.getMaxLevel(); i++) {
-      assertTrue(manager.getCost(i) == 1 || manager.getCost(i) == 0);
+      assertThat(manager.getCost(i)).isIn(0, 1);
     }
   }
 
   @Test
-  public void testInitFailure() {
+  void testInitFailure() {
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.xml").getPath() + ".backup";
     conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
     Throwable e = assertThrows(RuntimeException.class,
         () -> manager.init(conf));
-    assertTrue(e.getMessage().contains("Failed to load schema file:" +
-        filePath));
+    assertThat(e).hasMessageContaining("Failed to load schema file:" + filePath);
   }
 
   @Test
-  public void testComplete() {
+  void testComplete() {
     // successful complete action
     String path = "/node1";
     assertEquals(DEFAULT_RACK + DEFAULT_NODEGROUP + path,
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
index 6756485926..d18d82698f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestYamlSchemaLoader.java
@@ -22,27 +22,23 @@ import org.junit.jupiter.api.Timeout;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.FileNotFoundException;
 import java.util.stream.Stream;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.params.provider.Arguments.arguments;
 
 /** Test the node schema loader. */
 @Timeout(30)
-public class TestYamlSchemaLoader {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestYamlSchemaLoader.class);
+class TestYamlSchemaLoader {
   private final ClassLoader classLoader =
       Thread.currentThread().getContextClassLoader();
 
-  public static Stream<Arguments> getSchemaFiles() {
+  static Stream<Arguments> getSchemaFiles() {
     return Stream.of(
         arguments("multiple-root.yaml", "Multiple root"),
         arguments("middle-leaf.yaml", "Leaf node in the middle")
@@ -51,16 +47,16 @@ public class TestYamlSchemaLoader {
 
   @ParameterizedTest
   @MethodSource("getSchemaFiles")
-  public void loadSchemaFromFile(String schemaFile, String errMsg) {
+  void loadSchemaFromFile(String schemaFile, String errMsg) {
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/" + schemaFile).getPath();
     Throwable e = assertThrows(IllegalArgumentException.class, () ->
         NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
-    assertTrue(e.getMessage().contains(errMsg));
+    assertThat(e).hasMessageContaining(errMsg);
   }
 
   @Test
-  public void testGood() {
+  void testGood() {
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.yaml").getPath();
     assertDoesNotThrow(() ->
@@ -68,16 +64,16 @@ public class TestYamlSchemaLoader {
   }
 
   @Test
-  public void testNotExist() {
+  void testNotExist() {
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.yaml").getPath() + ".backup";
     Throwable e = assertThrows(FileNotFoundException.class, () ->
         NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath));
-    assertTrue(e.getMessage().contains("not found"));
+    assertThat(e).hasMessageContaining("not found");
   }
 
   @Test
-  public void testDefaultYaml() {
+  void testDefaultYaml() {
     String filePath = classLoader.getResource(
         "network-topology-default.yaml").getPath();
     NodeSchemaLoader.NodeSchemaLoadResult result =
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestLeakDetector.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestLeakDetector.java
index 0d85e79f26..d1b8bc8b4d 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestLeakDetector.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestLeakDetector.java
@@ -19,15 +19,15 @@ package org.apache.hadoop.hdds.utils.db;
 
 import org.junit.jupiter.api.Test;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test {@link CodecBuffer.LeakDetector}.
  */
-public final class TestLeakDetector {
+final class TestLeakDetector {
   @Test
-  public void test() throws Exception {
+  void test() throws Exception {
     CodecBuffer.enableLeakDetection();
     // allocate a buffer and then release it.
     CodecBuffer.allocateHeap(2).release();
@@ -39,6 +39,6 @@ public final class TestLeakDetector {
     // It should detect a buffer leak.
+    final AssertionError e = assertThrows(AssertionError.class, CodecTestUtil::gc);
     e.printStackTrace(System.out);
-    assertTrue(e.getMessage().startsWith("Found 1 leak"));
+    assertThat(e).hasMessageStartingWith("Found 1 leak");
   }
 }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
index 6dacf9f7f0..62b8e6ac50 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lock/TestLockManager.java
@@ -26,6 +26,7 @@ import org.junit.jupiter.api.Timeout;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -33,23 +34,22 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Test-cases to test LockManager.
  */
-public class TestLockManager {
+class TestLockManager {
 
   @Test
   @Timeout(1)
-  public void testWriteLockWithDifferentResource() {
+  void testWriteLockWithDifferentResource() {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     manager.writeLock("/resourceOne");
     // This should work, as they are different resource.
-    manager.writeLock("/resourceTwo");
+    assertDoesNotThrow(() -> manager.writeLock("/resourceTwo"));
     manager.writeUnlock("/resourceOne");
     manager.writeUnlock("/resourceTwo");
-    assertTrue(true);
   }
 
   @Test
-  public void testWriteLockWithSameResource() throws Exception {
+  void testWriteLockWithSameResource() throws Exception {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     final AtomicBoolean gotLock = new AtomicBoolean(false);
@@ -74,18 +74,17 @@ public class TestLockManager {
 
   @Test
   @Timeout(1)
-  public void testReadLockWithDifferentResource() {
+  void testReadLockWithDifferentResource() {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     manager.readLock("/resourceOne");
-    manager.readLock("/resourceTwo");
+    assertDoesNotThrow(() -> manager.readLock("/resourceTwo"));
     manager.readUnlock("/resourceOne");
     manager.readUnlock("/resourceTwo");
-    assertTrue(true);
   }
 
   @Test
-  public void testReadLockWithSameResource() throws Exception {
+  void testReadLockWithSameResource() throws Exception {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     final AtomicBoolean gotLock = new AtomicBoolean(false);
@@ -103,7 +102,7 @@ public class TestLockManager {
   }
 
   @Test
-  public void testWriteReadLockWithSameResource() throws Exception {
+  void testWriteReadLockWithSameResource() throws Exception {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     final AtomicBoolean gotLock = new AtomicBoolean(false);
@@ -127,7 +126,7 @@ public class TestLockManager {
   }
 
   @Test
-  public void testReadWriteLockWithSameResource() throws Exception {
+  void testReadWriteLockWithSameResource() throws Exception {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     final AtomicBoolean gotLock = new AtomicBoolean(false);
@@ -151,7 +150,7 @@ public class TestLockManager {
   }
 
   @Test
-  public void testMultiReadWriteLockWithSameResource() throws Exception {
+  void testMultiReadWriteLockWithSameResource() throws Exception {
     final LockManager<String> manager =
         new LockManager<>(new OzoneConfiguration());
     final AtomicBoolean gotLock = new AtomicBoolean(false);
@@ -180,7 +179,7 @@ public class TestLockManager {
   }
 
   @Test
-  public void testConcurrentWriteLockWithDifferentResource() throws Exception {
+  void testConcurrentWriteLockWithDifferentResource() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     final int count = 100;
     final LockManager<Integer> manager = new LockManager<>(conf);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
index 0d4a7efbbb..a40eecc62b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestLayoutVersionInstanceFactory.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.ozone.upgrade;
 
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
@@ -33,12 +35,12 @@ import org.junit.jupiter.api.Test;
  */
 public class TestLayoutVersionInstanceFactory {
 
-  private MockInterface m1 = new MockClassV1();
-  private MockInterface m2 = new MockClassV2();
+  private final MockInterface m1 = new MockClassV1();
+  private final MockInterface m2 = new MockClassV2();
 
 
   @Test
-  public void testRegister() {
+  void testRegister() {
     LayoutVersionManager lvm = getMockLvm(1, 2);
     LayoutVersionInstanceFactory<MockInterface> factory =
         new LayoutVersionInstanceFactory<>();
@@ -55,18 +57,18 @@ public class TestLayoutVersionInstanceFactory {
         assertThrows(IllegalArgumentException.class,
             () -> factory.register(lvm, getKey("key", 1), new MockClassV1()));
 
-    assertTrue(exception.getMessage().contains("existing entry already"));
+    assertThat(exception).hasMessageContaining("existing entry already");
     assertEquals(1, factory.getInstances().size());
 
     // Verify SLV check.
     exception = assertThrows(IllegalArgumentException.class,
         () -> factory.register(lvm, getKey("key2", 4), new MockClassV2()));
-    assertTrue(exception.getMessage().contains("version is greater"));
+    assertThat(exception).hasMessageContaining("version is greater");
 
   }
 
   @Test
-  public void testGet() {
+  void testGet() {
     LayoutVersionManager lvm = getMockLvm(2, 3);
     LayoutVersionInstanceFactory<MockInterface> factory =
         new LayoutVersionInstanceFactory<>();
@@ -75,26 +77,26 @@ public class TestLayoutVersionInstanceFactory {
     assertTrue(factory.register(lvm, getKey("key", 3), m2));
 
     MockInterface val = factory.get(lvm, getKey("key", 2));
-    assertTrue(val instanceof MockClassV1);
+    assertInstanceOf(MockClassV1.class, val);
 
     // Not passing in version --> Use MLV.
     val = factory.get(lvm, getKey("key", null));
-    assertTrue(val instanceof MockClassV1);
+    assertInstanceOf(MockClassV1.class, val);
 
     // MLV check.
     IllegalArgumentException exception =
         assertThrows(IllegalArgumentException.class,
             () -> factory.get(lvm, getKey("key", 3)));
-    assertTrue(exception.getMessage().contains("version is greater"));
+    assertThat(exception).hasMessageContaining("version is greater");
 
     // Verify failure on Unknown request.
     exception = assertThrows(IllegalArgumentException.class,
         () -> factory.get(lvm, getKey("key1", 1)));
-    assertTrue(exception.getMessage().contains("No suitable instance found"));
+    assertThat(exception).hasMessageContaining("No suitable instance found");
   }
 
   @Test
-  public void testMethodBasedVersionFactory() {
+  void testMethodBasedVersionFactory() {
     LayoutVersionManager lvm = getMockLvm(1, 2);
     LayoutVersionInstanceFactory<Supplier<String>> factory =
         new LayoutVersionInstanceFactory<>();
@@ -119,7 +121,7 @@ public class TestLayoutVersionInstanceFactory {
 
 
   @Test
-  public void testOnFinalize() {
+  void testOnFinalize() {
     LayoutVersionManager lvm = getMockLvm(1, 3);
     LayoutVersionInstanceFactory<MockInterface> factory =
         new LayoutVersionInstanceFactory<>();
@@ -129,12 +131,12 @@ public class TestLayoutVersionInstanceFactory {
     assertTrue(factory.register(lvm, getKey("key2", 2), m2));
 
     MockInterface val = factory.get(lvm, getKey("key", null));
-    assertTrue(val instanceof MockClassV1);
+    assertInstanceOf(MockClassV1.class, val);
     assertEquals(2, factory.getInstances().size());
     assertEquals(2, factory.getInstances().get("key").size());
 
     val = factory.get(lvm, getKey("key2", null));
-    assertTrue(val instanceof MockClassV1);
+    assertInstanceOf(MockClassV1.class, val);
 
     // Finalize the layout version.
     LayoutFeature toFeature = getMockFeatureWithVersion(3);
@@ -142,12 +144,12 @@ public class TestLayoutVersionInstanceFactory {
     lvm = getMockLvm(3, 3);
 
     val = factory.get(lvm, getKey("key", null));
-    assertTrue(val instanceof MockClassV2);
+    assertInstanceOf(MockClassV2.class, val);
     assertEquals(2, factory.getInstances().size());
     assertEquals(1, factory.getInstances().get("key").size());
 
     val = factory.get(lvm, getKey("key2", null));
-    assertTrue(val instanceof MockClassV2);
+    assertInstanceOf(MockClassV2.class, val);
   }
 
   private LayoutFeature getMockFeatureWithVersion(int layoutVersion) {
@@ -183,7 +185,7 @@ public class TestLayoutVersionInstanceFactory {
   /**
    * Mock Impl v2.
    */
-  static class MockClassV2 extends MockClassV1 {
+  private static class MockClassV2 extends MockClassV1 {
   }
 
   /**
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
index d16bb9b5be..3937e79d84 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeFinalizerActions.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.ON
 import static org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.VALIDATE_IN_PREFINALIZE;
 import static org.apache.hadoop.ozone.upgrade.TestUpgradeFinalizerActions.MockLayoutFeature.VERSION_2;
 import static org.apache.hadoop.ozone.upgrade.TestUpgradeFinalizerActions.MockLayoutFeature.VERSION_3;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
@@ -99,8 +100,8 @@ public class TestUpgradeFinalizerActions {
 
     UpgradeException upgradeException = assertThrows(UpgradeException.class,
         () -> uF.runPrefinalizeStateActions(storage, mockObj));
-    assertTrue(upgradeException.getMessage()
-        .contains("Exception while running pre finalize state validation"));
+    assertThat(upgradeException)
+        .hasMessageContaining("Exception while running pre finalize state validation");
   }
 
   private Storage newStorage(File f) throws IOException {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to