This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 5f7bd726df HDDS-8988. Migrate simple tests in hdds-container-service 
to JUnit5 (#5602)
5f7bd726df is described below

commit 5f7bd726dffa4e80a8fc76868dcbe500095cb626
Author: Galsza <[email protected]>
AuthorDate: Wed Nov 15 18:26:19 2023 +0100

    HDDS-8988. Migrate simple tests in hdds-container-service to JUnit5 (#5602)
---
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   | 19 +++---
 .../ozone/container/common/TestContainerCache.java | 53 +++++++---------
 .../container/common/TestDatanodeStoreCache.java   | 31 +++++-----
 .../TestSchemaTwoBackwardsCompatibility.java       | 20 +++----
 .../container/common/helpers/TestBlockData.java    | 20 +++----
 .../common/helpers/TestDatanodeVersionFile.java    | 24 ++++----
 .../container/common/interfaces/TestHandler.java   | 32 +++++-----
 .../common/report/TestReportPublisherFactory.java  | 34 ++++++-----
 .../TestDeleteContainerCommandHandler.java         |  6 +-
 .../container/common/utils/TestDiskCheckUtil.java  | 58 ++++++++----------
 .../container/common/utils/TestHddsVolumeUtil.java | 37 ++++++------
 .../common/utils/TestStorageVolumeUtil.java        | 22 +++----
 .../container/common/volume/TestDbVolume.java      | 42 ++++++-------
 .../container/common/volume/TestHddsVolume.java    | 42 ++++++-------
 .../common/volume/TestPeriodicVolumeChecker.java   | 68 +++++++++------------
 .../common/volume/TestReservedVolumeSpace.java     | 44 +++++++-------
 .../container/common/volume/TestVolumeSet.java     | 70 ++++++++++------------
 .../common/volume/TestVolumeSetDiskChecks.java     | 51 +++++++---------
 .../keyvalue/ContainerLayoutTestInfo.java          |  6 +-
 .../impl/TestKeyValueStreamDataChannel.java        | 18 +++---
 .../replication/TestSimpleContainerDownloader.java | 17 +++---
 .../upgrade/TestDataNodeStartupSlvLessThanMlv.java | 21 +++----
 22 files changed, 348 insertions(+), 387 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index de855e6c26..2f592b4566 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -61,10 +61,9 @@ import static org.mockito.Mockito.when;
 
 import org.apache.ozone.test.tag.Flaky;
 import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
@@ -329,7 +328,7 @@ public class TestHddsSecureDatanodeInit {
     when(scmClient.getAllRootCaCertificates()).thenReturn(rootCaList);
     // check that new cert ID should not equal to current cert ID
     String certId = newCertHolder.getSerialNumber().toString();
-    Assert.assertFalse(certId.equals(
+    Assertions.assertFalse(certId.equals(
         client.getCertificate().getSerialNumber().toString()));
 
     // start monitor task to renew key and cert
@@ -369,11 +368,11 @@ public class TestHddsSecureDatanodeInit {
       String newCertId = client.getCertificate().getSerialNumber().toString();
       return newCertId.equals(certId2);
     }, 1000, CERT_LIFETIME * 1000);
-    Assert.assertFalse(client.getPrivateKey().equals(privateKey1));
-    Assert.assertFalse(client.getPublicKey().equals(publicKey1));
-    Assert.assertFalse(client.getCACertificate().getSerialNumber()
+    Assertions.assertFalse(client.getPrivateKey().equals(privateKey1));
+    Assertions.assertFalse(client.getPublicKey().equals(publicKey1));
+    Assertions.assertFalse(client.getCACertificate().getSerialNumber()
         .toString().equals(caCertId1));
-    Assert.assertFalse(client.getRootCACertificate().getSerialNumber()
+    Assertions.assertFalse(client.getRootCACertificate().getSerialNumber()
         .toString().equals(rootCaCertId1));
   }
 
@@ -404,7 +403,7 @@ public class TestHddsSecureDatanodeInit {
 
     // check that new cert ID should not equal to current cert ID
     String certId = newCertHolder.getSerialNumber().toString();
-    Assert.assertFalse(certId.equals(
+    Assertions.assertFalse(certId.equals(
         client.getCertificate().getSerialNumber().toString()));
 
     // start monitor task to renew key and cert
@@ -412,12 +411,12 @@ public class TestHddsSecureDatanodeInit {
 
     // certificate failed to renew, client still hold the old expired cert.
     Thread.sleep(CERT_LIFETIME * 1000);
-    Assert.assertFalse(certId.equals(
+    Assertions.assertFalse(certId.equals(
         client.getCertificate().getSerialNumber().toString()));
     try {
       client.getCertificate().checkValidity();
     } catch (Exception e) {
-      Assert.assertTrue(e instanceof CertificateExpiredException);
+      Assertions.assertTrue(e instanceof CertificateExpiredException);
     }
 
     // provide a new valid SCMGetCertResponseProto
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 6188d0277c..15b8fad7f3 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -27,10 +27,8 @@ import 
org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,8 +39,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import static org.junit.Assert.assertEquals;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test ContainerCache with evictions.
@@ -50,13 +47,10 @@ import static org.junit.Assert.assertEquals;
 public class TestContainerCache {
   private static String testRoot = new FileSystemTestHelper().getTestRootDir();
 
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
   private void createContainerDB(OzoneConfiguration conf, File dbFile)
       throws Exception {
     DatanodeStore store = new DatanodeStoreSchemaTwoImpl(
-            conf, dbFile.getAbsolutePath(), false);
+        conf, dbFile.getAbsolutePath(), false);
 
     // we close since the SCM pre-creates containers.
     // we will open and put Db handle into a cache when keys are being created
@@ -92,12 +86,12 @@ public class TestContainerCache {
     long numCacheMisses = metrics.getNumCacheMisses();
     // Get 2 references out of the same db and verify the objects are same.
     ReferenceCountedDB db1 = cache.getDB(1, "RocksDB",
-            containerDir1.getPath(),
+        containerDir1.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
     assertEquals(1, db1.getReferenceCount());
     assertEquals(numDbGetCount + 1, metrics.getNumDbGetOps());
     ReferenceCountedDB db2 = cache.getDB(1, "RocksDB",
-            containerDir1.getPath(),
+        containerDir1.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
     assertEquals(2, db2.getReferenceCount());
     assertEquals(numCacheMisses + 1, metrics.getNumCacheMisses());
@@ -108,7 +102,7 @@ public class TestContainerCache {
 
     // add one more references to ContainerCache.
     ReferenceCountedDB db3 = cache.getDB(2, "RocksDB",
-            containerDir2.getPath(),
+        containerDir2.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
     assertEquals(1, db3.getReferenceCount());
 
@@ -119,13 +113,13 @@ public class TestContainerCache {
     // add one more reference to ContainerCache and verify that it will not
     // evict the least recent entry as it has reference.
     ReferenceCountedDB db4 = cache.getDB(3, "RocksDB",
-            containerDir3.getPath(),
+        containerDir3.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
     assertEquals(1, db4.getReferenceCount());
 
     assertEquals(2, cache.size());
-    Assert.assertNotNull(cache.get(containerDir1.getPath()));
-    Assert.assertNull(cache.get(containerDir2.getPath()));
+    Assertions.assertNotNull(cache.get(containerDir1.getPath()));
+    Assertions.assertNull(cache.get(containerDir2.getPath()));
 
     // Now close both the references for container1
     db1.close();
@@ -133,20 +127,17 @@ public class TestContainerCache {
     assertEquals(0, db1.getReferenceCount());
     assertEquals(0, db2.getReferenceCount());
 
-
     // The reference count for container1 is 0 but it is not evicted.
     ReferenceCountedDB db5 = cache.getDB(1, "RocksDB",
-            containerDir1.getPath(),
+        containerDir1.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
-    assertEquals(1, db5.getReferenceCount());
-    assertEquals(db1, db5);
-    db5.close();
-    db4.close();
-
-
-    // Decrementing reference count below zero should fail.
-    thrown.expect(IllegalArgumentException.class);
-    db5.close();
+    Assertions.assertThrows(IllegalArgumentException.class, () -> {
+      assertEquals(1, db5.getReferenceCount());
+      assertEquals(db1, db5);
+      db5.close();
+      db4.close();
+      db5.close();
+    });
   }
 
   @Test
@@ -168,9 +159,9 @@ public class TestContainerCache {
         ReferenceCountedDB db1 = cache.getDB(1, "RocksDB",
             containerDir.getPath(),
             VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
-        Assert.assertNotNull(db1);
+        Assertions.assertNotNull(db1);
       } catch (IOException e) {
-        Assert.fail("Should get the DB instance");
+        Assertions.fail("Should get the DB instance");
       }
     };
     List<Future> futureList = new ArrayList<>();
@@ -180,7 +171,7 @@ public class TestContainerCache {
       try {
         future.get();
       } catch (InterruptedException | ExecutionException e) {
-        Assert.fail("Should get the DB instance");
+        Assertions.fail("Should get the DB instance");
       }
     }
 
@@ -222,7 +213,7 @@ public class TestContainerCache {
     ReferenceCountedDB db4 = cache.getDB(100, "RocksDB",
         containerDir1.getPath(),
         VersionedDatanodeFeatures.SchemaV2.chooseSchemaVersion(), conf);
-    Assert.assertNotEquals(db3, db2);
+    Assertions.assertNotEquals(db3, db2);
     assertEquals(db4, db3);
     db1.close();
     db2.close();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
index 8be137c6e1..546fcf5155 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStoreCache.java
@@ -22,27 +22,30 @@ import 
org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.container.common.utils.RawDB;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 
 /**
  * Test DatanodeStoreCache.
  */
 public class TestDatanodeStoreCache {
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  @TempDir
+  private Path folder;
 
   private OzoneConfiguration conf = new OzoneConfiguration();
 
   @Test
   public void testBasicOperations() throws IOException {
     DatanodeStoreCache cache = DatanodeStoreCache.getInstance();
-    String dbPath1 = folder.newFolder("basic1").getAbsolutePath();
-    String dbPath2 = folder.newFolder("basic2").getAbsolutePath();
+    String dbPath1 = Files.createDirectory(folder.resolve("basic1"))
+        .toFile().toString();
+    String dbPath2 = Files.createDirectory(folder.resolve("basic2"))
+        .toFile().toString();
     DatanodeStore store1 = new DatanodeStoreSchemaThreeImpl(conf, dbPath1,
         false);
     DatanodeStore store2 = new DatanodeStoreSchemaThreeImpl(conf, dbPath2,
@@ -51,28 +54,28 @@ public class TestDatanodeStoreCache {
     // test normal add
     cache.addDB(dbPath1, new RawDB(store1, dbPath1));
     cache.addDB(dbPath2, new RawDB(store2, dbPath2));
-    Assert.assertEquals(2, cache.size());
+    Assertions.assertEquals(2, cache.size());
 
     // test duplicate add
     cache.addDB(dbPath1, new RawDB(store1, dbPath1));
-    Assert.assertEquals(2, cache.size());
+    Assertions.assertEquals(2, cache.size());
 
     // test get, test reference the same object using ==
-    Assert.assertTrue(store1 == cache.getDB(dbPath1, conf).getStore());
+    Assertions.assertTrue(store1 == cache.getDB(dbPath1, conf).getStore());
 
     // test remove
     cache.removeDB(dbPath1);
-    Assert.assertEquals(1, cache.size());
+    Assertions.assertEquals(1, cache.size());
 
     // test remove non-exist
     try {
       cache.removeDB(dbPath1);
     } catch (Exception e) {
-      Assert.fail("Should not throw " + e);
+      Assertions.fail("Should not throw " + e);
     }
 
     // test shutdown
     cache.shutdownCache();
-    Assert.assertEquals(0, cache.size());
+    Assertions.assertEquals(0, cache.size());
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index 2f8f52398a..60050ece90 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -55,11 +55,10 @@ import 
org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import 
org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -75,7 +74,7 @@ import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTA
 import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COUNT;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_BYTES_USED;
 import static org.apache.hadoop.ozone.OzoneConsts.PENDING_DELETE_BLOCK_COUNT;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -100,6 +99,7 @@ public class TestSchemaTwoBackwardsCompatibility {
   private OzoneConfiguration conf;
   private String clusterID;
   private String datanodeUuid;
+  @TempDir
   private File testRoot;
   private MutableVolumeSet volumeSet;
   private BlockManager blockManager;
@@ -124,12 +124,8 @@ public class TestSchemaTwoBackwardsCompatibility {
       new DispatcherContext.Builder()
           .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
 
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
-
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
-    testRoot = tempFolder.newFolder();
     conf = new OzoneConfiguration();
 
     clusterID = UUID.randomUUID().toString();
@@ -157,7 +153,7 @@ public class TestSchemaTwoBackwardsCompatibility {
     when(dispatcher.getHandler(any())).thenReturn(keyValueHandler);
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     BlockUtils.shutdownCache(conf);
   }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
index d65d631fc0..4d46b16469 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -20,12 +20,9 @@ package org.apache.hadoop.ozone.container.common.helpers;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.common.Checksum;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,15 +30,14 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ThreadLocalRandom;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests to test block deleting service.
  */
+@Timeout(10)
 public class TestBlockData {
   static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
-  @Rule
-  public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(10));
 
   static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
       long len) {
@@ -102,9 +98,9 @@ public class TestBlockData {
   static void assertChunks(List<ContainerProtos.ChunkInfo> expected,
       BlockData computed) {
     final List<ContainerProtos.ChunkInfo> computedChunks = 
computed.getChunks();
-    Assert.assertEquals("expected=" + expected + "\ncomputed=" +
-        computedChunks, expected, computedChunks);
-    Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
+    Assertions.assertEquals(expected, computedChunks,
+        "expected=" + expected + "\ncomputed=" + computedChunks);
+    Assertions.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
         computed.getSize());
   }
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
index 02b673bfaa..d9dc7de6d9 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
@@ -22,19 +22,20 @@ import 
org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Properties;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * This class tests {@link DatanodeVersionFile}.
@@ -51,12 +52,13 @@ public class TestDatanodeVersionFile {
   private long cTime;
   private int lv;
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  @TempDir
+  private Path folder;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
-    versionFile = folder.newFile("Version");
+    versionFile = Files.createFile(
+        folder.resolve("VersionFile")).toFile();
     storageID = UUID.randomUUID().toString();
     clusterID = UUID.randomUUID().toString();
     datanodeUUID = UUID.randomUUID().toString();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index 30ce8f27fd..698c36afb8 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -34,29 +34,25 @@ import 
org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 
 import com.google.common.collect.Maps;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
 /**
  * Tests Handler interface.
  */
+@Timeout(300)
 public class TestHandler {
-  @Rule
-  public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
 
   private OzoneConfiguration conf;
   private HddsDispatcher dispatcher;
   private ContainerSet containerSet;
   private VolumeSet volumeSet;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     this.conf = new OzoneConfiguration();
     this.containerSet = Mockito.mock(ContainerSet.class);
@@ -79,7 +75,7 @@ public class TestHandler {
         conf, containerSet, volumeSet, handlers, null, metrics, null);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     ContainerMetrics.remove();
   }
@@ -89,8 +85,8 @@ public class TestHandler {
     Handler kvHandler = dispatcher.getHandler(
         ContainerProtos.ContainerType.KeyValueContainer);
 
-    Assert.assertTrue("getHandlerForContainerType returned incorrect handler",
-        (kvHandler instanceof KeyValueHandler));
+    Assertions.assertTrue((kvHandler instanceof KeyValueHandler),
+        "getHandlerForContainerType returned incorrect handler");
   }
 
   @Test
@@ -100,11 +96,11 @@ public class TestHandler {
     ContainerProtos.ContainerType invalidContainerType =
         ContainerProtos.ContainerType.forNumber(2);
 
-    Assert.assertEquals("New ContainerType detected. Not an invalid " +
-        "containerType", invalidContainerType, null);
+    Assertions.assertNull(invalidContainerType,
+        "New ContainerType detected. Not an invalid containerType");
 
     Handler dispatcherHandler = dispatcher.getHandler(invalidContainerType);
-    Assert.assertEquals("Get Handler for Invalid ContainerType should " +
-        "return null.", dispatcherHandler, null);
+    Assertions.assertNull(dispatcherHandler,
+        "Get Handler for Invalid ContainerType should return null.");
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
index ea915f98a2..b34559923c 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
@@ -23,27 +23,25 @@ import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CRLStatusReport;
 
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.hamcrest.MatcherAssert;
+import org.hamcrest.Matchers;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test cases to test ReportPublisherFactory.
  */
 public class TestReportPublisherFactory {
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
   @Test
   public void testGetContainerReportPublisher() {
     OzoneConfiguration conf = new OzoneConfiguration();
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     ReportPublisher publisher = factory
         .getPublisherFor(ContainerReportsProto.class);
-    Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
+    Assertions.assertEquals(
+        ContainerReportPublisher.class, publisher.getClass());
+    Assertions.assertEquals(conf, publisher.getConf());
   }
 
   @Test
@@ -52,8 +50,8 @@ public class TestReportPublisherFactory {
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     ReportPublisher publisher = factory
         .getPublisherFor(NodeReportProto.class);
-    Assert.assertEquals(NodeReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
+    Assertions.assertEquals(NodeReportPublisher.class, publisher.getClass());
+    Assertions.assertEquals(conf, publisher.getConf());
   }
 
   @Test
@@ -62,16 +60,20 @@ public class TestReportPublisherFactory {
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     ReportPublisher publisher = factory
         .getPublisherFor(CRLStatusReport.class);
-    Assert.assertEquals(CRLStatusReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
+    Assertions.assertEquals(
+        CRLStatusReportPublisher.class, publisher.getClass());
+    Assertions.assertEquals(conf, publisher.getConf());
   }
 
   @Test
   public void testInvalidReportPublisher() {
     OzoneConfiguration conf = new OzoneConfiguration();
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    exception.expect(RuntimeException.class);
-    exception.expectMessage("No publisher found for report");
-    factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class);
+    RuntimeException runtimeException = Assertions.assertThrows(
+        RuntimeException.class,
+        () -> factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class)
+    );
+    MatcherAssert.assertThat(runtimeException.getMessage(),
+        Matchers.containsString("No publisher found for report"));
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
index 06002009c5..c11b25914d 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerCommandHandler.java
@@ -24,8 +24,8 @@ import 
org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
 import org.apache.ozone.test.TestClock;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 
@@ -50,7 +50,7 @@ public class TestDeleteContainerCommandHandler {
   private ContainerController controller;
   private StateContext context;
 
-  @Before
+  @BeforeEach
   public void setup() {
     clock = new TestClock(Instant.now(), ZoneId.systemDefault());
     ozoneContainer = mock(OzoneContainer.class);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestDiskCheckUtil.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestDiskCheckUtil.java
index 701d13d81f..5717e9d6bd 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestDiskCheckUtil.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestDiskCheckUtil.java
@@ -17,11 +17,9 @@
  */
 package org.apache.hadoop.ozone.container.common.utils;
 
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 
@@ -32,57 +30,51 @@ import java.io.File;
  *
  */
 public class TestDiskCheckUtil {
-  @Rule
-  public TemporaryFolder tempTestDir = new TemporaryFolder();
 
+  @TempDir
   private File testDir;
-
-  @Before
-  public void setup() {
-    testDir = tempTestDir.getRoot();
-  }
-
+
   @Test
   public void testPermissions() {
     // Ensure correct test setup before testing the disk check.
-    Assert.assertTrue(testDir.canRead());
-    Assert.assertTrue(testDir.canWrite());
-    Assert.assertTrue(testDir.canExecute());
-    Assert.assertTrue(DiskCheckUtil.checkPermissions(testDir));
+    Assertions.assertTrue(testDir.canRead());
+    Assertions.assertTrue(testDir.canWrite());
+    Assertions.assertTrue(testDir.canExecute());
+    Assertions.assertTrue(DiskCheckUtil.checkPermissions(testDir));
 
     // Test failure without read permissiosns.
-    Assert.assertTrue(testDir.setReadable(false));
-    Assert.assertFalse(DiskCheckUtil.checkPermissions(testDir));
-    Assert.assertTrue(testDir.setReadable(true));
+    Assertions.assertTrue(testDir.setReadable(false));
+    Assertions.assertFalse(DiskCheckUtil.checkPermissions(testDir));
+    Assertions.assertTrue(testDir.setReadable(true));
 
     // Test failure without write permissiosns.
-    Assert.assertTrue(testDir.setWritable(false));
-    Assert.assertFalse(DiskCheckUtil.checkPermissions(testDir));
-    Assert.assertTrue(testDir.setWritable(true));
+    Assertions.assertTrue(testDir.setWritable(false));
+    Assertions.assertFalse(DiskCheckUtil.checkPermissions(testDir));
+    Assertions.assertTrue(testDir.setWritable(true));
 
     // Test failure without execute permissiosns.
-    Assert.assertTrue(testDir.setExecutable(false));
-    Assert.assertFalse(DiskCheckUtil.checkPermissions(testDir));
-    Assert.assertTrue(testDir.setExecutable(true));
+    Assertions.assertTrue(testDir.setExecutable(false));
+    Assertions.assertFalse(DiskCheckUtil.checkPermissions(testDir));
+    Assertions.assertTrue(testDir.setExecutable(true));
   }
 
   @Test
   public void testExistence() {
     // Ensure correct test setup before testing the disk check.
-    Assert.assertTrue(testDir.exists());
-    Assert.assertTrue(DiskCheckUtil.checkExistence(testDir));
+    Assertions.assertTrue(testDir.exists());
+    Assertions.assertTrue(DiskCheckUtil.checkExistence(testDir));
 
-    Assert.assertTrue(testDir.delete());
-    Assert.assertFalse(DiskCheckUtil.checkExistence(testDir));
+    Assertions.assertTrue(testDir.delete());
+    Assertions.assertFalse(DiskCheckUtil.checkExistence(testDir));
   }
 
   @Test
   public void testReadWrite() {
-    Assert.assertTrue(DiskCheckUtil.checkReadWrite(testDir, testDir, 10));
+    Assertions.assertTrue(DiskCheckUtil.checkReadWrite(testDir, testDir, 10));
 
     // Test file should have been deleted.
     File[] children = testDir.listFiles();
-    Assert.assertNotNull(children);
-    Assert.assertEquals(0, children.length);
+    Assertions.assertNotNull(children);
+    Assertions.assertEquals(0, children.length);
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
index 3a20e22a09..d05c127838 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java
@@ -25,31 +25,32 @@ import 
org.apache.hadoop.ozone.container.common.volume.DbVolume;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test for {@link HddsVolumeUtil}.
  */
 public class TestHddsVolumeUtil {
-  @Rule
-  public final TemporaryFolder tempDir = new TemporaryFolder();
+  @TempDir
+  private Path tempDir;
 
   private final String datanodeId = UUID.randomUUID().toString();
   private final String clusterId = UUID.randomUUID().toString();
@@ -58,7 +59,7 @@ public class TestHddsVolumeUtil {
   private MutableVolumeSet hddsVolumeSet;
   private MutableVolumeSet dbVolumeSet;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     ContainerTestUtils.enableSchemaV3(conf);
 
@@ -66,7 +67,8 @@ public class TestHddsVolumeUtil {
     File[] hddsVolumeDirs = new File[VOLUMNE_NUM];
     StringBuilder hddsDirs = new StringBuilder();
     for (int i = 0; i < VOLUMNE_NUM; i++) {
-      hddsVolumeDirs[i] = tempDir.newFolder();
+      hddsVolumeDirs[i] =
+          Files.createDirectory(tempDir.resolve("volumeDir" + i)).toFile();
       hddsDirs.append(hddsVolumeDirs[i]).append(",");
     }
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsDirs.toString());
@@ -77,7 +79,8 @@ public class TestHddsVolumeUtil {
     File[] dbVolumeDirs = new File[VOLUMNE_NUM];
     StringBuilder dbDirs = new StringBuilder();
     for (int i = 0; i < VOLUMNE_NUM; i++) {
-      dbVolumeDirs[i] = tempDir.newFolder();
+      dbVolumeDirs[i] =
+          Files.createDirectory(tempDir.resolve("dbVolumeDir" + i)).toFile();
       dbDirs.append(dbVolumeDirs[i]).append(",");
     }
     conf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
@@ -86,7 +89,7 @@ public class TestHddsVolumeUtil {
         StorageVolume.VolumeType.DB_VOLUME, null);
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     hddsVolumeSet.shutdown();
     dbVolumeSet.shutdown();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestStorageVolumeUtil.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestStorageVolumeUtil.java
index d8bab45662..7b88ba3a31 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestStorageVolumeUtil.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestStorageVolumeUtil.java
@@ -23,18 +23,18 @@ import 
org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.volume.DbVolume;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Path;
 import java.util.Collections;
 import java.util.UUID;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -45,8 +45,10 @@ import static org.powermock.api.mockito.PowerMockito.when;
  * Test for {@link StorageVolumeUtil}.
  */
 public class TestStorageVolumeUtil {
-  @Rule
-  public final TemporaryFolder folder = new TemporaryFolder();
+  @TempDir
+  private Path volumeDir;
+  @TempDir
+  private Path dbVolumeDir;
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestStorageVolumeUtil.class);
@@ -58,13 +60,13 @@ public class TestStorageVolumeUtil {
   private HddsVolume.Builder hddsVolumeBuilder;
   private DbVolume.Builder dbVolumeBuilder;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
-    hddsVolumeBuilder = new HddsVolume.Builder(folder.newFolder().getPath())
+    hddsVolumeBuilder = new HddsVolume.Builder(volumeDir.toString())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
         .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
-    dbVolumeBuilder = new DbVolume.Builder(folder.newFolder().getPath())
+    dbVolumeBuilder = new DbVolume.Builder(dbVolumeDir.toString())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
         .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
index b0f0821943..aaa8632b8b 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestDbVolume.java
@@ -23,20 +23,21 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Collections;
 import java.util.UUID;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.powermock.api.mockito.PowerMockito.when;
 
@@ -52,13 +53,13 @@ public class TestDbVolume {
   private DbVolume.Builder volumeBuilder;
   private File versionFile;
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  @TempDir
+  private Path folder;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
-    File rootDir = new File(folder.getRoot(), DbVolume.DB_VOLUME_DIR);
-    volumeBuilder = new DbVolume.Builder(folder.getRoot().getPath())
+    File rootDir = new File(folder.toFile(), DbVolume.DB_VOLUME_DIR);
+    volumeBuilder = new DbVolume.Builder(folder.toString())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
         .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
@@ -76,16 +77,16 @@ public class TestDbVolume {
     assertEquals(StorageType.DEFAULT, volume.getStorageType());
     assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
         volume.getStorageState());
-    assertFalse("Version file should not be created when clusterID is not " +
-        "known.", versionFile.exists());
+    assertFalse(versionFile.exists(), "Version file should not be created " +
+        "when clusterID is not known.");
 
     // Format the volume with clusterID.
     volume.format(CLUSTER_ID);
 
     // The state of HddsVolume after formatting with clusterID should be
     // NORMAL and the version file should exist.
-    assertTrue("Volume format should create Version file",
-        versionFile.exists());
+    assertTrue(versionFile.exists(),
+        "Volume format should create Version file");
     assertEquals(CLUSTER_ID, volume.getClusterID());
     assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
     assertEquals(0, volume.getHddsVolumeIDs().size());
@@ -102,8 +103,8 @@ public class TestDbVolume {
     assertEquals(StorageType.DEFAULT, volume.getStorageType());
     assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
         volume.getStorageState());
-    assertFalse("Version file should not be created when clusterID is not " +
-        "known.", versionFile.exists());
+    assertFalse(versionFile.exists(), "Version file should not be created " +
+        "when clusterID is not known.");
 
     // Format the volume with clusterID.
     volume.format(CLUSTER_ID);
@@ -161,7 +162,8 @@ public class TestDbVolume {
     File[] hddsVolumeDirs = new File[volumeNum];
     StringBuilder hddsDirs = new StringBuilder();
     for (int i = 0; i < volumeNum; i++) {
-      hddsVolumeDirs[i] = folder.newFolder();
+      hddsVolumeDirs[i] =
+          Files.createDirectory(folder.resolve("volumeDir" + i)).toFile();
       hddsDirs.append(hddsVolumeDirs[i]).append(",");
     }
     CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsDirs.toString());
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 898b44da7b..4c4e32b07b 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.time.Duration;
 import java.util.Properties;
@@ -40,20 +42,19 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
 import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache;
 import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Unit tests for {@link HddsVolume}.
@@ -65,18 +66,18 @@ public class TestHddsVolume {
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static final String RESERVED_SPACE = "100B";
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
+  @TempDir
+  private Path folder;
 
   private HddsVolume.Builder volumeBuilder;
   private File versionFile;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
-    File rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
-    CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, folder.getRoot() +
+    File rootDir = new File(folder.toString(), HddsVolume.HDDS_VOLUME_DIR);
+    CONF.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, folder.toString() +
         ":" + RESERVED_SPACE);
-    volumeBuilder = new HddsVolume.Builder(folder.getRoot().getPath())
+    volumeBuilder = new HddsVolume.Builder(folder.toString())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
         .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
@@ -119,8 +120,9 @@ public class TestHddsVolume {
     assertEquals(StorageType.DEFAULT, volume.getStorageType());
     assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
         volume.getStorageState());
-    assertFalse("Version file should not be created when clusterID is not " +
-        "known.", versionFile.exists());
+    assertFalse(versionFile.exists(), "Version file should not be created " +
+        "when clusterID is not " +
+        "known.");
 
 
     // Format the volume with clusterID.
@@ -128,8 +130,8 @@ public class TestHddsVolume {
 
     // The state of HddsVolume after formatting with clusterID should be
     // NORMAL and the version file should exist.
-    assertTrue("Volume format should create Version file",
-        versionFile.exists());
+    assertTrue(versionFile.exists(), "Volume format should create Version " +
+        "file");
     assertEquals(CLUSTER_ID, volume.getClusterID());
     assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
 
@@ -534,7 +536,7 @@ public class TestHddsVolume {
   }
 
   private MutableVolumeSet createDbVolumeSet() throws IOException {
-    File dbVolumeDir = folder.newFolder();
+    File dbVolumeDir = 
Files.createDirectory(folder.resolve("NewDir")).toFile();
     CONF.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
         dbVolumeDir.getAbsolutePath());
     MutableVolumeSet dbVolumeSet = new MutableVolumeSet(DATANODE_UUID,
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
index 1465f954d5..8516554814 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java
@@ -18,26 +18,23 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInfo;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
@@ -47,39 +44,28 @@ import static 
org.apache.hadoop.ozone.container.common.volume.TestStorageVolumeC
 /**
  * Test periodic volume checker in StorageVolumeChecker.
  */
+@Timeout(150)
 public class TestPeriodicVolumeChecker {
 
   public static final Logger LOG = LoggerFactory.getLogger(
       TestPeriodicVolumeChecker.class);
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Rule
-  public TestName testName = new TestName();
-
-  @Rule
-  public TestRule globalTimeout = new JUnit5AwareTimeout(Timeout.seconds(150));
+  @TempDir
+  private Path folder;
 
   private OzoneConfiguration conf = new OzoneConfiguration();
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new OzoneConfiguration();
-    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
-        .getAbsolutePath());
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString());
     conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
-        folder.newFolder().getAbsolutePath());
-  }
-
-  @After
-  public void cleanup() throws IOException {
-    FileUtils.deleteDirectory(folder.getRoot());
+        Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString());
   }
 
   @Test
-  public void testPeriodicVolumeChecker() throws Exception {
-    LOG.info("Executing {}", testName.getMethodName());
+  public void testPeriodicVolumeChecker(TestInfo testInfo) throws Exception {
+    LOG.info("Executing {}", testInfo.getTestMethod());
 
     DatanodeConfiguration dnConf =
         conf.getObject(DatanodeConfiguration.class);
@@ -100,33 +86,33 @@ public class TestPeriodicVolumeChecker {
       volumeChecker.setDelegateChecker(
           new TestStorageVolumeChecker.DummyChecker());
 
-      Assert.assertEquals(0, volumeChecker.getNumAllVolumeChecks());
-      Assert.assertEquals(0, volumeChecker.getNumAllVolumeSetsChecks());
+      Assertions.assertEquals(0, volumeChecker.getNumAllVolumeChecks());
+      Assertions.assertEquals(0, volumeChecker.getNumAllVolumeSetsChecks());
 
       // first round
       timer.advance(gap.toMillis() / 3);
       volumeChecker.checkAllVolumeSets();
 
-      Assert.assertEquals(2, volumeChecker.getNumAllVolumeChecks());
-      Assert.assertEquals(1, volumeChecker.getNumAllVolumeSetsChecks());
-      Assert.assertEquals(0, volumeChecker.getNumSkippedChecks());
+      Assertions.assertEquals(2, volumeChecker.getNumAllVolumeChecks());
+      Assertions.assertEquals(1, volumeChecker.getNumAllVolumeSetsChecks());
+      Assertions.assertEquals(0, volumeChecker.getNumSkippedChecks());
 
       // periodic disk checker next round within gap
       timer.advance(gap.toMillis() / 3);
       volumeChecker.checkAllVolumeSets();
 
       // skipped next round
-      Assert.assertEquals(2, volumeChecker.getNumAllVolumeChecks());
-      Assert.assertEquals(1, volumeChecker.getNumAllVolumeSetsChecks());
-      Assert.assertEquals(1, volumeChecker.getNumSkippedChecks());
+      Assertions.assertEquals(2, volumeChecker.getNumAllVolumeChecks());
+      Assertions.assertEquals(1, volumeChecker.getNumAllVolumeSetsChecks());
+      Assertions.assertEquals(1, volumeChecker.getNumSkippedChecks());
 
       // periodic disk checker next round
       timer.advance(interval.toMillis());
       volumeChecker.checkAllVolumeSets();
 
-      Assert.assertEquals(4, volumeChecker.getNumAllVolumeChecks());
-      Assert.assertEquals(2, volumeChecker.getNumAllVolumeSetsChecks());
-      Assert.assertEquals(1, volumeChecker.getNumSkippedChecks());
+      Assertions.assertEquals(4, volumeChecker.getNumAllVolumeChecks());
+      Assertions.assertEquals(2, volumeChecker.getNumAllVolumeSetsChecks());
+      Assertions.assertEquals(1, volumeChecker.getNumSkippedChecks());
     } finally {
       volumeChecker.shutdownAndWait(1, TimeUnit.SECONDS);
     }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index e1d9e8b8c6..86b7689e00 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -21,12 +21,12 @@ package org.apache.hadoop.ozone.container.common.volume;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
+import java.nio.file.Path;
 import java.util.UUID;
 
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT;
@@ -37,16 +37,16 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESE
  */
 public class TestReservedVolumeSpace {
 
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  @Rule
-  public TemporaryFolder temp = new TemporaryFolder();
+  @TempDir
+  private Path folder;
+  @TempDir
+  private Path temp;
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private HddsVolume.Builder volumeBuilder;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
-    volumeBuilder = new HddsVolume.Builder(folder.getRoot().getPath())
+    volumeBuilder = new HddsVolume.Builder(folder.toString())
         .datanodeUuid(DATANODE_UUID)
         .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
   }
@@ -77,8 +77,8 @@ public class TestReservedVolumeSpace {
             .getReservedInBytes();
     long reservedCalculated = (long) Math.ceil(totalCapacity * percentage);
 
-    Assert.assertEquals(reservedFromVolume, reservedCalculated);
-    Assert.assertEquals(volumeCapacity, volumeCapacityReserved);
+    Assertions.assertEquals(reservedFromVolume, reservedCalculated);
+    Assertions.assertEquals(volumeCapacity, volumeCapacityReserved);
   }
 
   /**
@@ -91,12 +91,12 @@ public class TestReservedVolumeSpace {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, "0.3");
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED,
-        folder.getRoot() + ":500B");
+        folder.toString() + ":500B");
     HddsVolume hddsVolume = volumeBuilder.conf(conf).build();
 
     long reservedFromVolume = hddsVolume.getVolumeInfo().get()
             .getReservedInBytes();
-    Assert.assertEquals(reservedFromVolume, 500);
+    Assertions.assertEquals(reservedFromVolume, 500);
   }
 
   @Test
@@ -106,7 +106,7 @@ public class TestReservedVolumeSpace {
 
     long reservedFromVolume = hddsVolume.getVolumeInfo().get()
             .getReservedInBytes();
-    Assert.assertEquals(reservedFromVolume, 0);
+    Assertions.assertEquals(reservedFromVolume, 0);
   }
 
   @Test
@@ -115,19 +115,19 @@ public class TestReservedVolumeSpace {
     conf.set(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, "0.3");
     //Setting config for different volume, hence fallback happens
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED,
-        temp.getRoot() + ":500B");
+        temp.toString() + ":500B");
     HddsVolume hddsVolume = volumeBuilder.conf(conf).build();
 
     long reservedFromVolume = hddsVolume.getVolumeInfo().get()
             .getReservedInBytes();
-    Assert.assertNotEquals(reservedFromVolume, 0);
+    Assertions.assertNotEquals(reservedFromVolume, 0);
 
     long totalCapacity = hddsVolume.getVolumeInfo().get()
         .getUsageForTesting().getCapacity();
     float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT,
         HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT);
     long reservedCalculated = (long) Math.ceil(totalCapacity * percentage);
-    Assert.assertEquals(reservedFromVolume, reservedCalculated);
+    Assertions.assertEquals(reservedFromVolume, reservedCalculated);
   }
 
   @Test
@@ -136,12 +136,12 @@ public class TestReservedVolumeSpace {
 
     // 500C doesn't match with any Storage Unit
     conf1.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED,
-        folder.getRoot() + ":500C");
+        folder.toString() + ":500C");
     HddsVolume hddsVolume1 = volumeBuilder.conf(conf1).build();
 
     long reservedFromVolume1 = hddsVolume1.getVolumeInfo().get()
             .getReservedInBytes();
-    Assert.assertEquals(reservedFromVolume1, 0);
+    Assertions.assertEquals(reservedFromVolume1, 0);
 
     OzoneConfiguration conf2 = new OzoneConfiguration();
 
@@ -151,6 +151,6 @@ public class TestReservedVolumeSpace {
 
     long reservedFromVolume2 = hddsVolume2.getVolumeInfo().get()
             .getReservedInBytes();
-    Assert.assertEquals(reservedFromVolume2, 0);
+    Assertions.assertEquals(reservedFromVolume2, 0);
   }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 50c9c8b971..d10e678b57 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import java.io.IOException;
 import org.apache.commons.io.FileUtils;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
@@ -34,18 +35,14 @@ import org.apache.ozone.test.GenericTestUtils.LogCapturer;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.container.common.volume.HddsVolume
     .HDDS_VOLUME_DIR;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.lang.reflect.Method;
@@ -56,6 +53,7 @@ import java.util.UUID;
 /**
  * Tests {@link MutableVolumeSet} operations.
  */
+@Timeout(300)
 public class TestVolumeSet {
 
   private OzoneConfiguration conf;
@@ -72,10 +70,7 @@ public class TestVolumeSet {
         null, StorageVolume.VolumeType.DATA_VOLUME, null);
   }
 
-  @Rule
-  public TestRule testTimeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new OzoneConfiguration();
     String dataDirKey = volume1 + "," + volume2;
@@ -87,7 +82,7 @@ public class TestVolumeSet {
     initializeVolumeSet();
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     // Delete the volume root dir
     List<StorageVolume> vols = new ArrayList<>();
@@ -118,12 +113,12 @@ public class TestVolumeSet {
     List<StorageVolume> volumesList = volumeSet.getVolumesList();
 
     // VolumeSet initialization should add volume1 and volume2 to VolumeSet
-    assertEquals("VolumeSet intialization is incorrect",
-        volumesList.size(), volumes.size());
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume1));
-    assertTrue("VolumeSet not initailized correctly",
-        checkVolumeExistsInVolumeSet(volume2));
+    assertEquals(volumesList.size(), volumes.size(),
+        "VolumeSet intialization is incorrect");
+    assertTrue(checkVolumeExistsInVolumeSet(volume1),
+        "VolumeSet not initailized correctly");
+    assertTrue(checkVolumeExistsInVolumeSet(volume2),
+        "VolumeSet not initailized correctly");
   }
 
   @Test
@@ -137,8 +132,8 @@ public class TestVolumeSet {
 
     assertTrue(success);
     assertEquals(3, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume did not add requested volume to VolumeSet",
-        checkVolumeExistsInVolumeSet(volume3));
+    assertTrue(checkVolumeExistsInVolumeSet(volume3),
+        "AddVolume did not add requested volume to VolumeSet");
   }
 
   @Test
@@ -151,11 +146,11 @@ public class TestVolumeSet {
     assertEquals(1, volumeSet.getVolumesList().size());
 
     // Failed volume should be added to FailedVolumeList
-    assertEquals("Failed volume not present in FailedVolumeMap",
-        1, volumeSet.getFailedVolumesList().size());
-    assertEquals("Failed Volume list did not match",
-        HddsVolumeUtil.getHddsRoot(volume1),
-        volumeSet.getFailedVolumesList().get(0).getStorageDir().getPath());
+    assertEquals(1, volumeSet.getFailedVolumesList().size(),
+        "Failed volume not present in FailedVolumeMap");
+    assertEquals(HddsVolumeUtil.getHddsRoot(volume1),
+        volumeSet.getFailedVolumesList().get(0).getStorageDir().getPath(),
+        "Failed Volume list did not match");
 
     // Failed volume should not exist in VolumeMap
     assertFalse(volumeSet.getVolumeMap().containsKey(volume1));
@@ -173,13 +168,14 @@ public class TestVolumeSet {
     // Attempting to remove a volume which does not exist in VolumeSet should
     // log a warning.
     LogCapturer logs = LogCapturer.captureLogs(
-            LoggerFactory.getLogger(MutableVolumeSet.class));
+        LoggerFactory.getLogger(MutableVolumeSet.class));
     volumeSet.removeVolume(HddsVolumeUtil.getHddsRoot(volume1));
     assertEquals(1, volumeSet.getVolumesList().size());
     String expectedLogMessage = "Volume : " +
         HddsVolumeUtil.getHddsRoot(volume1) + " does not exist in VolumeSet";
-    assertTrue("Log output does not contain expected log message: "
-        + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
+    assertTrue(logs.getOutput().contains(expectedLogMessage),
+        "Log output does not contain expected log message: " +
+            expectedLogMessage);
   }
 
   @Test
@@ -193,7 +189,7 @@ public class TestVolumeSet {
     File newVolume = new File(volume3, HDDS_VOLUME_DIR);
     System.out.println("new volume root: " + newVolume);
     newVolume.mkdirs();
-    assertTrue("Failed to create new volume root", newVolume.exists());
+    assertTrue(newVolume.exists(), "Failed to create new volume root");
     File dataDir = new File(newVolume, "chunks");
     dataDir.mkdirs();
     assertTrue(dataDir.exists());
@@ -205,8 +201,8 @@ public class TestVolumeSet {
 
     assertFalse(success);
     assertEquals(2, volumeSet.getVolumesList().size());
-    assertTrue("AddVolume should fail for an inconsistent volume",
-        !checkVolumeExistsInVolumeSet(volume3));
+    assertFalse(checkVolumeExistsInVolumeSet(volume3), "AddVolume should fail" +
+        " for an inconsistent volume");
 
     // Delete volume3
     File volume = new File(volume3);
@@ -221,7 +217,7 @@ public class TestVolumeSet {
 
     // Verify that volume usage can be queried during shutdown.
     for (StorageVolume volume : volumesList) {
-      Assert.assertNotNull(volume.getVolumeInfo().get()
+      Assertions.assertNotNull(volume.getVolumeInfo().get()
               .getUsageForTesting());
       volume.getAvailable();
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 973d2ae6ae..832fbcac03 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -56,46 +56,38 @@ import org.apache.hadoop.util.Timer;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import org.apache.commons.io.FileUtils;
+
 import static org.hamcrest.CoreMatchers.is;
-import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+
+import org.hamcrest.MatcherAssert;
+import org.junit.jupiter.api.AfterEach;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
 import org.junit.jupiter.api.Assertions;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-
 /**
  * Verify that {@link MutableVolumeSet} correctly checks for failed disks
  * during initialization.
  */
+@Timeout(30)
 public class TestVolumeSetDiskChecks {
   public static final Logger LOG = LoggerFactory.getLogger(
       TestVolumeSetDiskChecks.class);
 
-  @Rule
-  public TestRule globalTimeout = new JUnit5AwareTimeout(Timeout.seconds(30));
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
   private OzoneConfiguration conf = null;
 
   /**
    * Cleanup volume directories.
    */
-  @After
+  @AfterEach
   public void cleanup() {
     final Collection<String> dirs = conf.getTrimmedStringCollection(
         DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY);
@@ -118,8 +110,9 @@ public class TestVolumeSetDiskChecks {
         new MutableVolumeSet(UUID.randomUUID().toString(), conf,
             null, StorageVolume.VolumeType.DATA_VOLUME, null);
 
-    assertThat(volumeSet.getVolumesList().size(), is(numVolumes));
-    assertThat(volumeSet.getFailedVolumesList().size(), is(0));
+    MatcherAssert.assertThat(volumeSet.getVolumesList().size(), is(numVolumes));
+    MatcherAssert.assertThat(
+        volumeSet.getFailedVolumesList().size(), is(0));
 
     // Verify that the Ozone dirs were created during initialization.
     Collection<String> dirs = conf.getTrimmedStringCollection(
@@ -157,21 +150,21 @@ public class TestVolumeSetDiskChecks {
         dummyChecker);
 
     volumeSet.checkAllVolumes();
-    Assert.assertEquals(volumeSet.getFailedVolumesList().size(),
+    Assertions.assertEquals(volumeSet.getFailedVolumesList().size(),
         numBadVolumes);
-    Assert.assertEquals(volumeSet.getVolumesList().size(),
+    Assertions.assertEquals(volumeSet.getVolumesList().size(),
         numVolumes - numBadVolumes);
 
     metaVolumeSet.checkAllVolumes();
-    Assert.assertEquals(metaVolumeSet.getFailedVolumesList().size(),
+    Assertions.assertEquals(metaVolumeSet.getFailedVolumesList().size(),
         numBadVolumes);
-    Assert.assertEquals(metaVolumeSet.getVolumesList().size(),
+    Assertions.assertEquals(metaVolumeSet.getVolumesList().size(),
         numVolumes - numBadVolumes);
 
     dbVolumeSet.checkAllVolumes();
-    Assert.assertEquals(dbVolumeSet.getFailedVolumesList().size(),
+    Assertions.assertEquals(dbVolumeSet.getFailedVolumesList().size(),
         numBadVolumes);
-    Assert.assertEquals(dbVolumeSet.getVolumesList().size(),
+    Assertions.assertEquals(dbVolumeSet.getVolumesList().size(),
         numVolumes - numBadVolumes);
 
     volumeSet.shutdown();
@@ -340,9 +333,9 @@ public class TestVolumeSetDiskChecks {
 
     conSet.handleVolumeFailures(stateContext);
     // ContainerID1 should be removed belonging to failed volume
-    Assert.assertNull(conSet.getContainer(containerID1));
+    Assertions.assertNull(conSet.getContainer(containerID1));
     // ContainerID should exist belonging to normal volume
-    Assert.assertNotNull(conSet.getContainer(containerID));
+    Assertions.assertNotNull(conSet.getContainer(containerID));
     expectedReportCount.put(
         StorageContainerDatanodeProtocolProtos.ContainerReportsProto
             .getDescriptor().getFullName(), 1);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
index 0adaa09bd8..6773264830 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java
@@ -30,9 +30,9 @@ import java.io.File;
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Interface of parameters for testing different chunk layout implementations.
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
index ddbd4b39f4..2da3486847 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
@@ -38,8 +38,8 @@ import org.apache.ratis.protocol.RaftClientReply;
 import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
 import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled;
 import org.apache.ratis.util.ReferenceCountedObject;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -100,7 +100,7 @@ public class TestKeyValueStreamDataChannel {
     buf.writeBytes(protoLengthBuf);
 
     final ContainerCommandRequestProto proto = readPutBlockRequest(buf);
-    Assert.assertEquals(PUT_BLOCK_PROTO, proto);
+    Assertions.assertEquals(PUT_BLOCK_PROTO, proto);
   }
 
   @Test
@@ -138,7 +138,7 @@ public class TestKeyValueStreamDataChannel {
 
   static void runTestBuffers(int dataSize, int max, int seed, String name)
       throws Exception {
-    Assert.assertTrue(max >= PUT_BLOCK_PROTO_SIZE);
+    Assertions.assertTrue(max >= PUT_BLOCK_PROTO_SIZE);
 
     // random data
     final byte[] data = new byte[dataSize];
@@ -166,18 +166,18 @@ public class TestKeyValueStreamDataChannel {
     // check output
     final ByteBuf outBuf = out.getOutBuf();
     LOG.info("outBuf = {}", outBuf);
-    Assert.assertEquals(dataSize, outBuf.readableBytes());
+    Assertions.assertEquals(dataSize, outBuf.readableBytes());
     for (int i = 0; i < dataSize; i++) {
-      Assert.assertEquals(data[i], outBuf.readByte());
+      Assertions.assertEquals(data[i], outBuf.readByte());
     }
     outBuf.release();
   }
 
   static void assertReply(DataStreamReply reply, int byteWritten,
       ContainerCommandRequestProto proto) {
-    Assert.assertTrue(reply.isSuccess());
-    Assert.assertEquals(byteWritten, reply.getBytesWritten());
-    Assert.assertEquals(proto, ((Reply)reply).getPutBlockRequest());
+    Assertions.assertTrue(reply.isSuccess());
+    Assertions.assertEquals(byteWritten, reply.getBytesWritten());
+    Assertions.assertEquals(proto, ((Reply)reply).getPutBlockRequest());
   }
 
   static class Output implements DataStreamOutput {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSimpleContainerDownloader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSimpleContainerDownloader.java
index f6908a7053..9c657e9b74 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSimpleContainerDownloader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSimpleContainerDownloader.java
@@ -21,11 +21,10 @@ package org.apache.hadoop.ozone.container.replication;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Timeout;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -45,8 +44,8 @@ import static org.mockito.Mockito.verify;
  */
 public class TestSimpleContainerDownloader {
 
-  @Rule
-  public final TemporaryFolder tempDir = new TemporaryFolder();
+  @TempDir
+  private Path tempDir;
 
   @Test
   public void testGetContainerDataFromReplicasHappyPath() throws Exception {
@@ -58,7 +57,7 @@ public class TestSimpleContainerDownloader {
 
     //WHEN
     Path result = downloader.getContainerDataFromReplicas(1L, datanodes,
-        tempDir.newFolder().toPath(), NO_COMPRESSION);
+        tempDir, NO_COMPRESSION);
 
     //THEN
     Assertions.assertEquals(datanodes.get(0).getUuidString(),
@@ -79,7 +78,7 @@ public class TestSimpleContainerDownloader {
     //WHEN
     final Path result =
         downloader.getContainerDataFromReplicas(1L, datanodes,
-            tempDir.newFolder().toPath(), NO_COMPRESSION);
+            tempDir, NO_COMPRESSION);
 
     //THEN
     //first datanode is failed, second worked
@@ -100,7 +99,7 @@ public class TestSimpleContainerDownloader {
     //WHEN
     final Path result =
         downloader.getContainerDataFromReplicas(1L, datanodes,
-            tempDir.newFolder().toPath(), NO_COMPRESSION);
+            tempDir, NO_COMPRESSION);
 
     //THEN
     //first datanode is failed, second worked
@@ -126,7 +125,7 @@ public class TestSimpleContainerDownloader {
     //returned.
     for (int i = 0; i < 10000; i++) {
       Path path = downloader.getContainerDataFromReplicas(1L, datanodes,
-          tempDir.newFolder().toPath(), NO_COMPRESSION);
+          tempDir, NO_COMPRESSION);
       if (path.toString().equals(datanodes.get(1).getUuidString())) {
         return;
       }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
index 82a9db0377..e7d20028a6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java
@@ -22,6 +22,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.DATANODE_LAYOUT_VERSION_DIR;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.HddsConfigKeys;
@@ -31,10 +33,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils;
 import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Tests that DataNode will throw an exception on creation when it reads in a
@@ -42,18 +43,18 @@ import org.junit.rules.TemporaryFolder;
  * software layout version.
  */
 public class TestDataNodeStartupSlvLessThanMlv {
-  @Rule
-  public TemporaryFolder tempFolder = new TemporaryFolder();
+  @TempDir
+  private Path tempFolder;
 
   @Test
   public void testStartupSlvLessThanMlv() throws Exception {
     // Add subdirectories under the temporary folder where the version file
     // will be placed.
-    File datanodeSubdir = tempFolder.newFolder(DATANODE_LAYOUT_VERSION_DIR);
+    File datanodeSubdir = Files.createDirectory(
+        tempFolder.resolve(DATANODE_LAYOUT_VERSION_DIR)).toFile();
 
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        tempFolder.getRoot().getAbsolutePath());
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFolder.toString());
 
     // Set metadata layout version larger then software layout version.
     int largestSlv = maxLayoutVersion();
@@ -66,7 +67,7 @@ public class TestDataNodeStartupSlvLessThanMlv {
 
     try {
       new DatanodeStateMachine(getNewDatanodeDetails(), conf);
-      Assert.fail("Expected IOException due to incorrect MLV on DataNode " +
+      Assertions.fail("Expected IOException due to incorrect MLV on DataNode " +
+          "creation.");
     } catch (IOException e) {
       String expectedMessage = String.format("Metadata layout version (%s) > " +


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to