This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new fb2caac8ba HDDS-10164. Replace GenericTestUtils temp dirs with 
`@TempDir` in hadoop-hdds (#6122)
fb2caac8ba is described below

commit fb2caac8ba7bf646d246cc2bd3138bdf37167e66
Author: Zhaohui Wang <[email protected]>
AuthorDate: Wed Jan 31 18:26:19 2024 +0800

    HDDS-10164. Replace GenericTestUtils temp dirs with `@TempDir` in 
hadoop-hdds (#6122)
---
 .../hdds/fs/TestCachingSpaceUsageSource.java       |  8 ++--
 .../java/org/apache/hadoop/hdds/fs/TestDU.java     | 23 +++------
 .../org/apache/hadoop/hdds/fs/TestDUFactory.java   |  5 +-
 .../hdds/fs/TestDedicatedDiskSpaceUsage.java       | 25 ++--------
 .../fs/TestDedicatedDiskSpaceUsageFactory.java     |  6 +--
 .../hadoop/hdds/fs/TestSaveSpaceUsageToFile.java   | 17 ++-----
 .../metadata/TestDatanodeCRLStoreImpl.java         |  6 +--
 .../hadoop/ozone/TestHddsDatanodeService.java      | 12 +----
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   | 10 +---
 .../container/common/TestBlockDeletingService.java | 10 +---
 .../container/common/TestDatanodeStateMachine.java | 14 ++----
 .../impl/TestContainerDeletionChoosingPolicy.java  | 15 ++----
 .../common/impl/TestContainerPersistence.java      |  7 +--
 .../container/common/impl/TestHddsDispatcher.java  | 56 +++++++++-------------
 .../volume/TestCapacityVolumeChoosingPolicy.java   | 25 ++++------
 .../volume/TestRoundRobinVolumeChoosingPolicy.java | 20 ++++----
 .../common/volume/TestVolumeSetDiskChecks.java     | 15 +++---
 .../keyvalue/TestKeyValueBlockIterator.java        |  6 +--
 .../container/keyvalue/TestKeyValueContainer.java  | 41 ++++++----------
 .../TestKeyValueContainerIntegrityChecks.java      |  6 +--
 .../container/keyvalue/helpers/TestChunkUtils.java | 12 ++---
 .../replication/TestReplicationSupervisor.java     |  9 ++--
 .../stream/TestDirstreamClientHandler.java         | 16 +------
 .../token/TestOzoneBlockTokenSecretManager.java    |  9 ++--
 .../client/TestDefaultCertificateClient.java       |  6 +--
 .../client/TestDnCertificateClientInit.java        |  8 +---
 .../security/x509/keys/TestHDDSKeyGenerator.java   |  8 +++-
 .../scm/TestStorageContainerManagerHttpServer.java | 14 ++----
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |  7 +--
 .../scm/container/TestContainerManagerImpl.java    |  9 +---
 .../scm/container/TestContainerReportHandler.java  |  9 +---
 .../scm/container/TestContainerStateManager.java   |  9 +---
 .../TestIncrementalContainerReportHandler.java     | 14 ++----
 .../scm/container/TestUnknownContainerReport.java  |  9 +---
 .../hadoop/hdds/scm/ha/TestSCMHAConfiguration.java |  8 ++--
 .../hdds/scm/node/TestContainerPlacement.java      |  9 +---
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  | 18 +++----
 .../hdds/scm/node/TestNodeReportHandler.java       | 18 +++----
 .../hdds/scm/node/TestSCMNodeStorageStatMap.java   | 52 ++++++++++----------
 .../TestPipelineDatanodesIntersection.java         | 10 +---
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  8 +---
 .../scm/pipeline/TestPipelineStateManagerImpl.java |  9 +---
 .../scm/pipeline/TestRatisPipelineProvider.java    | 20 ++++----
 .../scm/pipeline/TestSimplePipelineProvider.java   |  9 +---
 .../safemode/TestHealthyPipelineSafeModeRule.java  | 23 +++------
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  | 22 ++-------
 .../scm/security/TestRootCARotationManager.java    |  7 +--
 .../cli/container/upgrade/TestUpgradeManager.java  | 16 +------
 .../hadoop/hdds/scm/TestSecretKeySnapshot.java     |  3 +-
 .../apache/hadoop/hdds/scm/TestSecretKeysApi.java  |  3 +-
 50 files changed, 239 insertions(+), 462 deletions(-)

diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
index 1be70baa94..674c1233de 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.fs;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.Builder;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.mockito.stubbing.Answer;
 
 import java.io.File;
@@ -30,7 +31,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import static org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.newBuilder;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyLong;
@@ -44,8 +44,8 @@ import static org.mockito.Mockito.when;
  */
 public class TestCachingSpaceUsageSource {
 
-  private static final File DIR =
-      getTestDir(TestCachingSpaceUsageSource.class.getSimpleName());
+  @TempDir
+  private static File dir;
 
   @Test
   public void providesInitialValueUntilStarted() {
@@ -156,7 +156,7 @@ public class TestCachingSpaceUsageSource {
   }
 
   private static Builder paramsBuilder() {
-    return newBuilder(DIR)
+    return newBuilder(dir)
         .withSource(MockSpaceUsageSource.fixed(10000, 1000))
         .withRefresh(Duration.ofMinutes(5));
   }
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
index a87f3fad25..8363f8b41b 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -17,16 +17,13 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.Shell;
-import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assumptions.assumeFalse;
 import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
@@ -40,18 +37,12 @@ import java.util.Random;
  */
 public class TestDU {
 
-  private static final File DIR = getTestDir(TestDU.class.getSimpleName());
+  @TempDir
+  private File dir;
 
   @BeforeEach
   void setUp() {
     assumeFalse(Shell.WINDOWS);
-    FileUtil.fullyDelete(DIR);
-    assertTrue(DIR.mkdirs());
-  }
-
-  @AfterEach
-  void tearDown() throws IOException {
-    FileUtil.fullyDelete(DIR);
   }
 
   static void createFile(File newFile, int size) throws IOException {
@@ -80,7 +71,7 @@ public class TestDU {
   @Test
   void testGetUsed() throws Exception {
     final long writtenSize = 32 * KB;
-    File file = new File(DIR, "data");
+    File file = new File(dir, "data");
     createFile(file, (int) writtenSize);
 
     SpaceUsageSource du = new DU(file);
@@ -91,9 +82,9 @@ public class TestDU {
 
   @Test
   void testExcludePattern() throws IOException {
-    createFile(new File(DIR, "include.txt"), (int) (4 * KB));
-    createFile(new File(DIR, "exclude.tmp"), (int) (100 * KB));
-    SpaceUsageSource du = new DU(DIR, "*.tmp");
+    createFile(new File(dir, "include.txt"), (int) (4 * KB));
+    createFile(new File(dir, "exclude.tmp"), (int) (100 * KB));
+    SpaceUsageSource du = new DU(dir, "*.tmp");
 
     long usedSpace = du.getUsedSpace();
 
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
index 6e603f8ff0..4e8379c949 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
@@ -22,8 +22,8 @@ import java.time.Duration;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
@@ -38,8 +38,7 @@ public class TestDUFactory {
   }
 
   @Test
-  public void testParams() {
-    File dir = getTestDir(getClass().getSimpleName());
+  public void testParams(@TempDir File dir) {
     Duration refresh = Duration.ofHours(1);
 
     OzoneConfiguration conf = new OzoneConfiguration();
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
index 85b21df86b..04cfd42031 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
@@ -17,45 +17,30 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
 
 import static org.apache.hadoop.hdds.fs.TestDU.createFile;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests for {@link DedicatedDiskSpaceUsage}.
  */
 class TestDedicatedDiskSpaceUsage {
 
-  private static final File DIR =
-      getTestDir(TestDedicatedDiskSpaceUsage.class.getSimpleName());
+  @TempDir
+  private File dir;
 
   private static final int FILE_SIZE = 1024;
 
-  @BeforeEach
-  void setUp() {
-    FileUtil.fullyDelete(DIR);
-    assertTrue(DIR.mkdirs());
-  }
-
-  @AfterEach
-  void tearDown() {
-    FileUtil.fullyDelete(DIR);
-  }
-
   @Test
   void testGetUsed() throws IOException {
-    File file = new File(DIR, "data");
+    File file = new File(dir, "data");
     createFile(file, FILE_SIZE);
-    SpaceUsageSource subject = new DedicatedDiskSpaceUsage(DIR);
+    SpaceUsageSource subject = new DedicatedDiskSpaceUsage(dir);
 
     // condition comes from TestDFCachingGetSpaceUsed in Hadoop Common
     assertThat(subject.getUsedSpace()).isGreaterThanOrEqualTo(FILE_SIZE - 20);
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
index 0142ee56af..8391976da0 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
@@ -23,8 +23,9 @@ import java.time.Duration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import static 
org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
@@ -40,10 +41,9 @@ public class TestDedicatedDiskSpaceUsageFactory {
   }
 
   @Test
-  public void testParams() {
+  public void testParams(@TempDir File dir) {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(configKeyForRefreshPeriod(), "2m");
-    File dir = getTestDir(getClass().getSimpleName());
 
     SpaceUsageCheckParams params = new DedicatedDiskSpaceUsageFactory()
         .setConfiguration(conf)
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
index f35e697508..6a901d6cbc 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
@@ -18,10 +18,9 @@
 package org.apache.hadoop.hdds.fs;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -30,7 +29,6 @@ import java.time.Duration;
 import java.time.Instant;
 import java.util.OptionalLong;
 
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.apache.ozone.test.GenericTestUtils.waitFor;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -41,8 +39,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
  */
 public class TestSaveSpaceUsageToFile {
 
-  private static final File DIR =
-      getTestDir(TestSaveSpaceUsageToFile.class.getSimpleName());
+  @TempDir
+  private File dir;
 
   private static final Duration LONG_EXPIRY = Duration.ofMinutes(15);
 
@@ -53,14 +51,7 @@ public class TestSaveSpaceUsageToFile {
 
   @BeforeEach
   public void setup() {
-    FileUtil.fullyDelete(DIR);
-    assertTrue(DIR.mkdirs());
-    file = new File(DIR, "space_usage.txt");
-  }
-
-  @AfterEach
-  public void cleanup() {
-    FileUtil.fullyDelete(DIR);
+    file = new File(dir, "space_usage.txt");
   }
 
   @Test
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
index 8d3de5218a..f73f14f0c2 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/hdds/datanode/metadata/TestDatanodeCRLStoreImpl.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdds.datanode.metadata;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.SecurityConfig;
@@ -27,13 +26,13 @@ import 
org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.crl.CRLInfo;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.ozone.test.GenericTestUtils;
 import org.bouncycastle.asn1.x509.CRLReason;
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.bouncycastle.cert.X509v2CRLBuilder;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.security.KeyPair;
@@ -47,6 +46,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
  * Test class for {@link DatanodeCRLStoreImpl}.
  */
 public class TestDatanodeCRLStoreImpl {
+  @TempDir
   private File testDir;
   private OzoneConfiguration conf;
   private DatanodeCRLStore dnCRLStore;
@@ -56,7 +56,6 @@ public class TestDatanodeCRLStoreImpl {
 
   @BeforeEach
   public void setUp() throws Exception {
-    testDir = GenericTestUtils.getRandomizedTestDir();
     conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     dnCRLStore = new DatanodeCRLStoreImpl(conf);
@@ -71,7 +70,6 @@ public class TestDatanodeCRLStoreImpl {
     if (dnCRLStore.getStore() != null) {
       dnCRLStore.getStore().close();
     }
-    FileUtil.fullyDelete(testDir);
   }
   @Test
   public void testCRLStore() throws Exception {
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
index 8a3921d795..cc88940611 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
@@ -24,7 +24,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -39,11 +38,8 @@ import 
org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.keyvalue.ContainerTestVersionInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import 
org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.ServicePlugin;
 
-import org.junit.jupiter.api.AfterEach;
-
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
@@ -57,6 +53,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
 import org.slf4j.Logger;
@@ -68,6 +65,7 @@ import org.slf4j.LoggerFactory;
 
 public class TestHddsDatanodeService {
 
+  @TempDir
   private File testDir;
   private static final Logger LOG =
       LoggerFactory.getLogger(TestHddsDatanodeService.class);
@@ -92,7 +90,6 @@ public class TestHddsDatanodeService {
     conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
         serverAddresses.toArray(new String[0]));
 
-    testDir = GenericTestUtils.getRandomizedTestDir();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     conf.set(OZONE_SCM_NAMES, "localhost");
     conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class,
@@ -109,11 +106,6 @@ public class TestHddsDatanodeService {
     conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
   }
 
-  @AfterEach
-  public void tearDown() {
-    FileUtil.fullyDelete(testDir);
-  }
-
   @Test
   public void testStartup() {
     service.start(conf);
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index 731cab97c4..10d2bc91a7 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -29,7 +29,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -66,16 +65,17 @@ import static org.mockito.Mockito.when;
 
 import org.apache.ozone.test.tag.Flaky;
 import org.bouncycastle.cert.X509CertificateHolder;
-import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Test class for {@link HddsDatanodeService}.
  */
 public class TestHddsSecureDatanodeInit {
+  @TempDir
   private static File testDir;
   private static OzoneConfiguration conf;
   private static HddsDatanodeService service;
@@ -96,7 +96,6 @@ public class TestHddsSecureDatanodeInit {
 
   @BeforeAll
   public static void setUp() throws Exception {
-    testDir = GenericTestUtils.getRandomizedTestDir();
     conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
@@ -143,11 +142,6 @@ public class TestHddsSecureDatanodeInit {
     scmClient = mock(SCMSecurityProtocolClientSideTranslatorPB.class);
   }
 
-  @AfterAll
-  public static void tearDown() {
-    FileUtil.fullyDelete(testDir);
-  }
-
   @BeforeEach
   public void setUpDNCertClient() throws IOException {
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 759170132e..bc56141fb0 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.container.common;
 
 
 import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -76,6 +75,7 @@ import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Assumptions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
@@ -121,6 +121,7 @@ import static org.mockito.Mockito.when;
 @Timeout(30)
 public class TestBlockDeletingService {
 
+  @TempDir
   private File testRoot;
   private String scmId;
   private String datanodeUuid;
@@ -134,12 +135,6 @@ public class TestBlockDeletingService {
   @BeforeEach
   public void init() throws IOException {
     CodecBuffer.enableLeakDetection();
-
-    testRoot = GenericTestUtils
-        .getTestDir(TestBlockDeletingService.class.getSimpleName());
-    if (testRoot.exists()) {
-      FileUtils.cleanDirectory(testRoot);
-    }
     scmId = UUID.randomUUID().toString();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
@@ -152,7 +147,6 @@ public class TestBlockDeletingService {
   @AfterEach
   public void cleanup() throws IOException {
     BlockUtils.shutdownCache(conf);
-    FileUtils.deleteDirectory(testRoot);
     CodecBuffer.assertNoLeaks();
   }
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index a7291e9018..57e9613c2d 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -28,7 +28,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -52,6 +51,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -76,6 +76,7 @@ public class TestDatanodeStateMachine {
   private List<ScmTestMock> mockServers;
   private ExecutorService executorService;
   private OzoneConfiguration conf;
+  @TempDir
   private File testRoot;
 
   @BeforeEach
@@ -105,13 +106,6 @@ public class TestDatanodeStateMachine {
     conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
         serverAddresses.toArray(new String[0]));
 
-    String path = GenericTestUtils
-        .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
-    testRoot = new File(path);
-    if (!testRoot.mkdirs()) {
-      LOG.info("Required directories {} already exist.", testRoot);
-    }
-
     File dataDir = new File(testRoot, "data");
     conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath());
     if (!dataDir.mkdirs()) {
@@ -119,7 +113,7 @@ public class TestDatanodeStateMachine {
     }
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         new File(testRoot, "scm").getAbsolutePath());
-    path = new File(testRoot, "datanodeID").getAbsolutePath();
+    String path = new File(testRoot, "datanodeID").getAbsolutePath();
     conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, path);
     executorService = HadoopExecutors.newCachedThreadPool(
         new ThreadFactoryBuilder().setDaemon(true)
@@ -149,8 +143,6 @@ public class TestDatanodeStateMachine {
       }
     } catch (Exception e) {
       //ignore all exception from the shutdown
-    } finally {
-      FileUtil.fullyDelete(testRoot);
     }
   }
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index cd5f6c0f9b..890bca18cb 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -40,8 +40,8 @@ import 
org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import 
org.apache.hadoop.ozone.container.common.impl.BlockDeletingService.ContainerBlockInfo;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -54,6 +54,8 @@ import static org.mockito.Mockito.when;
  * The class for testing container deletion choosing policy.
  */
 public class TestContainerDeletionChoosingPolicy {
+  @TempDir
+  private File tempFile;
   private String path;
   private OzoneContainer ozoneContainer;
   private ContainerSet containerSet;
@@ -63,23 +65,15 @@ public class TestContainerDeletionChoosingPolicy {
   private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
   private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000;
 
-  private ContainerLayoutVersion layoutVersion;
-
-  public void setLayoutVersion(ContainerLayoutVersion layout) {
-    this.layoutVersion = layout;
-  }
-
   @BeforeEach
   public void init() throws Throwable {
     conf = new OzoneConfiguration();
-    path = GenericTestUtils
-        
.getTempPath(TestContainerDeletionChoosingPolicy.class.getSimpleName());
+    path = tempFile.getPath();
   }
 
   @ContainerLayoutTestInfo.ContainerTest
   public void testRandomChoosingPolicy(ContainerLayoutVersion layout)
       throws IOException {
-    setLayoutVersion(layout);
     File containerDir = new File(path);
     if (containerDir.exists()) {
       FileUtils.deleteDirectory(new File(path));
@@ -143,7 +137,6 @@ public class TestContainerDeletionChoosingPolicy {
   @ContainerLayoutTestInfo.ContainerTest
   public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout)
       throws IOException {
-    setLayoutVersion(layout);
     File containerDir = new File(path);
     if (containerDir.exists()) {
       FileUtils.deleteDirectory(new File(path));
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 12500201a8..7bbde8a8bf 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -73,7 +73,6 @@ import 
org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.ozone.test.GenericTestUtils;
 
 import com.google.common.collect.Maps;
 import org.apache.commons.io.FileUtils;
@@ -82,6 +81,7 @@ import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -113,6 +113,8 @@ public class TestContainerPersistence {
   private static final String SCM_ID = UUID.randomUUID().toString();
   private static final Logger LOGGER =
       LoggerFactory.getLogger(TestContainerPersistence.class);
+  @TempDir
+  private static File hddsFile;
   private static String hddsPath;
   private static OzoneConfiguration conf;
   private static VolumeChoosingPolicy volumeChoosingPolicy;
@@ -138,8 +140,7 @@ public class TestContainerPersistence {
   @BeforeAll
   public static void init() {
     conf = new OzoneConfiguration();
-    hddsPath = GenericTestUtils
-        .getTempPath(TestContainerPersistence.class.getSimpleName());
+    hddsPath = hddsFile.getPath();
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, hddsPath);
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, hddsPath);
     volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index 71882fe62b..95df6c647f 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.collect.Maps;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.BlockID;
@@ -63,6 +62,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -101,6 +101,8 @@ import static org.mockito.Mockito.when;
 public class TestHddsDispatcher {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestHddsDispatcher.class);
+  @TempDir
+  private File testDir;
 
   public static final IncrementalReportSender<Container> NO_OP_ICR_SENDER =
       c -> {
@@ -110,11 +112,10 @@ public class TestHddsDispatcher {
   public void testContainerCloseActionWhenFull(
       ContainerLayoutVersion layout) throws IOException {
 
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+    conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
     DatanodeDetails dd = randomDatanodeDetails();
     MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
         null, StorageVolume.VolumeType.DATA_VOLUME, null);
@@ -160,22 +161,20 @@ public class TestHddsDispatcher {
     } finally {
       volumeSet.shutdown();
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
   @ContainerLayoutTestInfo.ContainerTest
   public void testContainerCloseActionWhenVolumeFull(
       ContainerLayoutVersion layoutVersion) throws Exception {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setStorageSize(HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE,
         100.0, StorageUnit.BYTES);
     DatanodeDetails dd = randomDatanodeDetails();
 
     HddsVolume.Builder volumeBuilder =
-        new HddsVolume.Builder(testDir).datanodeUuid(dd.getUuidString())
+        new HddsVolume.Builder(testDirPath).datanodeUuid(dd.getUuidString())
             .conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
     // state of cluster : available (140) > 100  ,datanode volume
     // utilisation threshold not yet reached. container creates are successful.
@@ -237,19 +236,17 @@ public class TestHddsDispatcher {
     } finally {
       volumeSet.shutdown();
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
   @Test
   public void testCreateContainerWithWriteChunk() throws IOException {
-    String testDir =
-        GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     try {
       UUID scmId = UUID.randomUUID();
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+      conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
       DatanodeDetails dd = randomDatanodeDetails();
       HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
       ContainerCommandRequestProto writeChunkRequest =
@@ -292,19 +289,17 @@ public class TestHddsDispatcher {
       }
     } finally {
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
   @Test
   public void testContainerNotFoundWithCommitChunk() throws IOException {
-    String testDir =
-        GenericTestUtils.getTempPath(TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     try {
       UUID scmId = UUID.randomUUID();
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+      conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
       DatanodeDetails dd = randomDatanodeDetails();
       HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
       ContainerCommandRequestProto writeChunkRequest =
@@ -329,19 +324,17 @@ public class TestHddsDispatcher {
               + " does not exist");
     } finally {
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
   @Test
   public void testWriteChunkWithCreateContainerFailure() throws IOException {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     try {
       UUID scmId = UUID.randomUUID();
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+      conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
       DatanodeDetails dd = randomDatanodeDetails();
       HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
       ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
@@ -366,19 +359,17 @@ public class TestHddsDispatcher {
               + " creation failed , Result: DISK_OUT_OF_SPACE");
     } finally {
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
   @Test
   public void testDuplicateWriteChunkAndPutBlockRequest() throws  IOException {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
+    String testDirPath = testDir.getPath();
     try {
       UUID scmId = UUID.randomUUID();
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+      conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
+      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
       DatanodeDetails dd = randomDatanodeDetails();
       HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
       ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
@@ -426,7 +417,6 @@ public class TestHddsDispatcher {
       }
     } finally {
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
@@ -548,11 +538,10 @@ public class TestHddsDispatcher {
 
   @Test
   public void testValidateToken() throws Exception {
-    final String testDir = GenericTestUtils.getRandomizedTempPath();
     try {
       final OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+      conf.set(HDDS_DATANODE_DIR_KEY, testDir.getPath());
+      conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
 
       final DatanodeDetails dd = randomDatanodeDetails();
       final UUID scmId = UUID.randomUUID();
@@ -611,7 +600,6 @@ public class TestHddsDispatcher {
       }
     } finally {
       ContainerMetrics.remove();
-      FileUtils.deleteDirectory(new File(testDir));
     }
   }
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
index 9ee0d17dde..4718df3ae3 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
@@ -29,8 +28,9 @@ import 
org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
-import java.io.File;
+import java.nio.file.Path;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -38,7 +38,6 @@ import java.util.List;
 import java.util.Map;
 
 import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -52,35 +51,34 @@ public class TestCapacityVolumeChoosingPolicy {
   private final List<HddsVolume> volumes = new ArrayList<>();
 
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
-  private static final String BASE_DIR =
-      getTestDir(TestCapacityVolumeChoosingPolicy.class.getSimpleName())
-          .getAbsolutePath();
-  private static final String VOLUME_1 = BASE_DIR + "disk1";
-  private static final String VOLUME_2 = BASE_DIR + "disk2";
-  private static final String VOLUME_3 = BASE_DIR + "disk3";
+  @TempDir
+  private Path baseDir;
 
   @BeforeEach
   public void setup() throws Exception {
+    String volume1 = baseDir.resolve("disk1").toString();
+    String volume2 = baseDir.resolve("disk2").toString();
+    String volume3 = baseDir.resolve("disk3").toString();
     policy = new CapacityVolumeChoosingPolicy();
 
     SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100);
     SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of(
         source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
-    HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1)
+    HddsVolume vol1 = new HddsVolume.Builder(volume1)
         .conf(CONF)
         .usageCheckFactory(factory1)
         .build();
     SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200);
     SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of(
         source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
-    HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2)
+    HddsVolume vol2 = new HddsVolume.Builder(volume2)
         .conf(CONF)
         .usageCheckFactory(factory2)
         .build();
     SpaceUsageSource source3 = MockSpaceUsageSource.fixed(500, 300);
     SpaceUsageCheckFactory factory3 = MockSpaceUsageCheckFactory.of(
         source3, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
-    HddsVolume vol3 = new HddsVolume.Builder(VOLUME_3)
+    HddsVolume vol3 = new HddsVolume.Builder(volume3)
         .conf(CONF)
         .usageCheckFactory(factory3)
         .build();
@@ -94,9 +92,6 @@ public class TestCapacityVolumeChoosingPolicy {
   @AfterEach
   public void cleanUp() {
     volumes.forEach(HddsVolume::shutdown);
-    FileUtil.fullyDelete(new File(VOLUME_1));
-    FileUtil.fullyDelete(new File(VOLUME_2));
-    FileUtil.fullyDelete(new File(VOLUME_3));
   }
 
   @Test
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 72f1d451b5..cc6fe87e19 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -18,12 +18,11 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import java.io.File;
+import java.nio.file.Path;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
@@ -35,8 +34,8 @@ import 
org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
-import static org.apache.ozone.test.GenericTestUtils.getTestDir;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -50,27 +49,26 @@ public class TestRoundRobinVolumeChoosingPolicy {
   private final List<HddsVolume> volumes = new ArrayList<>();
 
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
-  private static final String BASE_DIR =
-      getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName())
-          .getAbsolutePath();
-  private static final String VOLUME_1 = BASE_DIR + "disk1";
-  private static final String VOLUME_2 = BASE_DIR + "disk2";
+  @TempDir
+  private Path baseDir;
 
   @BeforeEach
   public void setup() throws Exception {
+    String volume1 = baseDir.resolve("disk1").toString();
+    String volume2 = baseDir.resolve("disk2").toString();
     policy = new RoundRobinVolumeChoosingPolicy();
 
     SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100);
     SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of(
         source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
-    HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1)
+    HddsVolume vol1 = new HddsVolume.Builder(volume1)
         .conf(CONF)
         .usageCheckFactory(factory1)
         .build();
     SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200);
     SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of(
         source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
-    HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2)
+    HddsVolume vol2 = new HddsVolume.Builder(volume2)
         .conf(CONF)
         .usageCheckFactory(factory2)
         .build();
@@ -83,8 +81,6 @@ public class TestRoundRobinVolumeChoosingPolicy {
   @AfterEach
   public void cleanUp() {
     volumes.forEach(HddsVolume::shutdown);
-    FileUtil.fullyDelete(new File(VOLUME_1));
-    FileUtil.fullyDelete(new File(VOLUME_2));
   }
 
   @Test
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 27e1195a24..e3c610bfe4 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
-import org.apache.hadoop.ozone.container.common.TestDatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
@@ -49,7 +48,6 @@ import 
org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Timer;
 
@@ -59,9 +57,11 @@ import org.apache.commons.io.FileUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -78,6 +78,8 @@ import static org.mockito.Mockito.when;
 public class TestVolumeSetDiskChecks {
   public static final Logger LOG = LoggerFactory.getLogger(
       TestVolumeSetDiskChecks.class);
+  @TempDir
+  private File dir;
 
   private OzoneConfiguration conf = null;
 
@@ -217,21 +219,21 @@ public class TestVolumeSetDiskChecks {
     final OzoneConfiguration ozoneConf = new OzoneConfiguration();
     final List<String> dirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
-      dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+      dirs.add(new File(dir, randomAlphanumeric(10)).toString());
     }
     ozoneConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY,
         String.join(",", dirs));
 
     final List<String> metaDirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
-      metaDirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+      metaDirs.add(new File(dir, randomAlphanumeric(10)).toString());
     }
     ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
         String.join(",", metaDirs));
 
     final List<String> dbDirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
-      dbDirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
+      dbDirs.add(new File(dir, randomAlphanumeric(10)).toString());
     }
     ozoneConf.set(OzoneConfigKeys.HDDS_DATANODE_CONTAINER_DB_DIR,
         String.join(",", dbDirs));
@@ -264,8 +266,7 @@ public class TestVolumeSetDiskChecks {
     ContainerSet conSet = new ContainerSet(20);
     when(ozoneContainer.getContainerSet()).thenReturn(conSet);
 
-    String path = GenericTestUtils
-        .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
+    String path = dir.getPath();
     File testRoot = new File(path);
 
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index 52316c4326..49ddd5f674 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -28,7 +28,6 @@ import java.util.NoSuchElementException;
 import java.util.UUID;
 
 import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -46,7 +45,6 @@ import 
org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.ozone.test.GenericTestUtils;
 
 import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
@@ -60,6 +58,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
+import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -75,6 +74,7 @@ public class TestKeyValueBlockIterator {
   private KeyValueContainerData containerData;
   private MutableVolumeSet volumeSet;
   private OzoneConfiguration conf;
+  @TempDir
   private File testRoot;
   private DBHandle db;
   private ContainerLayoutVersion layout;
@@ -110,7 +110,6 @@ public class TestKeyValueBlockIterator {
   }
 
   public void setup() throws Exception {
-    testRoot = GenericTestUtils.getRandomizedTestDir();
     conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
     volumeSet = new MutableVolumeSet(datanodeID, clusterID, conf, null,
@@ -135,7 +134,6 @@ public class TestKeyValueBlockIterator {
     db.cleanup();
     BlockUtils.shutdownCache(conf);
     volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
   }
 
   @ParameterizedTest
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index a14fc74b3f..89e4fb73ee 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -51,7 +51,6 @@ import 
org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.hadoop.ozone.container.replication.CopyContainerCompression;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
 
 import org.assertj.core.api.Fail;
@@ -110,7 +109,7 @@ import static org.mockito.Mockito.when;
 public class TestKeyValueContainer {
 
   @TempDir
-  private Path folder;
+  private File folder;
 
   private String scmId = UUID.randomUUID().toString();
   private VolumeSet volumeSet;
@@ -229,7 +228,7 @@ public class TestKeyValueContainer {
       ContainerTestVersionInfo versionInfo) throws Exception {
     init(versionInfo);
     String volumeDirPath =
-        Files.createDirectory(folder.resolve("volumeDir"))
+        Files.createDirectory(folder.toPath().resolve("volumeDir"))
             .toFile().getAbsolutePath();
     HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath)
         .conf(CONF).datanodeUuid(datanodeId.toString()).build();
@@ -276,7 +275,7 @@ public class TestKeyValueContainer {
 
     //destination path
     File exportTar = Files.createFile(
-        folder.resolve("export.tar")).toFile();
+        folder.toPath().resolve("export.tar")).toFile();
     TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
     //export the container
     try (FileOutputStream fos = new FileOutputStream(exportTar)) {
@@ -309,7 +308,7 @@ public class TestKeyValueContainer {
     keyValueContainer.update(data.getMetadata(), true);
 
     //destination path
-    File exportTar = Files.createFile(folder.resolve("export.tar")).toFile();
+    File exportTar = Files.createFile(folder.toPath().resolve("export.tar")).toFile();
     TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
     //export the container
     try (FileOutputStream fos = new FileOutputStream(exportTar)) {
@@ -347,7 +346,7 @@ public class TestKeyValueContainer {
 
     //destination path
     File folderToExport = Files.createFile(
-        folder.resolve("export.tar")).toFile();
+        folder.toPath().resolve("export.tar")).toFile();
     for (CopyContainerCompression compr : CopyContainerCompression.values()) {
       TarContainerPacker packer = new TarContainerPacker(compr);
 
@@ -524,7 +523,7 @@ public class TestKeyValueContainer {
         .mapToObj(i -> new Thread(() -> {
           try {
             File file = Files.createFile(
-                folder.resolve("concurrent" + i + ".tar")).toFile();
+                folder.toPath().resolve("concurrent" + i + ".tar")).toFile();
             try (OutputStream out = Files.newOutputStream(file.toPath())) {
               keyValueContainer.exportContainerData(out, packer);
             }
@@ -820,7 +819,7 @@ public class TestKeyValueContainer {
     assumeTrue(isSameSchemaVersion(schemaVersion, OzoneConsts.SCHEMA_V3));
     // Create a new HDDS volume
     String volumeDirPath =
-        Files.createDirectory(folder.resolve("volumeDir")).toFile()
+        Files.createDirectory(folder.toPath().resolve("volumeDir")).toFile()
             .getAbsolutePath();
     HddsVolume newVolume = new HddsVolume.Builder(volumeDirPath)
         .conf(CONF).datanodeUuid(datanodeId.toString()).build();
@@ -859,7 +858,7 @@ public class TestKeyValueContainer {
         if (volume == newVolume) {
           File folderToExport =
               Files.createFile(
-                  folder.resolve(containerId + "_exported.tar.gz")).toFile();
+                  folder.toPath().resolve(containerId + "_exported.tar.gz")).toFile();
           TarContainerPacker packer = new TarContainerPacker(NO_COMPRESSION);
           //export the container
           try (FileOutputStream fos = new FileOutputStream(folderToExport)) {
@@ -929,7 +928,7 @@ public class TestKeyValueContainer {
 
     //destination path
     File folderToExport = Files.createFile(
-        folder.resolve("export.tar")).toFile();
+        folder.toPath().resolve("export.tar")).toFile();
     for (CopyContainerCompression compr : CopyContainerCompression.values()) {
       TarContainerPacker packer = new TarContainerPacker(compr);
 
@@ -978,7 +977,7 @@ public class TestKeyValueContainer {
 
     //destination path
     File folderToExport = Files.createFile(
-        folder.resolve("export.tar")).toFile();
+        folder.toPath().resolve("export.tar")).toFile();
     for (CopyContainerCompression compr : CopyContainerCompression.values()) {
       TarContainerPacker packer = new TarContainerPacker(compr);
 
@@ -1022,14 +1021,8 @@ public class TestKeyValueContainer {
   public void testImportV2ReplicaToV3HddsVolume(
       ContainerTestVersionInfo versionInfo) throws Exception {
     init(versionInfo);
-    final String testDir = GenericTestUtils.getTempPath(
-        TestKeyValueContainer.class.getSimpleName() + "-"
-            + UUID.randomUUID());
-    try {
-      testMixedSchemaImport(testDir, false);
-    } finally {
-      FileUtils.deleteDirectory(new File(testDir));
-    }
+    final String testDir = folder.getPath();
+    testMixedSchemaImport(testDir, false);
   }
 
   /**
@@ -1039,14 +1032,8 @@ public class TestKeyValueContainer {
   public void testImportV3ReplicaToV2HddsVolume(
       ContainerTestVersionInfo versionInfo) throws Exception {
     init(versionInfo);
-    final String testDir = GenericTestUtils.getTempPath(
-        TestKeyValueContainer.class.getSimpleName() + "-"
-            + UUID.randomUUID());
-    try {
-      testMixedSchemaImport(testDir, true);
-    } finally {
-      FileUtils.deleteDirectory(new File(testDir));
-    }
+    final String testDir = folder.getPath();
+    testMixedSchemaImport(testDir, true);
   }
 
   private void testMixedSchemaImport(String dir,
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 51ecb32224..9c531069e9 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -34,8 +33,8 @@ import 
org.apache.hadoop.ozone.container.common.volume.StorageVolume;
 import 
org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -63,6 +62,7 @@ public class TestKeyValueContainerIntegrityChecks {
   private ContainerLayoutTestInfo containerLayoutTestInfo;
   private MutableVolumeSet volumeSet;
   private OzoneConfiguration conf;
+  @TempDir
   private File testRoot;
   private ChunkManager chunkManager;
   private String clusterID = UUID.randomUUID().toString();
@@ -87,7 +87,6 @@ public class TestKeyValueContainerIntegrityChecks {
 
   private void setup() throws Exception {
     LOG.info("Testing  layout:{}", containerLayoutTestInfo.getLayout());
-    this.testRoot = GenericTestUtils.getRandomizedTestDir();
     conf.set(HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testRoot.getAbsolutePath());
     containerLayoutTestInfo.updateConfig(conf);
@@ -101,7 +100,6 @@ public class TestKeyValueContainerIntegrityChecks {
   public void teardown() {
     BlockUtils.shutdownCache(conf);
     volumeSet.shutdown();
-    FileUtil.fullyDelete(testRoot);
   }
 
   protected ContainerLayoutVersion getChunkLayout() {
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
index 14b47a57c3..1a1158a210 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java
@@ -70,7 +70,7 @@ class TestChunkUtils {
   private static final Random RANDOM = new Random();
 
   @TempDir
-  private Path tempDir;
+  private File tempDir;
 
   static ChunkBuffer readData(File file, long off, long len)
       throws StorageContainerException {
@@ -84,7 +84,7 @@ class TestChunkUtils {
     String s = "Hello World";
     byte[] array = s.getBytes(UTF_8);
     ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
-    Path tempFile = tempDir.resolve("concurrent");
+    Path tempFile = tempDir.toPath().resolve("concurrent");
     int len = data.limit();
     int offset = 0;
     File file = tempFile.toFile();
@@ -136,7 +136,7 @@ class TestChunkUtils {
         0, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
     AtomicInteger processed = new AtomicInteger();
     for (int i = 0; i < threads; i++) {
-      Path path = tempDir.resolve(String.valueOf(i));
+      Path path = tempDir.toPath().resolve(String.valueOf(i));
       executor.execute(() -> {
         try {
           ChunkUtils.processFileExclusively(path, () -> {
@@ -166,7 +166,7 @@ class TestChunkUtils {
     String s = "Hello World";
     byte[] array = s.getBytes(UTF_8);
     ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array));
-    Path tempFile = tempDir.resolve("serial");
+    Path tempFile = tempDir.toPath().resolve("serial");
     File file = tempFile.toFile();
     int len = data.limit();
     int offset = 0;
@@ -185,7 +185,7 @@ class TestChunkUtils {
   @Test
   void validateChunkForOverwrite() throws IOException {
 
-    Path tempFile = tempDir.resolve("overwrite");
+    Path tempFile = tempDir.toPath().resolve("overwrite");
     FileUtils.write(tempFile.toFile(), "test", UTF_8);
 
     assertTrue(
@@ -226,7 +226,7 @@ class TestChunkUtils {
 
   @Test
   void testReadData() throws Exception {
-    final File dir = GenericTestUtils.getTestDir("testReadData");
+    final File dir = new File(tempDir, "testReadData");
     try {
       assertTrue(dir.mkdirs());
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
index ae10c7812f..26c6853b64 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.container.replication;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -65,6 +66,7 @@ import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.TestClock;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
 
 import jakarta.annotation.Nonnull;
 
@@ -276,7 +278,8 @@ public class TestReplicationSupervisor {
   }
 
   @ContainerLayoutTestInfo.ContainerTest
-  public void testDownloadAndImportReplicatorFailure() throws IOException {
+  public void testDownloadAndImportReplicatorFailure(ContainerLayoutVersion layout,
+      @TempDir File tempFile) throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
 
     ReplicationSupervisor supervisor = ReplicationSupervisor.newBuilder()
@@ -294,9 +297,7 @@ public class TestReplicationSupervisor {
             any(Path.class), any()))
         .thenReturn(res);
 
-    final String testDir = GenericTestUtils.getTempPath(
-        TestReplicationSupervisor.class.getSimpleName() +
-            "-" + UUID.randomUUID());
+    final String testDir = tempFile.getPath();
     MutableVolumeSet volumeSet = mock(MutableVolumeSet.class);
     when(volumeSet.getVolumesList())
         .thenReturn(singletonList(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
index b42eda58e3..d3907a6031 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/stream/TestDirstreamClientHandler.java
@@ -19,12 +19,9 @@ package org.apache.hadoop.ozone.container.stream;
 
 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.Unpooled;
-import org.apache.commons.io.FileUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import jakarta.annotation.Nonnull;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -39,18 +36,9 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
  */
 public class TestDirstreamClientHandler {
 
+  @TempDir
   private Path tmpDir;
 
-  @BeforeEach
-  public void init() {
-    tmpDir = GenericTestUtils.getRandomizedTestDir().toPath();
-  }
-
-  @AfterEach
-  public void destroy() throws IOException {
-    FileUtils.deleteDirectory(tmpDir.toFile());
-  }
-
   @Test
   public void oneFileStream() throws IOException {
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java
index 56996ff1f4..d653c6af79 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java
@@ -31,12 +31,13 @@ import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient;
 import org.apache.hadoop.hdds.security.symmetric.SecretKeyTestUtil;
 import org.apache.hadoop.hdds.security.SecurityConfig;
 import org.apache.hadoop.security.token.Token;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
+import java.io.File;
 import java.security.NoSuchAlgorithmException;
 import java.util.EnumSet;
 import java.util.UUID;
@@ -61,8 +62,8 @@ import static org.mockito.Mockito.when;
  */
 public class TestOzoneBlockTokenSecretManager {
 
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestOzoneBlockTokenSecretManager.class.getSimpleName());
+  @TempDir
+  private File baseDir;
   private static final String ALGORITHM = "HmacSHA256";
 
   private OzoneBlockTokenSecretManager secretManager;
@@ -77,7 +78,7 @@ public class TestOzoneBlockTokenSecretManager {
     pipeline = MockPipeline.createPipeline(3);
 
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, BASEDIR);
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, baseDir.getPath());
     conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true);
     SecurityConfig securityConfig = new SecurityConfig(conf);
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
index cf7d2e5bce..b5d0425bec 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java
@@ -45,7 +45,6 @@ import java.security.Signature;
 import java.security.cert.X509Certificate;
 import java.time.Duration;
 import java.util.Arrays;
-import java.util.UUID;
 import java.util.function.Predicate;
 
 import org.apache.commons.io.FileUtils;
@@ -88,6 +87,7 @@ public class TestDefaultCertificateClient {
   private X509Certificate x509Certificate;
   private DNCertificateClient dnCertClient;
   private HDDSKeyGenerator keyGenerator;
+  @TempDir
   private Path dnMetaDirPath;
   private SecurityConfig dnSecurityConfig;
   private SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient;
@@ -99,10 +99,7 @@ public class TestDefaultCertificateClient {
     OzoneConfiguration config = new OzoneConfiguration();
     config.setStrings(OZONE_SCM_NAMES, "localhost");
     config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 2);
-    final String dnPath = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
 
-    dnMetaDirPath = Paths.get(dnPath, "test");
     config.set(HDDS_METADATA_DIR_NAME, dnMetaDirPath.toString());
     dnSecurityConfig = new SecurityConfig(config);
 
@@ -130,7 +127,6 @@ public class TestDefaultCertificateClient {
   public void tearDown() throws IOException {
     dnCertClient.close();
     dnCertClient = null;
-    FileUtils.deleteQuietly(dnMetaDirPath.toFile());
   }
 
   /**
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
index 987e841e51..3c3330a2b2 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java
@@ -28,10 +28,10 @@ import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
 import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
 import org.apache.hadoop.ozone.OzoneSecurityUtil;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.ozone.test.GenericTestUtils;
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.Arguments;
 import org.junit.jupiter.params.provider.MethodSource;
@@ -42,7 +42,6 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.KeyPair;
 import java.security.cert.X509Certificate;
-import java.util.UUID;
 import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
@@ -63,6 +62,7 @@ public class TestDnCertificateClientInit {
   private String certSerialId = "3284792342234";
   private DNCertificateClient dnCertificateClient;
   private HDDSKeyGenerator keyGenerator;
+  @TempDir
   private Path metaDirPath;
   private SecurityConfig securityConfig;
   private KeyCodec dnKeyCodec;
@@ -85,9 +85,6 @@ public class TestDnCertificateClientInit {
   @BeforeEach
   public void setUp() throws Exception {
     OzoneConfiguration config = new OzoneConfiguration();
-    final String path = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-    metaDirPath = Paths.get(path, "test");
     config.set(HDDS_METADATA_DIR_NAME, metaDirPath.toString());
     securityConfig = new SecurityConfig(config);
     keyGenerator = new HDDSKeyGenerator(securityConfig);
@@ -107,7 +104,6 @@ public class TestDnCertificateClientInit {
   public void tearDown() throws IOException {
     dnCertificateClient.close();
     dnCertificateClient = null;
-    FileUtils.deleteQuietly(metaDirPath.toFile());
   }
 
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
index cec6b7dd12..9628052b05 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdds.security.x509.keys;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.File;
 import java.security.KeyPair;
 import java.security.NoSuchAlgorithmException;
 import java.security.NoSuchProviderException;
@@ -29,20 +31,22 @@ import java.security.interfaces.RSAPublicKey;
 import java.security.spec.PKCS8EncodedKeySpec;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.SecurityConfig;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Test class for HDDS Key Generator.
  */
 public class TestHDDSKeyGenerator {
   private SecurityConfig config;
+  @TempDir
+  private File tempPath;
 
   @BeforeEach
   public void init() {
     OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OZONE_METADATA_DIRS,  GenericTestUtils.getTempPath("testpath"));
+    conf.set(OZONE_METADATA_DIRS,  tempPath.getPath());
     config = new SecurityConfig(conf);
   }
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 0aa2aacf9d..754fab6d1b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -24,7 +24,6 @@ import java.net.InetSocketAddress;
 import java.net.URL;
 import java.net.URLConnection;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
@@ -33,10 +32,10 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.ozone.test.GenericTestUtils;
 
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.EnumSource;
 
@@ -46,8 +45,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
  * Test http server os SCM with various HTTP option.
  */
 public class TestStorageContainerManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName());
+  @TempDir
+  private static File baseDir;
   private static String keystoresDir;
   private static String sslConfDir;
   private static OzoneConfiguration conf;
@@ -55,12 +54,10 @@ public class TestStorageContainerManagerHttpServer {
 
   @BeforeAll
   public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    File ozoneMetadataDirectory = new File(BASEDIR, "metadata");
+    File ozoneMetadataDirectory = new File(baseDir, "metadata");
     ozoneMetadataDirectory.mkdirs();
     conf = new OzoneConfiguration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    keystoresDir = baseDir.getAbsolutePath();
     sslConfDir = KeyStoreTestUtil.getClasspathDir(
         TestStorageContainerManagerHttpServer.class);
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
@@ -77,7 +74,6 @@ public class TestStorageContainerManagerHttpServer {
   @AfterAll
   public static void tearDown() throws Exception {
     connectionFactory.destroy();
-    FileUtil.fullyDelete(new File(BASEDIR));
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 2ca71e3489..03500529ff 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.commons.lang3.RandomUtils;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
@@ -55,10 +54,10 @@ import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -95,6 +94,7 @@ public class TestDeletedBlockLog {
   private  DeletedBlockLogImpl deletedBlockLog;
   private static final int BLOCKS_PER_TXN = 5;
   private OzoneConfiguration conf;
+  @TempDir
   private File testDir;
   private ContainerManager containerManager;
   private Table<ContainerID, ContainerInfo> containerTable;
@@ -111,8 +111,6 @@ public class TestDeletedBlockLog {
 
   @BeforeEach
   public void setup() throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestDeletedBlockLog.class.getSimpleName());
     conf = new OzoneConfiguration();
     conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
     conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
@@ -205,7 +203,6 @@ public class TestDeletedBlockLog {
     deletedBlockLog.close();
     scm.stop();
     scm.join();
-    FileUtils.deleteDirectory(testDir);
   }
 
   private Map<Long, List<Long>> generateData(int dataSize) throws IOException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index a5150f3c95..0f861a5077 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -21,10 +21,8 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -45,10 +43,10 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -66,6 +64,7 @@ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProt
  */
 public class TestContainerManagerImpl {
 
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private ContainerManager containerManager;
@@ -77,8 +76,6 @@ public class TestContainerManagerImpl {
   @BeforeEach
   public void setUp() throws Exception {
     final OzoneConfiguration conf = SCMTestUtils.getConf();
-    testDir = GenericTestUtils.getTestDir(
-        TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -105,8 +102,6 @@ public class TestContainerManagerImpl {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 53512528a0..2bdc42b707 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -48,10 +47,10 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -63,7 +62,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -89,6 +87,7 @@ public class TestContainerReportHandler {
   private ContainerManager containerManager;
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private SCMHAManager scmhaManager;
@@ -100,8 +99,6 @@ public class TestContainerReportHandler {
     final OzoneConfiguration conf = SCMTestUtils.getConf();
     nodeManager = new MockNodeManager(true, 10);
     containerManager = mock(ContainerManager.class);
-    testDir = GenericTestUtils.getTestDir(
-        TestContainerReportHandler.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -165,8 +162,6 @@ public class TestContainerReportHandler {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   private void testReplicaIndexUpdate(ContainerInfo container,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index c3dd608ab2..27505c6dd3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -23,10 +23,8 @@ import java.time.Clock;
 import java.time.ZoneId;
 import java.util.ArrayList;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,10 +44,10 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.mock;
@@ -64,6 +62,7 @@ public class TestContainerStateManager {
   private ContainerStateManager containerStateManager;
   private PipelineManager pipelineManager;
   private SCMHAManager scmhaManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private Pipeline pipeline;
@@ -72,8 +71,6 @@ public class TestContainerStateManager {
   public void init() throws IOException, TimeoutException {
     OzoneConfiguration conf = new OzoneConfiguration();
     scmhaManager = SCMHAManagerStub.getInstance(true);
-    testDir = GenericTestUtils.getTestDir(
-        TestContainerStateManager.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -106,8 +103,6 @@ public class TestContainerStateManager {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 8cbfdd9c78..dbcccce598 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
@@ -55,10 +54,10 @@ import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -70,7 +69,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -106,6 +104,7 @@ public class TestIncrementalContainerReportHandler {
   private HDDSLayoutVersionManager versionManager;
   private SCMContext scmContext = SCMContext.emptyContext();
   private PipelineManager pipelineManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private SCMHAManager scmhaManager;
@@ -114,9 +113,7 @@ public class TestIncrementalContainerReportHandler {
   public void setup() throws IOException, InvalidStateTransitionException,
       TimeoutException {
     final OzoneConfiguration conf = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
+    Path scmPath = Paths.get(testDir.getPath(), "scm-meta");
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     this.containerManager = mock(ContainerManager.class);
     NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
@@ -129,9 +126,6 @@ public class TestIncrementalContainerReportHandler {
         new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
             scmContext, versionManager);
     scmhaManager = SCMHAManagerStub.getInstance(true);
-    testDir = GenericTestUtils.getTestDir(
-        TestIncrementalContainerReportHandler.class.getSimpleName()
-            + UUID.randomUUID());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
 
@@ -200,8 +194,6 @@ public class TestIncrementalContainerReportHandler {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
index 72df033ae8..ba16dbff87 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
@@ -27,9 +27,7 @@ import static org.mockito.Mockito.any;
 import java.io.File;
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.UUID;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -55,10 +53,10 @@ import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Test container deletion behaviour of unknown containers
@@ -71,6 +69,7 @@ public class TestUnknownContainerReport {
   private ContainerStateManager containerStateManager;
   private EventPublisher publisher;
   private PipelineManager pipelineManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private SCMHAManager scmhaManager;
@@ -80,8 +79,6 @@ public class TestUnknownContainerReport {
     final OzoneConfiguration conf = SCMTestUtils.getConf();
     this.nodeManager = new MockNodeManager(true, 10);
     this.containerManager = mock(ContainerManager.class);
-    testDir = GenericTestUtils.getTestDir(
-        TestUnknownContainerReport.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -107,8 +104,6 @@ public class TestUnknownContainerReport {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
index 54a422b909..75a943ee8d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.ha.ConfUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.server.RaftServerConfigKeys;
 import org.apache.ratis.util.TimeDuration;
@@ -75,9 +74,11 @@ import static org.mockito.Mockito.when;
  */
 class TestSCMHAConfiguration {
   private OzoneConfiguration conf;
+  @TempDir
+  private File tempDir;
 
   @BeforeEach
-  void setup(@TempDir File tempDir) {
+  void setup() {
     conf = new OzoneConfiguration();
     conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath());
     DefaultConfigManager.clearDefaultConfigs();
@@ -214,8 +215,7 @@ class TestSCMHAConfiguration {
     assertEquals(0, scmRatisConfig.getLogAppenderWaitTimeMin(),
         "getLogAppenderWaitTimeMin");
 
-    final File testDir = GenericTestUtils.getRandomizedTestDir();
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getPath());
 
     final RaftProperties p = RatisUtil.newRaftProperties(conf);
     final TimeDuration t = RaftServerConfigKeys.Log.Appender.waitTimeMin(p);
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 8dd6914e64..36ee161e80 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -27,7 +27,6 @@ import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -70,10 +69,10 @@ import 
org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.upgrade.LayoutVersionManager;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.commons.io.IOUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import static java.util.Collections.emptyList;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
@@ -91,6 +90,7 @@ import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY
  * Test for different container placement policy.
  */
 public class TestContainerPlacement {
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private ContainerManager containerManager;
@@ -103,8 +103,6 @@ public class TestContainerPlacement {
   @BeforeEach
   public void setUp() throws Exception {
     conf = getConf();
-    testDir = GenericTestUtils.getTestDir(
-        TestContainerPlacement.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -123,8 +121,6 @@ public class TestContainerPlacement {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   /**
@@ -265,7 +261,6 @@ public class TestContainerPlacement {
       if (xceiverClientManager != null) {
         xceiverClientManager.close();
       }
-      FileUtil.fullyDelete(testDir);
     }
   }
 
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 4724d94ae2..aa09022b14 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -33,11 +33,9 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -73,11 +71,11 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.ozone.test.LambdaTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 /**
  * Test DeadNodeHandler.
@@ -92,7 +90,8 @@ public class TestDeadNodeHandler {
   private HealthyReadOnlyNodeHandler healthyReadOnlyNodeHandler;
   private EventPublisher publisher;
   private EventQueue eventQueue;
-  private String storageDir;
+  @TempDir
+  private File storageDir;
   private SCMContext scmContext;
   private DeletedBlockLog deletedBlockLog;
 
@@ -104,9 +103,7 @@ public class TestDeadNodeHandler {
     conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
     conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
         10, StorageUnit.MB);
-    storageDir = GenericTestUtils.getTempPath(
-        TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir.getPath());
     eventQueue = new EventQueue();
     scm = HddsTestUtils.getScm(conf);
     nodeManager = (SCMNodeManager) scm.getScmNodeManager();
@@ -136,20 +133,19 @@ public class TestDeadNodeHandler {
   public void teardown() {
     scm.stop();
     scm.join();
-    FileUtil.fullyDelete(new File(storageDir));
   }
 
   @Test
   @SuppressWarnings("checkstyle:MethodLength")
-  public void testOnMessage() throws Exception {
+  public void testOnMessage(@TempDir File tempDir) throws Exception {
     //GIVEN
     DatanodeDetails datanode1 = MockDatanodeDetails.randomDatanodeDetails();
     DatanodeDetails datanode2 = MockDatanodeDetails.randomDatanodeDetails();
     DatanodeDetails datanode3 = MockDatanodeDetails.randomDatanodeDetails();
 
-    String storagePath = GenericTestUtils.getRandomizedTempPath()
+    String storagePath = tempDir.getPath()
         .concat("/data-" + datanode1.getUuidString());
-    String metaStoragePath = GenericTestUtils.getRandomizedTempPath()
+    String metaStoragePath = tempDir.getPath()
         .concat("/metadata-" + datanode1.getUuidString());
 
     StorageReportProto storageOne = HddsTestUtils.createStorageReport(
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index ecd5cbed5f..558fc420f4 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -22,10 +22,10 @@ import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
-import java.util.UUID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -43,9 +43,9 @@ import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,10 +59,10 @@ public class TestNodeReportHandler implements 
EventPublisher {
   private NodeReportHandler nodeReportHandler;
   private HDDSLayoutVersionManager versionManager;
   private SCMNodeManager nodeManager;
-  private String storagePath = GenericTestUtils.getRandomizedTempPath()
-      .concat("/data-" + UUID.randomUUID().toString());
-  private String metaStoragePath = GenericTestUtils.getRandomizedTempPath()
-      .concat("/metadata-" + UUID.randomUUID().toString());
+  @TempDir
+  private File storagePath;
+  @TempDir
+  private File metaStoragePath;
 
   @BeforeEach
   public void resetEventCollector() throws IOException {
@@ -84,9 +84,9 @@ public class TestNodeReportHandler implements EventPublisher {
   public void testNodeReport() throws IOException {
     DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
     StorageReportProto storageOne = HddsTestUtils
-        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
+        .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, 
null);
     MetadataStorageReportProto metaStorageOne = HddsTestUtils
-        .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null);
+        .createMetadataStorageReport(metaStoragePath.getPath(), 100, 10, 90, 
null);
 
     SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
     assertNull(nodeMetric);
@@ -100,7 +100,7 @@ public class TestNodeReportHandler implements 
EventPublisher {
     assertEquals(10, (long) nodeMetric.get().getScmUsed().get());
 
     StorageReportProto storageTwo = HddsTestUtils
-        .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
+        .createStorageReport(dn.getUuid(), storagePath.getPath(), 100, 10, 90, 
null);
     nodeReportHandler.onMessage(
         getNodeReport(dn, Arrays.asList(storageOne, storageTwo),
             Arrays.asList(metaStorageOne)), this);
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 6dd5f674d4..147aa71984 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -28,10 +28,11 @@ import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
+import java.io.File;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -53,24 +54,25 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
  */
 public class TestSCMNodeStorageStatMap {
   private static final int DATANODE_COUNT = 100;
-  private final long capacity = 10L * OzoneConsts.GB;
-  private final long used = 2L * OzoneConsts.GB;
-  private final long remaining = capacity - used;
+  private static final long CAPACITY = 10L * OzoneConsts.GB;
+  private static final long USED = 2L * OzoneConsts.GB;
+  private static final long REMAINING = CAPACITY - USED;
   private static OzoneConfiguration conf = new OzoneConfiguration();
   private final Map<UUID, Set<StorageLocationReport>> testData =
       new ConcurrentHashMap<>();
+  @TempDir
+  private File tempFile;
 
   private void generateData() {
     for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
       UUID dnId = UUID.randomUUID();
       Set<StorageLocationReport> reportSet = new HashSet<>();
-      String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + dnIndex);
+      String path = tempFile.getPath() + "-" + dnIndex;
       StorageLocationReport.Builder builder =
           StorageLocationReport.newBuilder();
       builder.setStorageType(StorageType.DISK).setId(dnId.toString())
-          .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
-          .setCapacity(capacity).setFailed(false);
+          .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING)
+          .setCapacity(CAPACITY).setFailed(false);
       reportSet.add(builder.build());
       testData.put(UUID.randomUUID(), reportSet);
     }
@@ -114,13 +116,12 @@ public class TestSCMNodeStorageStatMap {
   public void testUpdateUnknownDatanode() {
     SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
     UUID unknownNode = UUID.randomUUID();
-    String path = GenericTestUtils.getTempPath(
-        TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + unknownNode);
+    String path = tempFile.getPath() + "-" + unknownNode;
     Set<StorageLocationReport> reportSet = new HashSet<>();
     StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
     builder.setStorageType(StorageType.DISK).setId(unknownNode.toString())
-        .setStorageLocation(path).setScmUsed(used).setRemaining(remaining)
-        .setCapacity(capacity).setFailed(false);
+        .setStorageLocation(path).setScmUsed(USED).setRemaining(REMAINING)
+        .setCapacity(CAPACITY).setFailed(false);
     reportSet.add(builder.build());
     Throwable t = assertThrows(SCMException.class,
         () -> map.updateDatanodeMap(unknownNode, reportSet));
@@ -136,8 +137,7 @@ public class TestSCMNodeStorageStatMap {
     map.insertNewDatanode(key, reportSet);
     assertTrue(map.isKnownDatanode(key));
     UUID storageId = UUID.randomUUID();
-    String path =
-        GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
+    String path = tempFile.getPath().concat("/" + storageId);
     StorageLocationReport report = reportSet.iterator().next();
     long reportCapacity = report.getCapacity();
     long reportScmUsed = report.getScmUsed();
@@ -184,22 +184,20 @@ public class TestSCMNodeStorageStatMap {
         .entrySet()) {
       map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
     }
-    assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity());
-    assertEquals(DATANODE_COUNT * remaining, map.getTotalFreeSpace());
-    assertEquals(DATANODE_COUNT * used, map.getTotalSpaceUsed());
+    assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity());
+    assertEquals(DATANODE_COUNT * REMAINING, map.getTotalFreeSpace());
+    assertEquals(DATANODE_COUNT * USED, map.getTotalSpaceUsed());
 
     // update 1/4th of the datanode to be full
     for (Map.Entry<UUID, Set<StorageLocationReport>> keyEntry : testData
         .entrySet()) {
       Set<StorageLocationReport> reportSet = new HashSet<>();
-      String path = GenericTestUtils.getTempPath(
-          TestSCMNodeStorageStatMap.class.getSimpleName() + "-" + keyEntry
-              .getKey().toString());
+      String path = tempFile.getPath() + "-" + keyEntry.getKey().toString();
       StorageLocationReport.Builder builder =
           StorageLocationReport.newBuilder();
       builder.setStorageType(StorageType.DISK)
           .setId(keyEntry.getKey().toString()).setStorageLocation(path)
-          .setScmUsed(capacity).setRemaining(0).setCapacity(capacity)
+          .setScmUsed(CAPACITY).setRemaining(0).setCapacity(CAPACITY)
           .setFailed(false);
       reportSet.add(builder.build());
 
@@ -216,9 +214,9 @@ public class TestSCMNodeStorageStatMap {
     assertEquals(0.75 * DATANODE_COUNT,
         
map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 
0);
 
-    assertEquals(DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
-    assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 
0);
-    assertEquals(0.75 * DATANODE_COUNT * used + (0.25 * DATANODE_COUNT * 
capacity),
+    assertEquals(DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0);
+    assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 
0);
+    assertEquals(0.75 * DATANODE_COUNT * USED + (0.25 * DATANODE_COUNT * 
CAPACITY),
         map.getTotalSpaceUsed(), 0);
     counter = 1;
     // Remove 1/4 of the DataNodes from the Map
@@ -236,9 +234,9 @@ public class TestSCMNodeStorageStatMap {
     assertEquals(0.75 * DATANODE_COUNT,
         
map.getDatanodeList(SCMNodeStorageStatMap.UtilizationThreshold.NORMAL).size(), 
0);
 
-    assertEquals(0.75 * DATANODE_COUNT * capacity, map.getTotalCapacity(), 0);
-    assertEquals(0.75 * DATANODE_COUNT * remaining, map.getTotalFreeSpace(), 
0);
-    assertEquals(0.75 * DATANODE_COUNT * used, map.getTotalSpaceUsed(), 0);
+    assertEquals(0.75 * DATANODE_COUNT * CAPACITY, map.getTotalCapacity(), 0);
+    assertEquals(0.75 * DATANODE_COUNT * REMAINING, map.getTotalFreeSpace(), 
0);
+    assertEquals(0.75 * DATANODE_COUNT * USED, map.getTotalSpaceUsed(), 0);
 
   }
 }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 61ba3d3bb8..9afd9c793c 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -35,9 +34,9 @@ import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.CsvSource;
 import org.slf4j.Logger;
@@ -46,7 +45,6 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
-import java.util.UUID;
 
 import static org.junit.jupiter.api.Assertions.fail;
 import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
@@ -61,6 +59,7 @@ public class TestPipelineDatanodesIntersection {
 
   private OzoneConfiguration conf;
   private boolean end;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
 
@@ -68,9 +67,6 @@ public class TestPipelineDatanodesIntersection {
   public void initialize() throws IOException {
     conf = SCMTestUtils.getConf();
     end = false;
-    testDir = GenericTestUtils.getTestDir(
-        TestPipelineDatanodesIntersection.class.getSimpleName()
-            + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -81,8 +77,6 @@ public class TestPipelineDatanodesIntersection {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @ParameterizedTest
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2f0b0a5cc7..e80e7bbb90 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -27,7 +27,6 @@ import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -58,10 +57,10 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.IOException;
 
@@ -96,6 +95,7 @@ public class TestPipelinePlacementPolicy {
   private NetworkTopologyImpl cluster;
   private static final int PIPELINE_PLACEMENT_MAX_NODES_COUNT = 10;
   private static final int PIPELINE_LOAD_LIMIT = 5;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
   private SCMHAManager scmhaManager;
@@ -114,8 +114,6 @@ public class TestPipelinePlacementPolicy {
     conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN,
         10, StorageUnit.MB);
     nodeManager.setNumPipelinePerDatanode(PIPELINE_LOAD_LIMIT);
-    testDir = GenericTestUtils.getTestDir(
-        TestPipelinePlacementPolicy.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -135,8 +133,6 @@ public class TestPipelinePlacementPolicy {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   private NetworkTopologyImpl initTopology() {
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
index 3874a88941..e039ee5c02 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManagerImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -39,10 +38,10 @@ import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -50,7 +49,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeoutException;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -65,14 +63,13 @@ import static org.assertj.core.api.Assertions.assertThat;
 public class TestPipelineStateManagerImpl {
 
   private PipelineStateManager stateManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
 
   @BeforeEach
   public void init() throws Exception {
     final OzoneConfiguration conf = SCMTestUtils.getConf();
-    testDir = GenericTestUtils.getTestDir(
-        TestPipelineStateManagerImpl.class.getSimpleName() + 
UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -93,8 +90,6 @@ public class TestPipelineStateManagerImpl {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   private Pipeline createDummyPipeline(int numNodes) {
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 977cf137fd..fc51d94917 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -41,10 +40,10 @@ import org.apache.hadoop.hdds.scm.node.NodeStatus;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.ClientVersion;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Assumptions;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -70,7 +69,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.junit.jupiter.api.Assertions.fail;
 
 /**
- * Test for RatisPipelineProvider.
+ * Test for {@link RatisPipelineProvider}.
  */
 public class TestRatisPipelineProvider {
 
@@ -80,6 +79,7 @@ public class TestRatisPipelineProvider {
   private MockNodeManager nodeManager;
   private RatisPipelineProvider provider;
   private PipelineStateManager stateManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
 
@@ -89,9 +89,11 @@ public class TestRatisPipelineProvider {
 
   public void init(int maxPipelinePerNode, OzoneConfiguration conf)
       throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestRatisPipelineProvider.class.getSimpleName() + UUID.randomUUID());
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
+    init(maxPipelinePerNode, conf, testDir);
+  }
+
+  public void init(int maxPipelinePerNode, OzoneConfiguration conf, File dir) 
throws Exception {
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
     nodeManager = new MockNodeManager(true, 10);
@@ -114,8 +116,6 @@ public class TestRatisPipelineProvider {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   private static void assertPipelineProperties(
@@ -332,7 +332,7 @@ public class TestRatisPipelineProvider {
   }
 
   @Test
-  public void testCreatePipelinesWhenNotEnoughSpace() throws Exception {
+  public void testCreatePipelinesWhenNotEnoughSpace(@TempDir File tempDir) 
throws Exception {
     String expectedErrorSubstring = "Unable to find enough" +
         " nodes that meet the space requirement";
 
@@ -356,7 +356,7 @@ public class TestRatisPipelineProvider {
 
     OzoneConfiguration largeMetadataConf = new OzoneConfiguration();
     largeMetadataConf.set(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, "300TB");
-    init(1, largeMetadataConf);
+    init(1, largeMetadataConf, tempDir);
     for (ReplicationFactor factor: ReplicationFactor.values()) {
       if (factor == ReplicationFactor.ZERO) {
         continue;
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index bbb714debb..ceca4ab73e 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -34,16 +33,15 @@ import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.UUID;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
@@ -55,6 +53,7 @@ public class TestSimplePipelineProvider {
   private NodeManager nodeManager;
   private PipelineProvider provider;
   private PipelineStateManager stateManager;
+  @TempDir
   private File testDir;
   private DBStore dbStore;
 
@@ -62,8 +61,6 @@ public class TestSimplePipelineProvider {
   public void init() throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     final OzoneConfiguration conf = SCMTestUtils.getConf();
-    testDir = GenericTestUtils.getTestDir(
-        TestSimplePipelineProvider.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     dbStore = DBStoreBuilder.createDBStore(
         conf, new SCMDBDefinition());
@@ -82,8 +79,6 @@ public class TestSimplePipelineProvider {
     if (dbStore != null) {
       dbStore.close();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @Test
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
index 31cd2db1e5..98f1639490 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java
@@ -23,9 +23,7 @@ import java.time.Clock;
 import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.UUID;
 
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -47,6 +45,7 @@ import 
org.apache.hadoop.hdds.scm.pipeline.PipelineManagerImpl;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.LoggerFactory;
 
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -56,6 +55,8 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
  * This class tests HealthyPipelineSafeMode rule.
  */
 public class TestHealthyPipelineSafeModeRule {
+  @TempDir
+  private File tempFile;
 
   @Test
   public void testHealthyPipelineSafeModeRuleWithNoPipelines()
@@ -66,12 +67,9 @@ public class TestHealthyPipelineSafeModeRule {
     List<ContainerInfo> containers =
             new ArrayList<>(HddsTestUtils.getContainerInfo(1));
 
-    String storageDir = GenericTestUtils.getTempPath(
-            TestHealthyPipelineSafeModeRule.class.getName() +
-                    UUID.randomUUID());
     OzoneConfiguration config = new OzoneConfiguration();
     MockNodeManager nodeManager = new MockNodeManager(true, 0);
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
     // enable pipeline check
     config.setBoolean(
             HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, 
true);
@@ -106,14 +104,11 @@ public class TestHealthyPipelineSafeModeRule {
       assertTrue(healthyPipelineSafeModeRule.validate());
     } finally {
       scmMetadataStore.getStore().close();
-      FileUtil.fullyDelete(new File(storageDir));
     }
   }
 
   @Test
   public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception {
-    String storageDir = GenericTestUtils.getTempPath(
-        TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
 
     EventQueue eventQueue = new EventQueue();
     SCMServiceManager serviceManager = new SCMServiceManager();
@@ -126,7 +121,7 @@ public class TestHealthyPipelineSafeModeRule {
     // stale and last one is dead, and this repeats. So for a 12 node, 9
     // healthy, 2 stale and one dead.
     MockNodeManager nodeManager = new MockNodeManager(true, 12);
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
     // enable pipeline check
     config.setBoolean(
             HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, 
true);
@@ -201,7 +196,6 @@ public class TestHealthyPipelineSafeModeRule {
           1000, 5000);
     } finally {
       scmMetadataStore.getStore().close();
-      FileUtil.fullyDelete(new File(storageDir));
     }
   }
 
@@ -209,10 +203,6 @@ public class TestHealthyPipelineSafeModeRule {
   @Test
   public void testHealthyPipelineSafeModeRuleWithMixedPipelines()
       throws Exception {
-
-    String storageDir = GenericTestUtils.getTempPath(
-        TestHealthyPipelineSafeModeRule.class.getName() + UUID.randomUUID());
-
     EventQueue eventQueue = new EventQueue();
     SCMServiceManager serviceManager = new SCMServiceManager();
     SCMContext scmContext = SCMContext.emptyContext();
@@ -225,7 +215,7 @@ public class TestHealthyPipelineSafeModeRule {
     // stale and last one is dead, and this repeats. So for a 12 node, 9
     // healthy, 2 stale and one dead.
     MockNodeManager nodeManager = new MockNodeManager(true, 12);
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempFile.getPath());
     // enable pipeline check
     config.setBoolean(
             HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, 
true);
@@ -308,7 +298,6 @@ public class TestHealthyPipelineSafeModeRule {
 
     } finally {
       scmMetadataStore.getStore().close();
-      FileUtil.fullyDelete(new File(storageDir));
     }
 
   }
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index f9114b1d5a..319caabe40 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.safemode;
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.file.Path;
 import java.time.Clock;
 import java.time.ZoneOffset;
 import java.util.ArrayList;
@@ -27,12 +26,10 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.RatisReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -88,9 +85,11 @@ public class TestSCMSafeModeManager {
   private List<ContainerInfo> containers = Collections.emptyList();
 
   private SCMMetadataStore scmMetadataStore;
+  @TempDir
+  private File tempDir;
 
   @BeforeEach
-  public void setUp(@TempDir Path tempDir) throws IOException {
+  public void setUp() throws IOException {
     queue = new EventQueue();
     scmContext = SCMContext.emptyContext();
     serviceManager = new SCMServiceManager();
@@ -98,7 +97,7 @@ public class TestSCMSafeModeManager {
     config.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION,
         false);
     config.set(HddsConfigKeys.OZONE_METADATA_DIRS,
-        tempDir.toAbsolutePath().toString());
+        tempDir.getAbsolutePath());
     scmMetadataStore = new SCMMetadataStoreImpl(config);
   }
 
@@ -556,11 +555,8 @@ public class TestSCMSafeModeManager {
   public void testSafeModePipelineExitRule() throws Exception {
     containers = new ArrayList<>();
     containers.addAll(HddsTestUtils.getContainerInfo(25 * 4));
-    String storageDir = GenericTestUtils.getTempPath(
-        TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
     try {
       MockNodeManager nodeManager = new MockNodeManager(true, 3);
-      config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
       // enable pipeline check
       config.setBoolean(
           HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
@@ -606,13 +602,11 @@ public class TestSCMSafeModeManager {
       config.setBoolean(
           HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK,
           false);
-      FileUtil.fullyDelete(new File(storageDir));
     }
   }
 
   @Test
-  public void testPipelinesNotCreatedUntilPreCheckPasses()
-      throws Exception {
+  public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception {
     int numOfDns = 5;
     // enable pipeline check
     config.setBoolean(
@@ -622,12 +616,6 @@ public class TestSCMSafeModeManager {
         true);
 
     MockNodeManager nodeManager = new MockNodeManager(true, numOfDns);
-    String storageDir = GenericTestUtils.getTempPath(
-        TestSCMSafeModeManager.class.getName() + UUID.randomUUID());
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
-    // enable pipeline check
-    config.setBoolean(
-        HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true);
 
     PipelineManagerImpl pipelineManager =
         PipelineManagerImpl.newPipelineManager(
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
index 7089f68ec7..7d1bdc4736 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/security/TestRootCARotationManager.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hdds.scm.security;
 
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
@@ -40,6 +39,7 @@ import 
org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -94,6 +94,7 @@ public class TestRootCARotationManager {
   private SCMSecurityProtocolServer scmSecurityProtocolServer;
   private RootCARotationHandlerImpl handler;
   private StatefulServiceStateManager statefulServiceStateManager;
+  @TempDir
   private File testDir;
   private String cID = UUID.randomUUID().toString();
   private String scmID = UUID.randomUUID().toString();
@@ -103,8 +104,6 @@ public class TestRootCARotationManager {
   public void init() throws IOException, TimeoutException,
       CertificateException {
     ozoneConfig = new OzoneConfiguration();
-    testDir = GenericTestUtils.getTestDir(
-        TestRootCARotationManager.class.getSimpleName() + UUID.randomUUID());
     ozoneConfig
         .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     ozoneConfig
@@ -146,8 +145,6 @@ public class TestRootCARotationManager {
     if (rootCARotationManager != null) {
       rootCARotationManager.stop();
     }
-
-    FileUtil.fullyDelete(testDir);
   }
 
   @Test
diff --git 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
index 0654e9a7e7..3be931c132 100644
--- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
+++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdds.scm.cli.container.upgrade;
 
 import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -45,10 +44,9 @@ import 
org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import 
org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
-import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -78,6 +76,7 @@ public class TestUpgradeManager {
   private static final String SCM_ID = UUID.randomUUID().toString();
   private static final OzoneConfiguration CONF = new OzoneConfiguration();
 
+  @TempDir
   private File testRoot;
   private MutableVolumeSet volumeSet;
   private UUID datanodeId;
@@ -93,12 +92,6 @@ public class TestUpgradeManager {
     dc.setContainerSchemaV3Enabled(true);
     CONF.setFromObject(dc);
 
-    testRoot =
-        GenericTestUtils.getTestDir(TestUpgradeManager.class.getSimpleName());
-    if (testRoot.exists()) {
-      FileUtils.cleanDirectory(testRoot);
-    }
-
     final File volume1Path = new File(testRoot, "volume1");
     final File volume2Path = new File(testRoot, "volume2");
 
@@ -142,11 +135,6 @@ public class TestUpgradeManager {
     chunkManager = new FilePerBlockStrategy(true, blockManager, null);
   }
 
-  @AfterEach
-  public void after() throws IOException {
-    FileUtils.deleteDirectory(testRoot);
-  }
-
   @Test
   public void testUpgrade() throws IOException {
     int num = 2;
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
index e1c1f4fa51..755d074a18 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java
@@ -37,6 +37,7 @@ import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -90,6 +91,7 @@ public final class TestSecretKeySnapshot {
 
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
+  @TempDir
   private File workDir;
   private File ozoneKeytab;
   private File spnegoKeytab;
@@ -105,7 +107,6 @@ public final class TestSecretKeySnapshot {
 
     ExitUtils.disableSystemExit();
 
-    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
 
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
index 05c8eda9a3..26c3fc2f7a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java
@@ -40,6 +40,7 @@ import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -91,6 +92,7 @@ public final class TestSecretKeysApi {
       .getLogger(TestSecretKeysApi.class);
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
+  @TempDir
   private File workDir;
   private File ozoneKeytab;
   private File spnegoKeytab;
@@ -109,7 +111,6 @@ public final class TestSecretKeysApi {
     ExitUtils.disableSystemExit();
     ExitUtil.disableSystemExit();
 
-    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
     clusterId = UUID.randomUUID().toString();
     scmId = UUID.randomUUID().toString();
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to