This is an automated email from the ASF dual-hosted git repository.

xianjin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git


The following commit(s) were added to refs/heads/master by this push:
     new 916f0caa [Minor] refactor test code (#432)
916f0caa is described below

commit 916f0caa2dd92b918acbff77ecebfbad14231c8f
Author: advancedxy <[email protected]>
AuthorDate: Wed Dec 21 15:44:49 2022 +0800

    [Minor] refactor test code (#432)
    
    * [Minor] refactor test code
    * fix checkstyle errors
---
 .../spark/shuffle/RssSparkShuffleUtilsTest.java    |  17 +-
 .../shuffle/DelegationRssShuffleManagerTest.java   |   6 +-
 .../shuffle/DelegationRssShuffleManagerTest.java   |   6 +-
 .../java/org/apache/uniffle/client/TestUtils.java  |   2 +
 .../uniffle/common/compression/NoOpCodec.java      |   2 +
 .../org/apache/uniffle/common/KerberizedHdfs.java  |   7 +-
 .../common/compression/CompressionTest.java        |   2 +-
 .../apache/uniffle/common/rpc/GrpcServerTest.java  |   6 +-
 .../common/security/HadoopSecurityContextTest.java | 122 ++++----
 .../security/SecurityContextFactoryTest.java       |   8 +-
 .../apache/uniffle/common/util/RetryUtilsTest.java |   5 +-
 .../apache/uniffle/common/util/RssUtilsTest.java   |   3 +-
 .../coordinator/SimpleClusterManagerTest.java      | 327 ++++++++++-----------
 .../coordinator/access/AccessManagerTest.java      |   8 +-
 .../checker/AccessCandidatesCheckerTest.java       |   4 +-
 .../checker/AccessClusterLoadCheckerTest.java      |   9 +-
 .../checker/AccessQuotaCheckerTest.java            |   2 +-
 .../assignment/BasicAssignmentStrategyTest.java    |   4 +-
 .../PartitionBalanceAssignmentStrategyTest.java    |   2 +-
 .../strategy/assignment/PartitionRangeTest.java    |   4 +-
 .../coordinator/util/CoordinatorUtilsTest.java     |   1 +
 .../org/apache/uniffle/server/HealthCheckTest.java |   2 +-
 .../uniffle/server/ShuffleServerMetricsTest.java   |   2 +-
 .../uniffle/server/ShuffleTaskManagerTest.java     |   2 +-
 .../apache/uniffle/server/StorageCheckerTest.java  |  16 +-
 .../server/buffer/ShuffleBufferManagerTest.java    |   8 +-
 .../uniffle/server/buffer/ShuffleBufferTest.java   |  14 +-
 .../server/storage/LocalStorageManagerTest.java    |   5 +-
 .../StorageManagerFallbackStrategyTest.java        |  20 +-
 29 files changed, 309 insertions(+), 307 deletions(-)

diff --git 
a/client-spark/common/src/test/java/org/apache/spark/shuffle/RssSparkShuffleUtilsTest.java
 
b/client-spark/common/src/test/java/org/apache/spark/shuffle/RssSparkShuffleUtilsTest.java
index 625db083..403525ff 100644
--- 
a/client-spark/common/src/test/java/org/apache/spark/shuffle/RssSparkShuffleUtilsTest.java
+++ 
b/client-spark/common/src/test/java/org/apache/spark/shuffle/RssSparkShuffleUtilsTest.java
@@ -17,7 +17,7 @@
 
 package org.apache.spark.shuffle;
 
-import java.util.Iterator;
+import java.util.Arrays;
 import java.util.Map;
 import java.util.Set;
 
@@ -44,22 +44,25 @@ public class RssSparkShuffleUtilsTest {
     SparkConf conf = new SparkConf();
 
     /**
-     * Case1: dont set the tag implicitly and will return the {@code 
Constants.SHUFFLE_SERVER_VERSION}
+     * Case 1: don't set the tag explicitly and will return the {@code 
Constants.SHUFFLE_SERVER_VERSION}
       */
     Set<String> tags = RssSparkShuffleUtils.getAssignmentTags(conf);
     assertEquals(Constants.SHUFFLE_SERVER_VERSION, tags.iterator().next());
 
     /**
-     * Case2: set the multiple tags implicitly and will return the {@code 
Constants.SHUFFLE_SERVER_VERSION}
+     * Case 2: set multiple tags explicitly and will return the {@code 
Constants.SHUFFLE_SERVER_VERSION}
      * and configured tags.
      */
     conf.set(RssSparkConfig.RSS_CLIENT_ASSIGNMENT_TAGS.key(), " a,b");
     tags = RssSparkShuffleUtils.getAssignmentTags(conf);
     assertEquals(3, tags.size());
-    Iterator<String> iterator = tags.iterator();
-    assertEquals("a", iterator.next());
-    assertEquals("b", iterator.next());
-    assertEquals(Constants.SHUFFLE_SERVER_VERSION, iterator.next());
+    assertTrue(tags.containsAll(Arrays.asList("a", "b", 
Constants.SHUFFLE_SERVER_VERSION)));
+
+    // Case 3: tags with extra space padding
+    conf.set(RssSparkConfig.RSS_CLIENT_ASSIGNMENT_TAGS.key(), " a,b,c   ");
+    tags = RssSparkShuffleUtils.getAssignmentTags(conf);
+    assertEquals(4, tags.size());
+    assertTrue(tags.containsAll(Arrays.asList("a", "b", "c", 
Constants.SHUFFLE_SERVER_VERSION)));
   }
 
   @Test
diff --git 
a/client-spark/spark2/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
 
b/client-spark/spark2/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
index 9295d5b3..e7270bae 100644
--- 
a/client-spark/spark2/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
+++ 
b/client-spark/spark2/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
@@ -175,21 +175,19 @@ public class DelegationRssShuffleManagerTest {
     assertCreateSortShuffleManager(secondConf);
   }
 
-  private DelegationRssShuffleManager assertCreateSortShuffleManager(SparkConf 
conf) throws Exception {
+  private void assertCreateSortShuffleManager(SparkConf conf) throws Exception 
{
     DelegationRssShuffleManager delegationRssShuffleManager = new 
DelegationRssShuffleManager(conf, true);
     assertTrue(delegationRssShuffleManager.getDelegate() instanceof 
SortShuffleManager);
     assertFalse(delegationRssShuffleManager.getDelegate() instanceof 
RssShuffleManager);
     assertFalse(conf.getBoolean(RssSparkConfig.RSS_ENABLED.key(), false));
     assertEquals("sort", conf.get("spark.shuffle.manager"));
-    return delegationRssShuffleManager;
   }
 
-  private DelegationRssShuffleManager assertCreateRssShuffleManager(SparkConf 
conf) throws Exception {
+  private void assertCreateRssShuffleManager(SparkConf conf) throws Exception {
     DelegationRssShuffleManager delegationRssShuffleManager = new 
DelegationRssShuffleManager(conf, true);
     assertFalse(delegationRssShuffleManager.getDelegate() instanceof 
SortShuffleManager);
     assertTrue(delegationRssShuffleManager.getDelegate() instanceof 
RssShuffleManager);
     
assertTrue(Boolean.parseBoolean(conf.get(RssSparkConfig.RSS_ENABLED.key())));
     assertEquals(RssShuffleManager.class.getCanonicalName(), 
conf.get("spark.shuffle.manager"));
-    return delegationRssShuffleManager;
   }
 }
diff --git 
a/client-spark/spark3/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
 
b/client-spark/spark3/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
index d9271476..d533be84 100644
--- 
a/client-spark/spark3/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
+++ 
b/client-spark/spark3/src/test/java/org/apache/spark/shuffle/DelegationRssShuffleManagerTest.java
@@ -175,21 +175,19 @@ public class DelegationRssShuffleManagerTest {
     assertCreateSortShuffleManager(secondConf);
   }
 
-  private DelegationRssShuffleManager assertCreateSortShuffleManager(SparkConf 
conf) throws Exception {
+  private void assertCreateSortShuffleManager(SparkConf conf) throws Exception 
{
     DelegationRssShuffleManager delegationRssShuffleManager = new 
DelegationRssShuffleManager(conf, true);
     assertTrue(delegationRssShuffleManager.getDelegate() instanceof 
SortShuffleManager);
     assertFalse(delegationRssShuffleManager.getDelegate() instanceof 
RssShuffleManager);
     assertFalse(conf.getBoolean(RssSparkConfig.RSS_ENABLED.key(), false));
     assertEquals("sort", conf.get("spark.shuffle.manager"));
-    return delegationRssShuffleManager;
   }
 
-  private DelegationRssShuffleManager assertCreateRssShuffleManager(SparkConf 
conf) throws Exception {
+  private void assertCreateRssShuffleManager(SparkConf conf) throws Exception {
     DelegationRssShuffleManager delegationRssShuffleManager = new 
DelegationRssShuffleManager(conf, true);
     assertFalse(delegationRssShuffleManager.getDelegate() instanceof 
SortShuffleManager);
     assertTrue(delegationRssShuffleManager.getDelegate() instanceof 
RssShuffleManager);
     
assertTrue(Boolean.parseBoolean(conf.get(RssSparkConfig.RSS_ENABLED.key())));
     assertEquals(RssShuffleManager.class.getCanonicalName(), 
conf.get("spark.shuffle.manager"));
-    return delegationRssShuffleManager;
   }
 }
diff --git a/client/src/test/java/org/apache/uniffle/client/TestUtils.java 
b/client/src/test/java/org/apache/uniffle/client/TestUtils.java
index 284d22e8..f5e28986 100644
--- a/client/src/test/java/org/apache/uniffle/client/TestUtils.java
+++ b/client/src/test/java/org/apache/uniffle/client/TestUtils.java
@@ -26,6 +26,7 @@ import 
org.apache.uniffle.client.response.CompressedShuffleBlock;
 import org.apache.uniffle.common.BufferSegment;
 import org.apache.uniffle.common.ShuffleDataResult;
 
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -70,6 +71,7 @@ public class TestUtils {
       assertNotNull(bs);
       byte[] data = new byte[bs.getLength()];
       System.arraycopy(buffer, bs.getOffset(), data, 0, bs.getLength());
+      assertArrayEquals(entry.getValue(), data);
     }
   }
 
diff --git 
a/common/src/main/java/org/apache/uniffle/common/compression/NoOpCodec.java 
b/common/src/main/java/org/apache/uniffle/common/compression/NoOpCodec.java
index 99c7cb4e..14b9773e 100644
--- a/common/src/main/java/org/apache/uniffle/common/compression/NoOpCodec.java
+++ b/common/src/main/java/org/apache/uniffle/common/compression/NoOpCodec.java
@@ -24,6 +24,8 @@ public class NoOpCodec extends Codec {
   @Override
   public void decompress(ByteBuffer src, int uncompressedLen, ByteBuffer dest, 
int destOffset) {
     dest.put(src);
+    dest.position(destOffset);
+    dest.limit(destOffset + uncompressedLen);
   }
 
   @Override
diff --git a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java 
b/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
index 4275e113..dc1f19ad 100644
--- a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
+++ b/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
@@ -24,6 +24,7 @@ import java.io.OutputStreamWriter;
 import java.io.Serializable;
 import java.net.BindException;
 import java.net.ServerSocket;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.security.PrivilegedExceptionAction;
@@ -81,7 +82,7 @@ public class KerberizedHdfs implements Serializable {
 
   private Class testRunnerCls = KerberizedHdfs.class;
 
-  // The super user for accessing HDFS
+  // The superuser for accessing HDFS
   private String hdfsKeytab;
   private String hdfsPrincipal;
   // The normal user of alex for accessing HDFS
@@ -127,7 +128,7 @@ public class KerberizedHdfs implements Serializable {
     String oneFileContent = "test content";
     FSDataOutputStream fsDataOutputStream =
         writeFs.create(new org.apache.hadoop.fs.Path("/alex/basic.txt"));
-    BufferedWriter br = new BufferedWriter(new 
OutputStreamWriter(fsDataOutputStream, "UTF-8"));
+    BufferedWriter br = new BufferedWriter(new 
OutputStreamWriter(fsDataOutputStream, StandardCharsets.UTF_8));
     br.write(oneFileContent);
     br.close();
 
@@ -156,7 +157,7 @@ public class KerberizedHdfs implements Serializable {
 
     conf.set(
         
CommonConfigurationKeysPublic.HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS,
-        
"org.apache.uniffle.common.KerberizedHdfs$TestDummyImpersonationProvider");
+        TestDummyImpersonationProvider.class.getName());
 
     String keystoresDir = kerberizedDfsBaseDir.toFile().getAbsolutePath();
     String sslConfDir = KeyStoreTestUtil.getClasspathDir(testRunnerCls);
diff --git 
a/common/src/test/java/org/apache/uniffle/common/compression/CompressionTest.java
 
b/common/src/test/java/org/apache/uniffle/common/compression/CompressionTest.java
index 6c3e8c32..fb140b63 100644
--- 
a/common/src/test/java/org/apache/uniffle/common/compression/CompressionTest.java
+++ 
b/common/src/test/java/org/apache/uniffle/common/compression/CompressionTest.java
@@ -35,7 +35,7 @@ public class CompressionTest {
 
   static List<Arguments> testCompression() {
     int[] sizes = {1, 1024, 128 * 1024, 512 * 1024, 1024 * 1024, 4 * 1024 * 
1024};
-    Codec.Type[] types = {Codec.Type.ZSTD, Codec.Type.LZ4, Codec.Type.SNAPPY};
+    Codec.Type[] types = {Codec.Type.ZSTD, Codec.Type.LZ4, Codec.Type.SNAPPY, 
Codec.Type.NOOP};
 
     List<Arguments> arguments = new ArrayList<>();
     for (int size : sizes) {
diff --git 
a/common/src/test/java/org/apache/uniffle/common/rpc/GrpcServerTest.java 
b/common/src/test/java/org/apache/uniffle/common/rpc/GrpcServerTest.java
index ac125749..631a26aa 100644
--- a/common/src/test/java/org/apache/uniffle/common/rpc/GrpcServerTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/rpc/GrpcServerTest.java
@@ -62,7 +62,7 @@ public class GrpcServerTest {
       final int index = i;
       executor.submit(() -> {
         try {
-          Thread.sleep(1000 * 2);
+          Thread.sleep(100 * 2);
         } catch (InterruptedException interruptedException) {
           interruptedException.printStackTrace();
         }
@@ -71,13 +71,15 @@ public class GrpcServerTest {
       });
     }
 
-    Thread.sleep(1000L);
+    Thread.sleep(100);
     double activeThreads = 
grpcMetrics.getGaugeMap().get(GRPC_SERVER_EXECUTOR_ACTIVE_THREADS_KEY).get();
     assertEquals(2, activeThreads);
     double queueSize = 
grpcMetrics.getGaugeMap().get(GRPC_SERVER_EXECUTOR_BLOCKING_QUEUE_SIZE_KEY).get();
     assertEquals(1, queueSize);
 
     countDownLatch.await();
+    // the metrics are updated in afterExecute, which means it may take a while 
for the thread to decrease the metrics
+    Thread.sleep(100);
     activeThreads = 
grpcMetrics.getGaugeMap().get(GRPC_SERVER_EXECUTOR_ACTIVE_THREADS_KEY).get();
     assertEquals(0, activeThreads);
     queueSize = 
grpcMetrics.getGaugeMap().get(GRPC_SERVER_EXECUTOR_BLOCKING_QUEUE_SIZE_KEY).get();
diff --git 
a/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
 
b/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
index ecbffb98..53ad36b2 100644
--- 
a/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
+++ 
b/common/src/test/java/org/apache/uniffle/common/security/HadoopSecurityContextTest.java
@@ -41,78 +41,71 @@ public class HadoopSecurityContextTest extends 
KerberizedHdfsBase {
 
   @Test
   public void testSecuredCallable() throws Exception {
-    HadoopSecurityContext context = new HadoopSecurityContext(
-        null,
-        kerberizedHdfs.getHdfsKeytab(),
-        kerberizedHdfs.getHdfsPrincipal(),
-        1000
-    );
+    try (HadoopSecurityContext context = new HadoopSecurityContext(
+              null,
+              kerberizedHdfs.getHdfsKeytab(),
+              kerberizedHdfs.getHdfsPrincipal(),
+              1000)) {
+
+      // case1: when user is empty or null, it should throw exception
+      try {
+        context.runSecured(StringUtils.EMPTY, (Callable<Void>) () -> null);
+        fail();
+      } catch (Exception e) {
+        return;
+      }
+
+      // case2: run by the login user, there is no need to wrap proxy action
+      Path pathWithHdfsUser = new Path("/hdfs/HadoopSecurityContextTest");
+      context.runSecured("hdfs", (Callable<Void>) () -> {
+        kerberizedHdfs.getFileSystem().mkdirs(pathWithHdfsUser);
+        return null;
+      });
+      FileStatus fileStatus = 
kerberizedHdfs.getFileSystem().getFileStatus(pathWithHdfsUser);
+      assertEquals("hdfs", fileStatus.getOwner());
+
+      // case3: run by the proxy user
+      Path pathWithAlexUser = new Path("/alex/HadoopSecurityContextTest");
+      context.runSecured("alex", (Callable<Void>) () -> {
+        kerberizedHdfs.getFileSystem().mkdirs(pathWithAlexUser);
+        return null;
+      });
+      fileStatus = 
kerberizedHdfs.getFileSystem().getFileStatus(pathWithAlexUser);
+      assertEquals("alex", fileStatus.getOwner());
 
-    // case1: when user is empty or null, it should throw exception
-    try {
-      context.runSecured(StringUtils.EMPTY, (Callable<Void>) () -> null);
-      fail();
-    } catch (Exception e) {
-      return;
     }
-
-    // case2: run by the login user, there is no need to wrap proxy action
-    Path pathWithHdfsUser = new Path("/hdfs/HadoopSecurityContextTest");
-    context.runSecured("hdfs", (Callable<Void>) () -> {
-      kerberizedHdfs.getFileSystem().mkdirs(pathWithHdfsUser);
-      return null;
-    });
-    FileStatus fileStatus = 
kerberizedHdfs.getFileSystem().getFileStatus(pathWithHdfsUser);
-    assertEquals("hdfs", fileStatus.getOwner());
-
-    // case3: run by the proxy user
-    Path pathWithAlexUser = new Path("/alex/HadoopSecurityContextTest");
-    context.runSecured("alex", (Callable<Void>) () -> {
-      kerberizedHdfs.getFileSystem().mkdirs(pathWithAlexUser);
-      return null;
-    });
-    fileStatus = 
kerberizedHdfs.getFileSystem().getFileStatus(pathWithAlexUser);
-    assertEquals("alex", fileStatus.getOwner());
-
-    context.close();
   }
 
   @Test
   public void testCreateIllegalContext() throws Exception {
     // case1: lack principal, should throw exception
-    try {
-      HadoopSecurityContext context = new HadoopSecurityContext(
-          null,
-          kerberizedHdfs.getHdfsKeytab(),
-          null,
-          1000
-      );
+    try (HadoopSecurityContext context = new HadoopSecurityContext(
+            null,
+            kerberizedHdfs.getHdfsKeytab(),
+            null,
+            1000)) {
       fail();
     } catch (Exception e) {
       assertTrue(e.getMessage().contains("principal must be not null or 
empty"));
     }
 
     // case2: lack keytab, should throw exception
-    try {
-      HadoopSecurityContext context = new HadoopSecurityContext(
-          null,
-          null,
-          kerberizedHdfs.getHdfsPrincipal(),
-          1000
-      );
+    try (HadoopSecurityContext context = new HadoopSecurityContext(
+            null,
+            null,
+            kerberizedHdfs.getHdfsPrincipal(),
+            1000)) {
       fail();
     } catch (Exception e) {
       assertTrue(e.getMessage().contains("KeytabFilePath must be not null or 
empty"));
     }
 
-    // case3: illegal relogin interval sec
-    try {
-      HadoopSecurityContext context = new HadoopSecurityContext(
-          null,
-          kerberizedHdfs.getHdfsKeytab(),
-          kerberizedHdfs.getHdfsPrincipal(),
-          0
-      );
+    // case3: illegal re-login interval sec
+    try (HadoopSecurityContext context = new HadoopSecurityContext(
+            null,
+            kerberizedHdfs.getHdfsKeytab(),
+            kerberizedHdfs.getHdfsPrincipal(),
+            0)) {
       fail();
     } catch (Exception e) {
       assertTrue(e.getMessage().contains("refreshIntervalSec must be not 
negative"));
@@ -121,13 +114,11 @@ public class HadoopSecurityContextTest extends 
KerberizedHdfsBase {
     // case4: lack krb5 conf, should throw exception
     String krbConfFilePath = System.getProperty("java.security.krb5.conf");
     System.clearProperty("java.security.krb5.conf");
-    try {
-      HadoopSecurityContext context = new HadoopSecurityContext(
-          null,
-          kerberizedHdfs.getHdfsKeytab(),
-          kerberizedHdfs.getHdfsPrincipal(),
-          100
-      );
+    try (HadoopSecurityContext context = new HadoopSecurityContext(
+              null,
+              kerberizedHdfs.getHdfsKeytab(),
+              kerberizedHdfs.getHdfsPrincipal(),
+              100)) {
       fail();
     } catch (Exception e) {
       assertTrue(e.getMessage().contains("Cannot locate KDC"));
@@ -135,11 +126,12 @@ public class HadoopSecurityContextTest extends 
KerberizedHdfsBase {
 
     // case5: After setting the krb5 conf, it should pass
     HadoopSecurityContext context = new HadoopSecurityContext(
-        krbConfFilePath,
-        kerberizedHdfs.getHdfsKeytab(),
-        kerberizedHdfs.getHdfsPrincipal(),
-        100
+            krbConfFilePath,
+            kerberizedHdfs.getHdfsKeytab(),
+            kerberizedHdfs.getHdfsPrincipal(),
+            100
     );
+    context.close();
 
     // recover System property of krb5 conf
     System.setProperty("java.security.krb5.conf", krbConfFilePath);
diff --git 
a/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
 
b/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
index 301e7588..7fcde9ed 100644
--- 
a/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
+++ 
b/common/src/test/java/org/apache/uniffle/common/security/SecurityContextFactoryTest.java
@@ -47,8 +47,8 @@ public class SecurityContextFactoryTest extends 
KerberizedHdfsBase {
     SecurityContext securityContext = 
SecurityContextFactory.get().getSecurityContext();
     assertEquals(NoOpSecurityContext.class, securityContext.getClass());
 
-    final SecurityConfig securityConfig = null;
-    SecurityContextFactory.get().init(securityConfig);
+    // init with null config, should return NoOpSecurityContext.
+    SecurityContextFactory.get().init(null);
     securityContext = SecurityContextFactory.get().getSecurityContext();
     assertEquals(NoOpSecurityContext.class, securityContext.getClass());
   }
@@ -68,13 +68,13 @@ public class SecurityContextFactoryTest extends 
KerberizedHdfsBase {
     }
 
     // case2: create the correct hadoop security context
-    final SecurityConfig correntConfig = SecurityConfig
+    final SecurityConfig correctConfig = SecurityConfig
         .newBuilder()
         .keytabFilePath(kerberizedHdfs.getHdfsKeytab())
         .principal(kerberizedHdfs.getHdfsPrincipal())
         .reloginIntervalSec(60)
         .build();
-    SecurityContextFactory.get().init(correntConfig);
+    SecurityContextFactory.get().init(correctConfig);
     SecurityContext securityContext = 
SecurityContextFactory.get().getSecurityContext();
     assertEquals(HadoopSecurityContext.class, securityContext.getClass());
     securityContext.close();
diff --git 
a/common/src/test/java/org/apache/uniffle/common/util/RetryUtilsTest.java 
b/common/src/test/java/org/apache/uniffle/common/util/RetryUtilsTest.java
index 31e6456c..3a1309d8 100644
--- a/common/src/test/java/org/apache/uniffle/common/util/RetryUtilsTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/util/RetryUtilsTest.java
@@ -58,15 +58,16 @@ public class RetryUtilsTest {
     assertEquals(tryTimes.get(), maxTryTime);
 
     tryTimes.set(0);
+    int ret = 0;
     try {
-      int ret = RetryUtils.retry(() -> {
+      ret = RetryUtils.retry(() -> {
         tryTimes.incrementAndGet();
         return 1;
       }, 10, maxTryTime);
-      assertEquals(ret, 1);
     } catch (Throwable throwable) {
       // ignore
     }
+    assertEquals(ret, 1);
     assertEquals(tryTimes.get(), 1);
 
     tryTimes.set(0);
diff --git 
a/common/src/test/java/org/apache/uniffle/common/util/RssUtilsTest.java 
b/common/src/test/java/org/apache/uniffle/common/util/RssUtilsTest.java
index a84e3902..6ed70798 100644
--- a/common/src/test/java/org/apache/uniffle/common/util/RssUtilsTest.java
+++ b/common/src/test/java/org/apache/uniffle/common/util/RssUtilsTest.java
@@ -38,6 +38,7 @@ import org.apache.uniffle.common.ShuffleServerInfo;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNotSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
@@ -63,7 +64,7 @@ public class RssUtilsTest {
       InetAddress ia = InetAddress.getByName(realIp);
       assertTrue(ia instanceof Inet4Address);
       assertFalse(ia.isLinkLocalAddress() || ia.isAnyLocalAddress() || 
ia.isLoopbackAddress());
-      assertTrue(NetworkInterface.getByInetAddress(ia) != null);
+      assertNotNull(NetworkInterface.getByInetAddress(ia));
       assertTrue(ia.isReachable(5000));
       setEnv("RSS_IP", "8.8.8.8");
       assertEquals("8.8.8.8", RssUtils.getHostIp());
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
index 1fd84058..2ac693b3 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
@@ -64,96 +64,97 @@ public class SimpleClusterManagerTest {
     CoordinatorConf coordinatorConf = new CoordinatorConf();
     
coordinatorConf.set(CoordinatorConf.COORDINATOR_START_SILENT_PERIOD_ENABLED, 
true);
     
coordinatorConf.set(CoordinatorConf.COORDINATOR_START_SILENT_PERIOD_DURATION, 
20 * 1000L);
-    SimpleClusterManager manager = new SimpleClusterManager(coordinatorConf, 
new Configuration());
-    assertFalse(manager.isReadyForServe());
+    try (SimpleClusterManager manager = new 
SimpleClusterManager(coordinatorConf, new Configuration())) {
+      assertFalse(manager.isReadyForServe());
 
-    manager.setStartTime(System.currentTimeMillis() - 30 * 1000L);
-    assertTrue(manager.isReadyForServe());
+      manager.setStartTime(System.currentTimeMillis() - 30 * 1000L);
+      assertTrue(manager.isReadyForServe());
+    }
   }
 
   @Test
   public void getServerListTest() throws Exception {
     CoordinatorConf ssc = new CoordinatorConf();
     ssc.setLong(CoordinatorConf.COORDINATOR_HEARTBEAT_TIMEOUT, 30 * 1000L);
-    SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, new 
Configuration());
-    ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
-        10, testTags, true);
-    ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
-        10, testTags, true);
-    ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
-        11, testTags, true);
-    clusterManager.add(sn1);
-    clusterManager.add(sn2);
-    clusterManager.add(sn3);
-    List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(3, serverNodes.size());
-    Set<String> expectedIds = Sets.newHashSet("sn1", "sn2", "sn3");
-    assertEquals(expectedIds, 
serverNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
-
-    // tag changes
-    sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
-        10, Sets.newHashSet("new_tag"), true);
-    sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
-        10, Sets.newHashSet("test", "new_tag"), true);
-    ServerNode sn4 = new ServerNode("sn4", "ip", 0, 100L, 51L, 20,
-        10, testTags, true);
-    clusterManager.add(sn1);
-    clusterManager.add(sn2);
-    clusterManager.add(sn4);
-    serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(3, serverNodes.size());
-    assertTrue(serverNodes.contains(sn2));
-    assertTrue(serverNodes.contains(sn3));
-    assertTrue(serverNodes.contains(sn4));
-
-    Map<String, Set<ServerNode>> tagToNodes = clusterManager.getTagToNodes();
-    assertEquals(2, tagToNodes.size());
-
-    Set<ServerNode> newTagNodes = tagToNodes.get("new_tag");
-    assertEquals(2, newTagNodes.size());
-    assertTrue(newTagNodes.contains(sn1));
-    assertTrue(newTagNodes.contains(sn2));
-
-    Set<ServerNode> testTagNodes = tagToNodes.get("test");
-    assertEquals(3, testTagNodes.size());
-    assertTrue(testTagNodes.contains(sn2));
-    assertTrue(testTagNodes.contains(sn3));
-    assertTrue(testTagNodes.contains(sn4));
-
-    clusterManager.close();
+    try (SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, 
new Configuration())) {
+
+      ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
+              10, testTags, true);
+      ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
+              10, testTags, true);
+      ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
+              11, testTags, true);
+      clusterManager.add(sn1);
+      clusterManager.add(sn2);
+      clusterManager.add(sn3);
+      List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(3, serverNodes.size());
+      Set<String> expectedIds = Sets.newHashSet("sn1", "sn2", "sn3");
+      assertEquals(expectedIds, 
serverNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
+
+      // tag changes
+      sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
+              10, Sets.newHashSet("new_tag"), true);
+      sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
+              10, Sets.newHashSet("test", "new_tag"), true);
+      ServerNode sn4 = new ServerNode("sn4", "ip", 0, 100L, 51L, 20,
+              10, testTags, true);
+      clusterManager.add(sn1);
+      clusterManager.add(sn2);
+      clusterManager.add(sn4);
+      serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(3, serverNodes.size());
+      assertTrue(serverNodes.contains(sn2));
+      assertTrue(serverNodes.contains(sn3));
+      assertTrue(serverNodes.contains(sn4));
+
+      Map<String, Set<ServerNode>> tagToNodes = clusterManager.getTagToNodes();
+      assertEquals(2, tagToNodes.size());
+
+      Set<ServerNode> newTagNodes = tagToNodes.get("new_tag");
+      assertEquals(2, newTagNodes.size());
+      assertTrue(newTagNodes.contains(sn1));
+      assertTrue(newTagNodes.contains(sn2));
+
+      Set<ServerNode> testTagNodes = tagToNodes.get("test");
+      assertEquals(3, testTagNodes.size());
+      assertTrue(testTagNodes.contains(sn2));
+      assertTrue(testTagNodes.contains(sn3));
+      assertTrue(testTagNodes.contains(sn4));
+    }
   }
 
   @Test
   public void 
testGetCorrectServerNodesWhenOneNodeRemovedAndUnhealthyNodeFound() throws 
Exception {
     CoordinatorConf ssc = new CoordinatorConf();
     ssc.setLong(CoordinatorConf.COORDINATOR_HEARTBEAT_TIMEOUT, 30 * 1000L);
-    SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, new 
Configuration());
-    ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
-        10, testTags, false);
-    ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
-        10, testTags, true);
-    ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
-        11, testTags, true);
-    clusterManager.add(sn1);
-    clusterManager.add(sn2);
-    clusterManager.add(sn3);
-
-    List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(2, serverNodes.size());
-    assertEquals(0, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
-    clusterManager.nodesCheck();
-
-    List<ServerNode> serverList = clusterManager.getServerList(testTags);
-    Assertions.assertEquals(2, serverList.size());
-    assertEquals(1, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
-
-    sn3.setTimestamp(System.currentTimeMillis() - 60 * 1000L);
-    clusterManager.nodesCheck();
-
-    List<ServerNode> serverList2 = clusterManager.getServerList(testTags);
-    Assertions.assertEquals(1, serverList2.size());
-    assertEquals(1, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
-    clusterManager.close();
+    try (SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, 
new Configuration())) {
+      ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
+              10, testTags, false);
+      ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
+              10, testTags, true);
+      ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
+              11, testTags, true);
+      clusterManager.add(sn1);
+      clusterManager.add(sn2);
+      clusterManager.add(sn3);
+
+      List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(2, serverNodes.size());
+      assertEquals(0, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
+      clusterManager.nodesCheck();
+
+      List<ServerNode> serverList = clusterManager.getServerList(testTags);
+      Assertions.assertEquals(2, serverList.size());
+      assertEquals(1, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
+
+      sn3.setTimestamp(System.currentTimeMillis() - 60 * 1000L);
+      clusterManager.nodesCheck();
+
+      List<ServerNode> serverList2 = clusterManager.getServerList(testTags);
+      Assertions.assertEquals(1, serverList2.size());
+      assertEquals(1, CoordinatorMetrics.gaugeUnhealthyServerNum.get());
+    }
   }
 
   private void addNode(String id, SimpleClusterManager clusterManager) {
@@ -166,52 +167,51 @@ public class SimpleClusterManagerTest {
   public void heartbeatTimeoutTest() throws Exception {
     CoordinatorConf ssc = new CoordinatorConf();
     ssc.setLong(CoordinatorConf.COORDINATOR_HEARTBEAT_TIMEOUT, 300L);
-    SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, new 
Configuration());
-
-    addNode("sn0", clusterManager);
-    addNode("sn1", clusterManager);
-    List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(2, serverNodes.size());
-    Set<String> expectedIds = Sets.newHashSet("sn0", "sn1");
-    assertEquals(expectedIds,
-        
serverNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
-    await().atMost(1, TimeUnit.SECONDS).until(() -> 
clusterManager.getServerList(testTags).isEmpty());
-
-    addNode("sn2", clusterManager);
-    serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(1, serverNodes.size());
-    assertEquals("sn2", serverNodes.get(0).getId());
-    await().atMost(1, TimeUnit.SECONDS).until(() -> 
clusterManager.getServerList(testTags).isEmpty());
-
-    clusterManager.close();
+    try (SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, 
new Configuration())) {
+      addNode("sn0", clusterManager);
+      addNode("sn1", clusterManager);
+      List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(2, serverNodes.size());
+      Set<String> expectedIds = Sets.newHashSet("sn0", "sn1");
+      assertEquals(expectedIds,
+              
serverNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
+      await().atMost(1, TimeUnit.SECONDS).until(() -> 
clusterManager.getServerList(testTags).isEmpty());
+
+      addNode("sn2", clusterManager);
+      serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(1, serverNodes.size());
+      assertEquals("sn2", serverNodes.get(0).getId());
+      await().atMost(1, TimeUnit.SECONDS).until(() -> 
clusterManager.getServerList(testTags).isEmpty());
+
+    }
   }
 
   @Test
   public void testGetCorrectServerNodesWhenOneNodeRemoved() throws Exception {
     CoordinatorConf ssc = new CoordinatorConf();
     ssc.setLong(CoordinatorConf.COORDINATOR_HEARTBEAT_TIMEOUT, 30 * 1000L);
-    SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, new 
Configuration());
-    ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
-            10, testTags, true);
-    ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
-            10, testTags, true);
-    ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
-            11, testTags, true);
-    clusterManager.add(sn1);
-    clusterManager.add(sn2);
-    clusterManager.add(sn3);
-    List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
-    assertEquals(3, serverNodes.size());
-
-    sn3.setTimestamp(System.currentTimeMillis() - 60 * 1000L);
-    clusterManager.nodesCheck();
-
-    Map<String, Set<ServerNode>> tagToNodes = clusterManager.getTagToNodes();
-    List<ServerNode> serverList = clusterManager.getServerList(testTags);
-    Assertions.assertEquals(2, 
tagToNodes.get(testTags.iterator().next()).size());
-    Assertions.assertEquals(2, serverList.size());
-
-    clusterManager.close();
+    try (SimpleClusterManager clusterManager = new SimpleClusterManager(ssc, 
new Configuration())) {
+      ServerNode sn1 = new ServerNode("sn1", "ip", 0, 100L, 50L, 20,
+              10, testTags, true);
+      ServerNode sn2 = new ServerNode("sn2", "ip", 0, 100L, 50L, 21,
+              10, testTags, true);
+      ServerNode sn3 = new ServerNode("sn3", "ip", 0, 100L, 50L, 20,
+              11, testTags, true);
+      clusterManager.add(sn1);
+      clusterManager.add(sn2);
+      clusterManager.add(sn3);
+      List<ServerNode> serverNodes = clusterManager.getServerList(testTags);
+      assertEquals(3, serverNodes.size());
+
+      sn3.setTimestamp(System.currentTimeMillis() - 60 * 1000L);
+      clusterManager.nodesCheck();
+
+      Map<String, Set<ServerNode>> tagToNodes = clusterManager.getTagToNodes();
+      List<ServerNode> serverList = clusterManager.getServerList(testTags);
+      Assertions.assertEquals(2, 
tagToNodes.get(testTags.iterator().next()).size());
+      Assertions.assertEquals(2, serverList.size());
+
+    }
   }
 
   @Test
@@ -222,53 +222,52 @@ public class SimpleClusterManagerTest {
     ssc.setString(CoordinatorConf.COORDINATOR_EXCLUDE_NODES_FILE_PATH, 
URI.create(excludeNodesPath).toString());
     ssc.setLong(CoordinatorConf.COORDINATOR_EXCLUDE_NODES_CHECK_INTERVAL, 
2000);
 
-    SimpleClusterManager scm = new SimpleClusterManager(ssc, new 
Configuration());
-    scm.add(new ServerNode("node1-1999", "ip", 0, 100L, 50L, 20,
-        10, testTags, true));
-    scm.add(new ServerNode("node2-1999", "ip", 0, 100L, 50L, 20,
-        10, testTags, true));
-    scm.add(new ServerNode("node3-1999", "ip", 0, 100L, 50L, 20,
-        10, testTags, true));
-    scm.add(new ServerNode("node4-1999", "ip", 0, 100L, 50L, 20,
-        10, testTags, true));
-    assertTrue(scm.getExcludeNodes().isEmpty());
-
-    final Set<String> nodes = Sets.newHashSet("node1-1999", "node2-1999");
-    writeExcludeHosts(excludeNodesPath, nodes);
-    await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes));
-    List<ServerNode> availableNodes = scm.getServerList(testTags);
-    assertEquals(2, availableNodes.size());
-    Set<String> remainNodes = Sets.newHashSet("node3-1999", "node4-1999");
-    assertEquals(remainNodes, 
availableNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
-
-    final Set<String> nodes2 = Sets.newHashSet("node3-1999", "node4-1999");
-    writeExcludeHosts(excludeNodesPath, nodes2);
-    await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes2));
-    assertEquals(nodes2, scm.getExcludeNodes());
-
-    Set<String> excludeNodes = scm.getExcludeNodes();
-    Thread.sleep(3000);
-    // excludeNodes shouldn't be updated if file has no change
-    assertEquals(excludeNodes, scm.getExcludeNodes());
-
-    writeExcludeHosts(excludeNodesPath, Sets.newHashSet());
-    // excludeNodes is an empty file, set should be empty
-    await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().isEmpty());
-
-    final Set<String> nodes3 = Sets.newHashSet("node1-1999");
-    writeExcludeHosts(excludeNodesPath, nodes3);
-    await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes3));
-
-    File blacklistFile = new File(excludeNodesPath);
-    assertTrue(blacklistFile.delete());
-    // excludeNodes is deleted, set should be empty
-    await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().isEmpty());
-
-    remainNodes = Sets.newHashSet("node1-1999", "node2-1999", "node3-1999", 
"node4-1999");
-    availableNodes = scm.getServerList(testTags);
-    assertEquals(remainNodes, 
availableNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
-
-    scm.close();
+    try (SimpleClusterManager scm = new SimpleClusterManager(ssc, new 
Configuration())) {
+      scm.add(new ServerNode("node1-1999", "ip", 0, 100L, 50L, 20,
+              10, testTags, true));
+      scm.add(new ServerNode("node2-1999", "ip", 0, 100L, 50L, 20,
+              10, testTags, true));
+      scm.add(new ServerNode("node3-1999", "ip", 0, 100L, 50L, 20,
+              10, testTags, true));
+      scm.add(new ServerNode("node4-1999", "ip", 0, 100L, 50L, 20,
+              10, testTags, true));
+      assertTrue(scm.getExcludeNodes().isEmpty());
+
+      final Set<String> nodes = Sets.newHashSet("node1-1999", "node2-1999");
+      writeExcludeHosts(excludeNodesPath, nodes);
+      await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes));
+      List<ServerNode> availableNodes = scm.getServerList(testTags);
+      assertEquals(2, availableNodes.size());
+      Set<String> remainNodes = Sets.newHashSet("node3-1999", "node4-1999");
+      assertEquals(remainNodes, 
availableNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
+
+      final Set<String> nodes2 = Sets.newHashSet("node3-1999", "node4-1999");
+      writeExcludeHosts(excludeNodesPath, nodes2);
+      await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes2));
+      assertEquals(nodes2, scm.getExcludeNodes());
+
+      Set<String> excludeNodes = scm.getExcludeNodes();
+      Thread.sleep(3000);
+      // excludeNodes shouldn't be updated if file has no change
+      assertEquals(excludeNodes, scm.getExcludeNodes());
+
+      writeExcludeHosts(excludeNodesPath, Sets.newHashSet());
+      // excludeNodes is an empty file, set should be empty
+      await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().isEmpty());
+
+      final Set<String> nodes3 = Sets.newHashSet("node1-1999");
+      writeExcludeHosts(excludeNodesPath, nodes3);
+      await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().equals(nodes3));
+
+      File blacklistFile = new File(excludeNodesPath);
+      assertTrue(blacklistFile.delete());
+      // excludeNodes is deleted, set should be empty
+      await().atMost(3, TimeUnit.SECONDS).until(() -> 
scm.getExcludeNodes().isEmpty());
+
+      remainNodes = Sets.newHashSet("node1-1999", "node2-1999", "node3-1999", 
"node4-1999");
+      availableNodes = scm.getServerList(testTags);
+      assertEquals(remainNodes, 
availableNodes.stream().map(ServerNode::getId).collect(Collectors.toSet()));
+    }
   }
 
   private void writeExcludeHosts(String path, Set<String> values) throws 
Exception {
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/access/AccessManagerTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/access/AccessManagerTest.java
index df1ce96e..2c3bef34 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/access/AccessManagerTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/access/AccessManagerTest.java
@@ -78,16 +78,16 @@ public class AccessManagerTest {
         .isSuccess());
     accessManager.close();
     // test mock checkers
+    String alwaysTrueClassName = MockAccessCheckerAlwaysTrue.class.getName();
     conf.setString(CoordinatorConf.COORDINATOR_ACCESS_CHECKERS.key(),
-        
"org.apache.uniffle.coordinator.access.AccessManagerTest$MockAccessCheckerAlwaysTrue,");
+        alwaysTrueClassName + ",");
     accessManager = new AccessManager(conf, null,
         applicationManager.getQuotaManager(), new Configuration());
     assertEquals(1, accessManager.getAccessCheckers().size());
     assertTrue(accessManager.handleAccessRequest(new 
AccessInfo("mock1")).isSuccess());
     assertTrue(accessManager.handleAccessRequest(new 
AccessInfo("mock2")).isSuccess());
-    conf.setString(CoordinatorConf.COORDINATOR_ACCESS_CHECKERS.key(),
-        
"org.apache.uniffle.coordinator.access.AccessManagerTest$MockAccessCheckerAlwaysTrue,"
-            + 
"org.apache.uniffle.coordinator.access.AccessManagerTest$MockAccessCheckerAlwaysFalse");
+    String alwaysFalseClassName = MockAccessCheckerAlwaysFalse.class.getName();
+    conf.setString(CoordinatorConf.COORDINATOR_ACCESS_CHECKERS.key(), 
alwaysTrueClassName + "," + alwaysFalseClassName);
     accessManager = new AccessManager(conf, null,
         applicationManager.getQuotaManager(), new Configuration());
     assertEquals(2, accessManager.getAccessCheckers().size());
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessCandidatesCheckerTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessCandidatesCheckerTest.java
index 78ef164d..e50a7915 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessCandidatesCheckerTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessCandidatesCheckerTest.java
@@ -63,8 +63,8 @@ public class AccessCandidatesCheckerTest {
         getClass().getClassLoader().getResource("coordinator.conf")).getFile();
     CoordinatorConf conf = new CoordinatorConf(filePath);
     conf.set(CoordinatorConf.COORDINATOR_ACCESS_CANDIDATES_PATH, 
tempDir.toURI().toString());
-    conf.setString(CoordinatorConf.COORDINATOR_ACCESS_CHECKERS.key(),
-        
"org.apache.uniffle.coordinator.access.checker.AccessCandidatesChecker");
+    String checkerClassName = AccessCandidatesChecker.class.getName();
+    conf.setString(CoordinatorConf.COORDINATOR_ACCESS_CHECKERS.key(), 
checkerClassName);
     final ApplicationManager applicationManager = new ApplicationManager(conf);
     // file load checking at startup
     Exception expectedException = null;
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessClusterLoadCheckerTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessClusterLoadCheckerTest.java
index 7fa76391..a48e69a4 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessClusterLoadCheckerTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessClusterLoadCheckerTest.java
@@ -17,7 +17,7 @@
 
 package org.apache.uniffle.coordinator.checker;
 
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -52,6 +52,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 public class AccessClusterLoadCheckerTest {
+  private static final String clusterLoaderCheckerName = 
AccessClusterLoadChecker.class.getName();
 
   @BeforeEach
   public void setUp() {
@@ -93,8 +94,7 @@ public class AccessClusterLoadCheckerTest {
     when(clusterManager.getServerList(any())).thenReturn(nodes);
 
     CoordinatorConf conf = new CoordinatorConf();
-    conf.set(COORDINATOR_ACCESS_CHECKERS,
-        
Arrays.asList("org.apache.uniffle.coordinator.access.checker.AccessClusterLoadChecker"));
+    conf.set(COORDINATOR_ACCESS_CHECKERS, 
Collections.singletonList(clusterLoaderCheckerName));
     conf.set(COORDINATOR_SHUFFLE_NODES_MAX, 3);
     conf.set(COORDINATOR_ACCESS_LOADCHECKER_MEMORY_PERCENTAGE, 20.0);
     ApplicationManager applicationManager = new ApplicationManager(conf);
@@ -160,8 +160,7 @@ public class AccessClusterLoadCheckerTest {
     final String filePath = Objects.requireNonNull(
         getClass().getClassLoader().getResource("coordinator.conf")).getFile();
     CoordinatorConf conf = new CoordinatorConf(filePath);
-    conf.setString(COORDINATOR_ACCESS_CHECKERS.key(),
-        
"org.apache.uniffle.coordinator.access.checker.AccessClusterLoadChecker");
+    conf.setString(COORDINATOR_ACCESS_CHECKERS.key(), 
clusterLoaderCheckerName);
     ApplicationManager applicationManager = new ApplicationManager(conf);
     AccessManager accessManager = new AccessManager(conf, clusterManager,
         applicationManager.getQuotaManager(), new Configuration());
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessQuotaCheckerTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessQuotaCheckerTest.java
index 2c1eb203..79aabb75 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessQuotaCheckerTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/checker/AccessQuotaCheckerTest.java
@@ -92,7 +92,7 @@ public class AccessQuotaCheckerTest {
 
     CoordinatorConf conf = new CoordinatorConf();
     conf.set(COORDINATOR_ACCESS_CHECKERS,
-        
Collections.singletonList("org.apache.uniffle.coordinator.access.checker.AccessQuotaChecker"));
+        Collections.singletonList(AccessQuotaChecker.class.getName()));
     conf.set(COORDINATOR_QUOTA_DEFAULT_APP_NUM, 3);
     ApplicationManager applicationManager = new ApplicationManager(conf);
     AccessManager accessManager = new AccessManager(conf, clusterManager,
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/BasicAssignmentStrategyTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/BasicAssignmentStrategyTest.java
index 635dce00..03992edc 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/BasicAssignmentStrategyTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/BasicAssignmentStrategyTest.java
@@ -76,7 +76,7 @@ public class BasicAssignmentStrategyTest {
     assertEquals(10, assignments.size());
 
     for (int i = 0; i < 100; i += 10) {
-      assignments.containsKey(new PartitionRange(i, i + 10));
+      assertTrue(assignments.containsKey(new PartitionRange(i, i + 10)));
     }
 
     int i = 0;
@@ -225,7 +225,7 @@ public class BasicAssignmentStrategyTest {
     );
 
     /**
-     * case5: user specify the legal shuffle node num, but cluster dont have 
enough servers,
+     * case5: user specify the legal shuffle node num, but cluster don't have 
enough servers,
      * it will return the remaining servers.
      */
     serverTags = Sets.newHashSet("tag-2");
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionBalanceAssignmentStrategyTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionBalanceAssignmentStrategyTest.java
index e7b2e525..f4dcb259 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionBalanceAssignmentStrategyTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionBalanceAssignmentStrategyTest.java
@@ -270,7 +270,7 @@ public class PartitionBalanceAssignmentStrategyTest {
     );
 
     /**
-     * case5: user specify the legal shuffle node num, but cluster dont have 
enough servers,
+     * case5: user specify the legal shuffle node num, but cluster don't have 
enough servers,
      * it will return the remaining servers.
      */
     serverTags = Sets.newHashSet("tag-2");
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionRangeTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionRangeTest.java
index 63970e50..81e99491 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionRangeTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/strategy/assignment/PartitionRangeTest.java
@@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test;
 import org.apache.uniffle.common.PartitionRange;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
 
 public class PartitionRangeTest {
 
@@ -30,7 +30,7 @@ public class PartitionRangeTest {
   public void test() {
     PartitionRange range1 = new PartitionRange(0, 5);
     PartitionRange range2 = new PartitionRange(0, 5);
-    assertFalse(range1 == range2);
+    assertNotSame(range1, range2);
     assertEquals(range1, range2);
     assertEquals(0, range1.getStart());
     assertEquals(5, range1.getEnd());
diff --git 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/util/CoordinatorUtilsTest.java
 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/util/CoordinatorUtilsTest.java
index f658ec3e..1b0afe28 100644
--- 
a/coordinator/src/test/java/org/apache/uniffle/coordinator/util/CoordinatorUtilsTest.java
+++ 
b/coordinator/src/test/java/org/apache/uniffle/coordinator/util/CoordinatorUtilsTest.java
@@ -131,6 +131,7 @@ public class CoordinatorUtilsTest {
   }
 
   private void validate(int[] expect, List<List<PartitionRange>> rangesGroup) {
+    assertEquals(expect.length, rangesGroup.size());
     for (int i = 0; i < expect.length; i++) {
       assertEquals(expect[i], rangesGroup.get(i).size());
     }
diff --git 
a/server/src/test/java/org/apache/uniffle/server/HealthCheckTest.java 
b/server/src/test/java/org/apache/uniffle/server/HealthCheckTest.java
index fc202a44..f1bbd0c6 100644
--- a/server/src/test/java/org/apache/uniffle/server/HealthCheckTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/HealthCheckTest.java
@@ -49,7 +49,7 @@ public class HealthCheckTest {
     assertConf(conf);
     conf.setString(ShuffleServerConf.HEALTH_CHECKER_CLASS_NAMES.key(), "");
     assertConf(conf);
-    conf.setString(ShuffleServerConf.HEALTH_CHECKER_CLASS_NAMES.key(), 
"org.apache.uniffle.server.LocalStorageChecker");
+    conf.setString(ShuffleServerConf.HEALTH_CHECKER_CLASS_NAMES.key(), 
LocalStorageChecker.class.getCanonicalName());
     conf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, Arrays.asList("s1"));
     conf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, 
StorageType.HDFS.name());
     assertConf(conf);
diff --git 
a/server/src/test/java/org/apache/uniffle/server/ShuffleServerMetricsTest.java 
b/server/src/test/java/org/apache/uniffle/server/ShuffleServerMetricsTest.java
index 52629f40..24da7b0e 100644
--- 
a/server/src/test/java/org/apache/uniffle/server/ShuffleServerMetricsTest.java
+++ 
b/server/src/test/java/org/apache/uniffle/server/ShuffleServerMetricsTest.java
@@ -183,7 +183,7 @@ public class ShuffleServerMetricsTest {
     }
 
     List<Future<Void>> results = executorService.invokeAll(calls);
-    for (Future f : results) {
+    for (Future<Void> f : results) {
       f.get();
     }
 
diff --git 
a/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java 
b/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
index 31e48903..661857d8 100644
--- a/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/ShuffleTaskManagerTest.java
@@ -166,7 +166,7 @@ public class ShuffleTaskManagerTest extends HdfsTestBase {
     shuffleTaskManager.requireBuffer(10);
     shuffleTaskManager.requireBuffer(10);
     assertEquals(3, bufferIds.size());
-    // required buffer should be clear if doesn't receive data after timeout
+    // required buffer should be clear if it doesn't receive data after timeout
     Thread.sleep(6000);
     assertEquals(0, bufferIds.size());
 
diff --git 
a/server/src/test/java/org/apache/uniffle/server/StorageCheckerTest.java 
b/server/src/test/java/org/apache/uniffle/server/StorageCheckerTest.java
index 078f9502..c712e989 100644
--- a/server/src/test/java/org/apache/uniffle/server/StorageCheckerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/StorageCheckerTest.java
@@ -25,6 +25,7 @@ import com.google.common.collect.Lists;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 
 import org.apache.uniffle.storage.common.LocalStorage;
 import org.apache.uniffle.storage.util.StorageType;
@@ -48,16 +49,19 @@ public class StorageCheckerTest {
   }
 
   @Test
-  public void checkTest() throws Exception {
+  public void checkTest(@TempDir File baseDir) throws Exception {
     ShuffleServerConf conf = new ShuffleServerConf();
     conf.setBoolean(ShuffleServerConf.HEALTH_CHECK_ENABLE, true);
     conf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, 
StorageType.LOCALFILE.name());
-    conf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, Arrays.asList("st1", 
"st2", "st3"));
+    String st1 = new File(baseDir, "st1").getPath();
+    String st2 = new File(baseDir, "st2").getPath();
+    String st3 = new File(baseDir, "st3").getPath();
+    conf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, Arrays.asList(st1, st2, 
st3));
     conf.set(ShuffleServerConf.HEALTH_MIN_STORAGE_PERCENTAGE, 55.0);
     List<LocalStorage> storages = Lists.newArrayList();
-    storages.add(LocalStorage.newBuilder().basePath("st1").build());
-    storages.add(LocalStorage.newBuilder().basePath("st2").build());
-    storages.add(LocalStorage.newBuilder().basePath("st3").build());
+    storages.add(LocalStorage.newBuilder().basePath(st1).build());
+    storages.add(LocalStorage.newBuilder().basePath(st2).build());
+    storages.add(LocalStorage.newBuilder().basePath(st3).build());
     LocalStorageChecker checker = new MockStorageChecker(conf, storages);
 
     assertTrue(checker.checkIsHealthy());
@@ -115,7 +119,7 @@ public class StorageCheckerTest {
     @Override
     long getUsedSpace(File file) {
       long result = 0;
-      switch (file.getPath()) {
+      switch (file.getName()) {
         case "st1":
           switch (callTimes) {
             case 0:
diff --git 
a/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferManagerTest.java
 
b/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferManagerTest.java
index 1e6d056d..4b6c0a77 100644
--- 
a/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferManagerTest.java
+++ 
b/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferManagerTest.java
@@ -63,6 +63,7 @@ public class ShuffleBufferManagerTest extends BufferTestBase {
   public void setUp() {
     conf = new ShuffleServerConf();
     File tmpDir = Files.createTempDir();
+    tmpDir.deleteOnExit();
     File dataDir = new File(tmpDir, "data");
     conf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, 
StorageType.LOCALFILE.name());
     conf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, 
Arrays.asList(dataDir.getAbsolutePath()));
@@ -128,7 +129,7 @@ public class ShuffleBufferManagerTest extends 
BufferTestBase {
     assertEquals(0, result.getBufferSegments().get(0).getOffset());
     assertEquals(68, result.getBufferSegments().get(0).getLength());
 
-    // 2th read
+    // 2nd read
     long lastBlockId = result.getBufferSegments().get(0).getBlockId();
     result = shuffleBufferManager.getShuffleData(
         appId,
@@ -182,7 +183,7 @@ public class ShuffleBufferManagerTest extends 
BufferTestBase {
     assertEquals(1, 
bufferPool.get(appId).get(2).get(0).getInFlushBlockMap().size());
     assertEquals(0, bufferPool.get(appId).get(3).get(0).getBlocks().size());
     assertEquals(1, 
bufferPool.get(appId).get(3).get(0).getInFlushBlockMap().size());
-    // keep buffer whose size < low water mark
+    // keep buffer whose size < low watermark
     assertEquals(1, bufferPool.get(appId).get(4).get(0).getBlocks().size());
     // data in flush buffer now, it also can be got before flush finish
     sdr = shuffleBufferManager.getShuffleData(
@@ -300,7 +301,7 @@ public class ShuffleBufferManagerTest extends 
BufferTestBase {
     sc = shuffleBufferManager.cacheShuffleData(appId, shuffleId, false, 
createData(0, 1));
     assertEquals(StatusCode.NO_BUFFER, sc);
 
-    // size won't be reduce which should be processed by flushManager, reset 
buffer size to 0
+    // size won't be reduced which should be processed by flushManager, reset 
buffer size to 0
     shuffleBufferManager.resetSize();
     shuffleBufferManager.removeBuffer(appId);
     assertEquals(startPartitionNum, (int) 
ShuffleServerMetrics.gaugeTotalPartitionNum.get());
@@ -443,6 +444,7 @@ public class ShuffleBufferManagerTest extends 
BufferTestBase {
   public void flushSingleBufferTest() throws Exception {
     ShuffleServerConf shuffleConf = new ShuffleServerConf();
     File tmpDir = Files.createTempDir();
+    tmpDir.deleteOnExit();
     File dataDir = new File(tmpDir, "data");
     shuffleConf.setString(ShuffleServerConf.RSS_STORAGE_TYPE, 
StorageType.LOCALFILE.name());
     shuffleConf.set(ShuffleServerConf.RSS_STORAGE_BASE_PATH, 
Arrays.asList(dataDir.getAbsolutePath()));
diff --git 
a/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferTest.java 
b/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferTest.java
index a45f1a55..39ab4e61 100644
--- 
a/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferTest.java
+++ 
b/server/src/test/java/org/apache/uniffle/server/buffer/ShuffleBufferTest.java
@@ -18,7 +18,6 @@
 package org.apache.uniffle.server.buffer;
 
 import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.collect.Lists;
 import org.junit.jupiter.api.Test;
@@ -40,12 +39,11 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class ShuffleBufferTest extends BufferTestBase {
 
-  private static AtomicLong atomBlockId = new AtomicLong(0);
-
   @Test
   public void appendTest() {
     ShuffleBuffer shuffleBuffer = new ShuffleBuffer(100);
     shuffleBuffer.append(createData(10));
+    // ShufflePartitionedBlock has constant 32 bytes overhead
     assertEquals(42, shuffleBuffer.getSize());
     assertFalse(shuffleBuffer.isFull());
 
@@ -129,7 +127,7 @@ public class ShuffleBufferTest extends BufferTestBase {
     assertEquals(15, result.getBufferSegments().get(1).getOffset());
     assertEquals(55, result.getBufferSegments().get(1).getLength());
 
-    // 2th read
+    // 2nd read
     long lastBlockId = result.getBufferSegments().get(1).getBlockId();
     result = shuffleBuffer.getShuffleData(lastBlockId, 60, expectedTasks);
     assertEquals(1, result.getBufferSegments().size());
@@ -171,7 +169,7 @@ public class ShuffleBufferTest extends BufferTestBase {
     assertEquals(15, result.getBufferSegments().get(1).getOffset());
     assertEquals(55, result.getBufferSegments().get(1).getLength());
 
-    // 2th read
+    // 2nd read
     lastBlockId = result.getBufferSegments().get(1).getBlockId();
     result = shuffleBuffer.getShuffleData(lastBlockId, 60, expectedTasks);
     assertEquals(1, result.getBufferSegments().size());
@@ -196,11 +194,11 @@ public class ShuffleBufferTest extends BufferTestBase {
     result = shuffleBuffer.getShuffleData(Constants.INVALID_BLOCK_ID, 60, 
expectedTasks);
     assertEquals(2, result.getBufferSegments().size());
 
-    // 2th read
+    // 2nd read
     lastBlockId = result.getBufferSegments().get(1).getBlockId();
     result = shuffleBuffer.getShuffleData(lastBlockId, 60, expectedTasks);
     assertEquals(2, result.getBufferSegments().size());
-    // 3th read
+    // 3rd read
     lastBlockId = result.getBufferSegments().get(1).getBlockId();
     result = shuffleBuffer.getShuffleData(lastBlockId, 60, expectedTasks);
     assertEquals(3, result.getBufferSegments().size());
@@ -364,7 +362,7 @@ public class ShuffleBufferTest extends BufferTestBase {
     assertEquals(3, shuffleBuffer.getBlocks().size());
     assertEquals(4, shuffleBuffer.getInFlushBlockMap().size());
 
-    // all data in shuffle buffer as following:
+    // all data in shuffle buffer are as following:
     // flush event1 -> spd1, spd2, spd3
     // flush event2 -> spd4, spd5, spd6
     // flush event3 -> spd7, spd8, spd9
diff --git 
a/server/src/test/java/org/apache/uniffle/server/storage/LocalStorageManagerTest.java
 
b/server/src/test/java/org/apache/uniffle/server/storage/LocalStorageManagerTest.java
index da536099..719b7b2c 100644
--- 
a/server/src/test/java/org/apache/uniffle/server/storage/LocalStorageManagerTest.java
+++ 
b/server/src/test/java/org/apache/uniffle/server/storage/LocalStorageManagerTest.java
@@ -38,7 +38,6 @@ import org.apache.uniffle.storage.util.StorageType;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -129,9 +128,9 @@ public class LocalStorageManagerTest {
 
     List<LocalStorage> storages = localStorageManager.getStorages();
     assertNotNull(storages);
-    assertTrue(storages.size() == storagePaths.length);
+    assertEquals(storages.size(), storagePaths.length);
     for (int i = 0; i < storagePaths.length; i++) {
-      assertTrue(storagePaths[i].equals(storages.get(i).getBasePath()));
+      assertEquals(storagePaths[i], storages.get(i).getBasePath());
     }
   }
 
diff --git a/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java b/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
index c6e37a26..c5b4512b 100644
--- a/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/storage/StorageManagerFallbackStrategyTest.java
@@ -30,7 +30,7 @@ import org.apache.uniffle.server.ShuffleDataFlushEvent;
 import org.apache.uniffle.server.ShuffleServerConf;
 import org.apache.uniffle.storage.util.StorageType;
 
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertSame;
 
 public class StorageManagerFallbackStrategyTest {
   private ShuffleServerConf conf;
@@ -60,25 +60,25 @@ public class StorageManagerFallbackStrategyTest {
     event.increaseRetryTimes();
     StorageManager storageManager = fallbackStrategy.tryFallback(
         current, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == coldStorageManager);
+    assertSame(storageManager, coldStorageManager);
 
     conf.setLong(ShuffleServerConf.FALLBACK_MAX_FAIL_TIMES, 3);
     fallbackStrategy = new RotateStorageManagerFallbackStrategy(conf);
     storageManager = fallbackStrategy.tryFallback(current, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == warmStorageManager);
+    assertSame(storageManager, warmStorageManager);
     for (int i = 0; i < 2; i++) {
       event.increaseRetryTimes();
     }
     storageManager = fallbackStrategy.tryFallback(current, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == coldStorageManager);
+    assertSame(storageManager, coldStorageManager);
     event.increaseRetryTimes();
     storageManager = fallbackStrategy.tryFallback(current, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == warmStorageManager);
+    assertSame(storageManager, warmStorageManager);
     for (int i = 0; i < 2; i++) {
       event.increaseRetryTimes();
     }
     storageManager = fallbackStrategy.tryFallback(coldStorageManager, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == warmStorageManager);
+    assertSame(storageManager, warmStorageManager);
   }
 
   @Test
@@ -96,10 +96,10 @@ public class StorageManagerFallbackStrategyTest {
     event.increaseRetryTimes();
     StorageManager storageManager = fallbackStrategy.tryFallback(
         warmStorageManager, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == warmStorageManager);
+    assertSame(storageManager, warmStorageManager);
 
     storageManager = fallbackStrategy.tryFallback(coldStorageManager, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == warmStorageManager);
+    assertSame(storageManager, warmStorageManager);
   }
 
 
@@ -118,9 +118,9 @@ public class StorageManagerFallbackStrategyTest {
     event.increaseRetryTimes();
     StorageManager storageManager = fallbackStrategy.tryFallback(
         warmStorageManager, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == coldStorageManager);
+    assertSame(storageManager, coldStorageManager);
 
     storageManager = fallbackStrategy.tryFallback(coldStorageManager, event, warmStorageManager, coldStorageManager);
-    assertTrue(storageManager == coldStorageManager);
+    assertSame(storageManager, coldStorageManager);
   }
 }

Reply via email to