This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 48175c5f47d HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part18. (#7941)
48175c5f47d is described below

commit 48175c5f47db9e8ed3fa5c159739da1b2c5396e6
Author: zhtttylz <hualon...@hotmail.com>
AuthorDate: Tue Sep 9 12:09:33 2025 +0800

    HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part18. (#7941)
    
    * HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part18.
    
    Reviewed-by: Shilun Fan <slfan1...@apache.org>
    Signed-off-by: Shilun Fan <slfan1...@apache.org>
---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml            |  5 --
 .../java/org/apache/hadoop/TestGenericRefresh.java | 34 +++++-----
 .../org/apache/hadoop/TestRefreshCallQueue.java    | 36 +++++------
 .../java/org/apache/hadoop/cli/TestAclCLI.java     | 10 +--
 .../cli/TestAclCLIWithPosixAclInheritance.java     |  2 +-
 .../org/apache/hadoop/cli/TestCacheAdminCLI.java   | 15 ++---
 .../org/apache/hadoop/cli/TestCryptoAdminCLI.java  | 16 ++---
 .../java/org/apache/hadoop/cli/TestDeleteCLI.java  | 16 ++---
 .../apache/hadoop/cli/TestErasureCodingCLI.java    | 17 ++---
 .../java/org/apache/hadoop/cli/TestHDFSCLI.java    | 16 ++---
 .../java/org/apache/hadoop/cli/TestXAttrCLI.java   | 16 ++---
 .../sink/TestRollingFileSystemSinkWithHdfs.java    | 46 ++++++-------
 .../TestRollingFileSystemSinkWithSecureHdfs.java   | 24 +++----
 .../org/apache/hadoop/net/TestNetworkTopology.java | 74 +++++++++++----------
 .../org/apache/hadoop/security/TestPermission.java | 56 ++++++++--------
 .../hadoop/security/TestPermissionSymlinks.java    | 75 ++++++++++++----------
 .../hadoop/security/TestRefreshUserMappings.java   | 23 ++++---
 .../java/org/apache/hadoop/tools/TestJMXGet.java   | 28 ++++----
 .../java/org/apache/hadoop/tools/TestTools.java    | 11 ++--
 19 files changed, 258 insertions(+), 262 deletions(-)
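
The diff is almost entirely mechanical. As a reading aid, here is a minimal
before/after sketch of the recurring JUnit 4 -> JUnit 5 patterns this patch
applies; the class and helper names below are illustrative only, not taken
from the Hadoop sources:

    // Before (JUnit 4):
    import org.junit.Before;
    import org.junit.BeforeClass;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.Timeout;
    import static org.junit.Assert.assertEquals;

    public class ExampleV4Test {
      @Rule // class-wide timeout as a rule, in milliseconds
      public Timeout globalTimeout = new Timeout(300000);

      @BeforeClass
      public static void setUpBeforeClass() { /* runs once per class */ }

      @Before
      public void setUp() { /* runs before every test */ }

      @Test(timeout = 5000) // per-test timeout in milliseconds
      public void testExitCode() {
        // JUnit 4 takes the failure message FIRST
        assertEquals("should succeed", 0, run());
      }

      private int run() { return 0; } // hypothetical code under test
    }

    // After (JUnit 5 / Jupiter):
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;
    import static org.junit.jupiter.api.Assertions.assertEquals;

    @Timeout(300) // replaces the @Rule; the default unit is seconds
    public class ExampleV5Test {
      @BeforeAll
      static void setUpBeforeClass() { /* runs once per class */ }

      @BeforeEach
      void setUp() { /* runs before every test */ }

      @Test
      @Timeout(value = 5) // replaces @Test(timeout = 5000)
      void testExitCode() {
        // JUnit 5 moves the failure message LAST
        assertEquals(0, run(), "should succeed");
      }

      private int run() { return 0; } // hypothetical code under test
    }

JUnit 5's @Timeout defaults to seconds, which is why @Test(timeout = 5000)
becomes @Timeout(value = 5) and the 300000 ms @Rule becomes @Timeout(300)
below. The message-position swap is why most hunks look like pure argument
reshuffling.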

diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 10b163a8fca..19fd9e8b628 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -168,11 +168,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       </exclusions>
     </dependency>
 
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
index 70f9af98417..c7dbc7e2e1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -29,11 +29,11 @@
 
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.AfterAll;
 import org.mockito.Mockito;
 
 /**
@@ -49,7 +49,7 @@ public class TestGenericRefresh {
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
@@ -59,14 +59,14 @@ public static void setUpBeforeClass() throws Exception {
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownBeforeClass() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Register Handlers, first one just sends an ok response
     firstHandler = Mockito.mock(RefreshHandler.class);
@@ -83,7 +83,7 @@ public void setUp() throws Exception {
     RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
     RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
@@ -94,7 +94,7 @@ public void testInvalidCommand() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refresh", "nn"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+    assertEquals(-1, exitCode, "DFSAdmin should fail due to bad args");
   }
 
   @Test
@@ -103,7 +103,7 @@ public void testInvalidIdentifier() throws Exception {
     String [] args = new String[]{"-refresh", "localhost:" + 
         cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to no handler registered", -1, 
exitCode);
+    assertEquals(-1, exitCode, "DFSAdmin should fail due to no handler 
registered");
   }
 
   @Test
@@ -112,7 +112,7 @@ public void testValidIdentifier() throws Exception {
     String[] args = new String[]{"-refresh",
         "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should succeed", 0, exitCode);
+    assertEquals(0, exitCode, "DFSAdmin should succeed");
 
     Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
     // Second handler was never called
@@ -126,11 +126,11 @@ public void testVariableArgs() throws Exception {
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 2", 2, exitCode);
+    assertEquals(2, exitCode, "DFSAdmin should return 2");
 
     exitCode = admin.run(new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one", "two"});
-    assertEquals("DFSAdmin should now return 3", 3, exitCode);
+    assertEquals(3, exitCode, "DFSAdmin should now return 3");
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
@@ -145,7 +145,7 @@ public void testUnregistration() throws Exception {
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return -1", -1, exitCode);
+    assertEquals(-1, exitCode, "DFSAdmin should return -1");
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
index 873a524c988..4436d2ae8b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
@@ -18,10 +18,10 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.BindException;
@@ -40,8 +40,8 @@
 import org.apache.hadoop.ipc.FairCallQueue;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 public class TestRefreshCallQueue {
   private MiniDFSCluster cluster;
@@ -77,7 +77,7 @@ private void setUp(Class<?> queueClass) throws IOException {
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -115,9 +115,8 @@ public void testRefresh() throws Exception {
     mockQueuePuts = 0;
     setUp(MockCallQueue.class);
 
-    assertTrue("Mock queue should have been constructed",
-        mockQueueConstructions > 0);
-    assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
+    assertTrue(mockQueueConstructions > 0, "Mock queue should have been constructed");
+    assertTrue(canPutInMockQueue(), "Puts are routed through MockQueue");
     int lastMockQueueConstructions = mockQueueConstructions;
 
     // Replace queue with the queue specified in core-site.xml, which would be
@@ -125,13 +124,12 @@ public void testRefresh() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refreshCallQueue"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 0", 0, exitCode);
+    assertEquals(0, exitCode, "DFSAdmin should return 0");
 
-    assertEquals("Mock queue should have no additional constructions",
-        lastMockQueueConstructions, mockQueueConstructions);
+    assertEquals(lastMockQueueConstructions, mockQueueConstructions,
+        "Mock queue should have no additional constructions");
     try {
-      assertFalse("Puts are routed through LBQ instead of MockQueue",
-          canPutInMockQueue());
+      assertFalse(canPutInMockQueue(), "Puts are routed through LBQ instead of MockQueue");
     } catch (IOException ioe) {
       fail("Could not put into queue at all");
     }
@@ -149,8 +147,9 @@ public void testRefreshCallQueueWithFairCallQueue() throws Exception {
         DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
     NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
     // check callqueue size
-    assertEquals(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT
-        * serviceHandlerCount, rpcServer.getClientRpcServer().getMaxQueueSize());
+    assertEquals(
+        CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT * serviceHandlerCount,
+        rpcServer.getClientRpcServer().getMaxQueueSize());
     // Replace queue and update queue size
     config.setInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
         150);
@@ -170,8 +169,7 @@ public void testRefreshCallQueueWithFairCallQueue() throws Exception {
       DefaultMetricsSystem.setMiniClusterMode(oldValue);
     }
     // check callQueueSize has changed
-    assertEquals(150 * serviceHandlerCount, rpcServer.getClientRpcServer()
-        .getMaxQueueSize());
+    assertEquals(150 * serviceHandlerCount, rpcServer.getClientRpcServer().getMaxQueueSize());
   }
 
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 9cf2180ff59..10dfa93a168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -22,9 +22,9 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestAclCLI extends CLITestHelperDFS {
   private MiniDFSCluster cluster = null;
@@ -38,7 +38,7 @@ protected void initConf() {
         DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false);
   }
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,7 +49,7 @@ public void setUp() throws Exception {
     username = System.getProperty("user.name");
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
index ec31766576d..5f8d10dfdd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
@@ -19,7 +19,7 @@
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test ACL CLI with POSIX ACL inheritance enabled.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
index 2f8dfa5b36b..6097073f6bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,9 +37,9 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.CacheAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCacheAdminCLI extends CLITestHelper {
@@ -51,7 +51,7 @@ public class TestCacheAdminCLI extends CLITestHelper {
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -68,11 +68,10 @@ public void setUp() throws Exception {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
index afc668c5f4b..3e56fc6d2e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -23,7 +23,7 @@
 import java.security.NoSuchAlgorithmException;
 import java.util.UUID;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
@@ -45,9 +45,9 @@
 import org.apache.hadoop.hdfs.tools.CryptoAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCryptoAdminCLI extends CLITestHelperDFS {
@@ -56,7 +56,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
   protected String namenode = null;
   private static File tmpDir;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -78,11 +78,11 @@ public void setUp() throws Exception {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
index 4c27f79e16a..47a89709468 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -27,16 +27,16 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDeleteCLI extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,11 +49,11 @@ public void setUp() throws Exception {
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
index 566755db996..fd6ab7b26cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
@@ -24,23 +24,20 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.xml.sax.SAXException;
 
+@Timeout(300)
 public class TestErasureCodingCLI extends CLITestHelper {
   private final int NUM_OF_DATANODES = 3;
   private MiniDFSCluster dfsCluster = null;
   private DistributedFileSystem fs = null;
   private String namenode = null;
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(300000);
-
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -62,7 +59,7 @@ protected String getTestFile() {
     return "testErasureCodingConf.xml";
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index e0e78941c73..2bd141d54f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,9 +28,9 @@
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHDFSCLI extends CLITestHelperDFS {
 
@@ -38,7 +38,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -63,8 +63,8 @@ public void setUp() throws Exception {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -72,7 +72,7 @@ protected String getTestFile() {
     return "testHDFSConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
index d83baf3a971..cd91bb0b481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,16 +28,16 @@
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestXAttrCLI  extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -53,8 +53,8 @@ public void setUp() throws Exception {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(), 
-        fs instanceof DistributedFileSystem);
+    assertTrue(fs instanceof DistributedFileSystem,
+        "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -62,7 +62,7 @@ protected String getTestFile() {
     return "testXAttrConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
index 2a7660486e8..6e3b8caf5ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithHdfs.java
@@ -29,12 +29,12 @@
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.sink.RollingFileSystemSinkTestBase.MyMetrics1;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of HDFS.
@@ -51,7 +51,7 @@ public class TestRollingFileSystemSinkWithHdfs
    *
    * @throws IOException thrown if cluster creation fails
    */
-  @Before
+  @BeforeEach
   public void setupHdfs() throws IOException {
     Configuration conf = new Configuration();
 
@@ -66,7 +66,7 @@ public void setupHdfs() throws IOException {
   /**
    * Stop the {@link MiniDFSCluster}.
    */
-  @After
+  @AfterEach
   public void shutdownHdfs() {
     if (cluster != null) {
       cluster.shutdown();
@@ -156,8 +156,8 @@ public void testFailedWrite() throws IOException {
 
     ms.publishMetricsNow(); // publish the metrics
 
-    assertTrue("No exception was generated while writing metrics "
-        + "even though HDFS was unavailable", MockSink.errored);
+    assertTrue(MockSink.errored,
+        "No exception was generated while writing metrics " + "even though 
HDFS was unavailable");
 
     try {
       ms.stop();
@@ -187,8 +187,8 @@ public void testFailedClose() throws IOException {
     try {
       ms.stop();
 
-      assertTrue("No exception was generated while stopping sink "
-          + "even though HDFS was unavailable", MockSink.errored);
+      assertTrue(MockSink.errored,
+          "No exception was generated while stopping sink " + "even though 
HDFS was unavailable");
     } catch (MetricsException ex) {
       // Expected
     } finally {
@@ -215,9 +215,9 @@ public void testSilentFailedWrite() throws IOException, InterruptedException {
 
     ms.publishMetricsNow(); // publish the metrics
 
-    assertFalse("An exception was generated writing metrics "
+    assertFalse(MockSink.errored, "An exception was generated writing metrics "
         + "while HDFS was unavailable, even though the sink is set to "
-        + "ignore errors", MockSink.errored);
+        + "ignore errors");
 
     try {
       ms.stop();
@@ -247,9 +247,9 @@ public void testSilentFailedClose() throws IOException {
     try {
       ms.stop();
 
-      assertFalse("An exception was generated stopping sink "
+      assertFalse(MockSink.errored, "An exception was generated stopping sink "
           + "while HDFS was unavailable, even though the sink is set to "
-          + "ignore errors", MockSink.errored);
+          + "ignore errors");
     } finally {
       ms.shutdown();
     }
@@ -300,9 +300,9 @@ public void testFlushThread() throws Exception {
       FileStatus status = fs.getFileStatus(currentFile);
 
       // Each metrics record is 118+ bytes, depending on hostname
-      assertTrue("The flusher thread didn't flush the log contents. Expected "
-          + "at least 236 bytes in the log file, but got " + status.getLen(),
-          status.getLen() >= 236);
+      assertTrue(status.getLen() >= 236,
+          "The flusher thread didn't flush the log contents. Expected "
+              + "at least 236 bytes in the log file, but got " + 
status.getLen());
     } finally {
       RollingFileSystemSink.forceFlush = false;
 
@@ -326,9 +326,9 @@ public void testInitWithNoHDFS() {
     MockSink.errored = false;
     initMetricsSystem(path, true, false);
 
-    assertTrue("The sink was not initialized as expected",
-        MockSink.initialized);
-    assertFalse("The sink threw an unexpected error on initialization",
-        MockSink.errored);
+    assertTrue(MockSink.initialized,
+        "The sink was not initialized as expected");
+    assertFalse(MockSink.errored,
+        "The sink threw an unexpected error on initialization");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
index 906950b46e3..74fd06b52e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/metrics2/sink/TestRollingFileSystemSinkWithSecureHdfs.java
@@ -49,12 +49,12 @@
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test the {@link RollingFileSystemSink} class in the context of HDFS with
@@ -77,7 +77,7 @@ public class TestRollingFileSystemSinkWithSecureHdfs
    *
    * @throws Exception thrown if the KDC setup fails
    */
-  @BeforeClass
+  @BeforeAll
   public static void initKdc() throws Exception {
     Properties kdcConf = MiniKdc.createConf();
     kdc = new MiniKdc(kdcConf, ROOT_TEST_DIR);
@@ -101,7 +101,7 @@ public static void initKdc() throws Exception {
    *
    * @throws Exception thrown if the cluster setup fails
    */
-  @Before
+  @BeforeEach
   public void initCluster() throws Exception {
     HdfsConfiguration conf = createSecureConfig("authentication,privacy");
 
@@ -117,7 +117,7 @@ public void initCluster() throws Exception {
   /**
    * Stop the mini-DFS cluster.
    */
-  @After
+  @AfterEach
   public void stopCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -132,7 +132,7 @@ public void stopCluster() {
   /**
    * Stop the mini-KDC.
    */
-  @AfterClass
+  @AfterAll
   public static void shutdownKdc() {
     if (kdc != null) {
       kdc.stop();
@@ -175,9 +175,9 @@ public void testMissingPropertiesWithSecureHDFS() throws Exception {
 
     initMetricsSystem(path, true, false);
 
-    assertTrue("No exception was generated initializing the sink against a "
+    assertTrue(MockSink.errored, "No exception was generated initializing the 
sink against a "
         + "secure cluster even though the principal and keytab properties "
-        + "were missing", MockSink.errored);
+        + "were missing");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index 5ef57ad77ef..ab47063d040 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -18,10 +18,11 @@
 
 package org.apache.hadoop.net;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -43,15 +44,14 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
+@Timeout(30)
 public class TestNetworkTopology {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestNetworkTopology.class);
@@ -59,10 +59,7 @@ public class TestNetworkTopology {
       NetworkTopology.getInstance(new Configuration());
   private DatanodeDescriptor dataNodes[];
 
-  @Rule
-  public Timeout testTimeout = new Timeout(30000, TimeUnit.MILLISECONDS);
-
-  @Before
+  @BeforeEach
   public void setupDatanodes() {
     dataNodes = new DatanodeDescriptor[] {
         DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
@@ -314,7 +311,7 @@ public void testSortByDistance() throws Exception {
         }
       }
     }
-    assertTrue("Expected to find a different first location", foundRandom);
+    assertTrue(foundRandom, "Expected to find a different first location");
 
     // Array of just remote nodes
     // Expect random first node
@@ -333,7 +330,7 @@ public void testSortByDistance() throws Exception {
         }
       }
     }
-    assertTrue("Expected to find a different first location", foundRandom);
+    assertTrue(foundRandom, "Expected to find a different first location");
 
     //Reader is not a datanode, but is in one of the datanode's rack.
     testNodes[0] = dataNodes[0];
@@ -457,18 +454,18 @@ public void testChooseRandomExcludedNodeList() {
     excludedNodes.add(dataNodes[18]);
     Map<Node, Integer> frequency = pickNodesAtRandom(100, scope, excludedNodes);
 
-    assertEquals("dn[3] should be excluded", 0,
-        frequency.get(dataNodes[3]).intValue());
-    assertEquals("dn[5] should be exclude18d", 0,
-        frequency.get(dataNodes[5]).intValue());
-    assertEquals("dn[7] should be excluded", 0,
-        frequency.get(dataNodes[7]).intValue());
-    assertEquals("dn[9] should be excluded", 0,
-        frequency.get(dataNodes[9]).intValue());
-    assertEquals("dn[13] should be excluded", 0,
-        frequency.get(dataNodes[13]).intValue());
-    assertEquals("dn[18] should be excluded", 0,
-        frequency.get(dataNodes[18]).intValue());
+    assertEquals(0, frequency.get(dataNodes[3]).intValue(),
+        "dn[3] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[5]).intValue(),
+        "dn[5] should be exclude18d");
+    assertEquals(0, frequency.get(dataNodes[7]).intValue(),
+        "dn[7] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[9]).intValue(),
+        "dn[9] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[13]).intValue(),
+        "dn[13] should be excluded");
+    assertEquals(0, frequency.get(dataNodes[18]).intValue(),
+        "dn[18] should be excluded");
     for (Node key : dataNodes) {
       if (excludedNodes.contains(key)) {
         continue;
@@ -495,7 +492,8 @@ public void testChooseRandomExcludeAllNodes() {
     }
   }
 
-  @Test(timeout=180000)
+  @Test
+  @Timeout(value = 180)
   public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
     // start a cluster
     Configuration conf = new HdfsConfiguration();
@@ -509,14 +507,14 @@ public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
       cluster.waitActive();
       
       NamenodeProtocols nn = cluster.getNameNodeRpc();
-      Assert.assertNotNull(nn);
+      assertNotNull(nn);
       
       // Wait for one DataNode to register.
       // The other DataNode will not be able to register up because of the rack mismatch.
       DatanodeInfo[] info;
       while (true) {
         info = nn.getDatanodeReport(DatanodeReportType.LIVE);
-        Assert.assertFalse(info.length == 2);
+        assertFalse(info.length == 2);
         if (info.length == 1) {
           break;
         }
@@ -546,8 +544,8 @@ public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
         }
         Thread.sleep(1000);
       }
-      Assert.assertEquals(info[0].getNetworkLocation(),
-                          info[1].getNetworkLocation());
+      assertEquals(info[0].getNetworkLocation(),
+          info[1].getNetworkLocation());
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -596,10 +594,10 @@ private void verifyResults(int upperbound, Set<Node> excludedNodes,
       final Node n = dataNodes[i];
       LOG.info("Verifying node {}", n);
       if (excludedNodes.contains(n)) {
-        assertEquals(n + " should not have been chosen.", 0,
-            (int) frequency.get(n));
+        assertEquals(0, (int) frequency.get(n),
+            n + " should not have been chosen.");
       } else {
-        assertTrue(n + " should have been chosen", frequency.get(n) > 0);
+        assertTrue(frequency.get(n) > 0, n + " should have been chosen");
       }
     }
   }
@@ -614,8 +612,8 @@ public void testChooseRandomInclude3() {
     LOG.info("No node is excluded.");
     for (int i = 0; i < 5; ++i) {
       // all nodes should be more than zero
-      assertTrue(dataNodes[i] + " should have been chosen.",
-          frequency.get(dataNodes[i]) > 0);
+      assertTrue(frequency.get(dataNodes[i]) > 0,
+          dataNodes[i] + " should have been chosen.");
     }
   }
 
@@ -682,7 +680,7 @@ private void testShuffleInternal(int activeLen) {
     cluster.shuffle(testNodes, activeLen);
 
     for (int i = 0; i < testNodes.length; ++i) {
-      Assert.assertEquals(testNodes[i], dataNodes[idxList.get(i)]);
+      assertEquals(testNodes[i], dataNodes[idxList.get(i)]);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 188476f0247..ec657ed3b12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -17,13 +17,11 @@
  */
 package org.apache.hadoop.security;
 
-import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -44,7 +42,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /** Unit tests for permission */
 public class TestPermission {
@@ -227,8 +225,9 @@ public void testFilePermission() throws Exception {
       // following dir/file creations are legal
       nnfs.mkdirs(CHILD_DIR1);
       status = nnfs.getFileStatus(CHILD_DIR1);
-      assertThat("Expect 755 = 777 (default dir) - 022 (default umask)",
-          status.getPermission().toString(), is("rwxr-xr-x"));
+      assertThat(status.getPermission().toString())
+          .as("Expect 755 = 777 (default dir) - 022 (default umask)")
+          .isEqualTo("rwxr-xr-x");
       out = nnfs.create(CHILD_FILE1);
       status = nnfs.getFileStatus(CHILD_FILE1);
       assertTrue(status.getPermission().toString().equals("rw-r--r--"));
@@ -243,8 +242,9 @@ public void testFilePermission() throws Exception {
       // mkdirs with null permission
       nnfs.mkdirs(CHILD_DIR3, null);
       status = nnfs.getFileStatus(CHILD_DIR3);
-      assertThat("Expect 755 = 777 (default dir) - 022 (default umask)",
-          status.getPermission().toString(), is("rwxr-xr-x"));
+      assertThat(status.getPermission().toString())
+          .as("Expect 755 = 777 (default dir) - 022 (default umask)")
+          .isEqualTo("rwxr-xr-x");
 
       // following read is legal
       byte dataIn[] = new byte[FILE_LEN];
@@ -318,17 +318,20 @@ private void testSuperCanChangeOwnerGroup() throws Exception {
     Path file = createFile(userfs, "testSuperCanChangeOwnerGroup");
     nnfs.setOwner(file, NOUSER, NOGROUP);
     FileStatus status = nnfs.getFileStatus(file);
-    assertThat("A super user can change owner", status.getOwner(),
-        is(NOUSER));
-    assertThat("A super user can change group", status.getGroup(),
-        is(NOGROUP));
+    assertThat(status.getOwner())
+        .as("A super user can change owner")
+        .isEqualTo(NOUSER);
+    assertThat(status.getGroup())
+        .as("A super user can change group")
+        .isEqualTo(NOGROUP);
   }
 
   private void testNonSuperCanChangeToOwnGroup() throws Exception {
     Path file = createFile(userfs, "testNonSuperCanChangeToOwnGroup");
     userfs.setOwner(file, null, GROUP_NAMES[1]);
-    assertThat("A non-super user can change a file to own group",
-        nnfs.getFileStatus(file).getGroup(), is(GROUP_NAMES[1]));
+    assertThat(nnfs.getFileStatus(file).getGroup())
+        .as("A non-super user can change a file to own group")
+        .isEqualTo(GROUP_NAMES[1]);
   }
 
   private void testNonSuperCannotChangeToOtherGroup() throws Exception {
@@ -338,8 +341,9 @@ private void testNonSuperCannotChangeToOtherGroup() throws Exception {
       fail("Expect ACE when a non-super user tries to change a file to a " +
           "group where the user does not belong.");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User " +
-          userfs.getFileStatus(file).getOwner() + " does not belong to"));
+      assertThat(e.getMessage())
+          .startsWith("User " +
+              userfs.getFileStatus(file).getOwner() + " does not belong to");
     }
   }
 
@@ -351,7 +355,7 @@ private void testNonSuperCannotChangeGroupForOtherFile() throws Exception {
       fail("Expect ACE when a non-super user tries to set group for a file " +
           "not owned");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("Permission denied"));
+      assertThat(e.getMessage()).startsWith("Permission denied");
     }
   }
 
@@ -373,9 +377,9 @@ private void testNonSuperCannotChangeOwner() throws Exception {
       userfs.setOwner(file, NOUSER, null);
       fail("Expect ACE when a non-super user tries to change owner");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User " +
+      assertThat(e.getMessage()).startsWith("User " +
           userfs.getFileStatus(file).getOwner() +
-          " is not a super user (non-super user cannot change owner)"));
+          " is not a super user (non-super user cannot change owner)");
     }
   }
 
@@ -386,7 +390,7 @@ private void testNonSuperCannotChangeOwnerForOtherFile() throws Exception {
       userfs.setOwner(file, USER_NAME, null);
       fail("Expect ACE when a non-super user tries to own a file");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("Permission denied"));
+      assertThat(e.getMessage()).startsWith("Permission denied");
     }
   }
 
@@ -400,9 +404,9 @@ private void testNonSuperCannotChangeOwnerForNonExistentFile()
       fail("Expect ACE or FNFE when a non-super user tries to change owner " +
           "for a non-existent file");
     } catch (AccessControlException e) {
-      assertThat(e.getMessage(), startsWith("User " +
+      assertThat(e.getMessage()).startsWith("User " +
           userfs.getFileStatus(file).getOwner() +
-          " is not a super user (non-super user cannot change owner)"));
+          " is not a super user (non-super user cannot change owner)");
     } catch (FileNotFoundException e) {
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
index ad5b86c837e..54e0bac17cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
@@ -21,10 +21,10 @@
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -46,11 +46,12 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestPermissionSymlinks {
 
@@ -70,7 +71,7 @@ public class TestPermissionSymlinks {
   private static FileSystem fs;
   private static FileSystemTestWrapper wrapper;
   
-  @BeforeClass
+  @BeforeAll
   public static void beforeClassSetUp() throws Exception {
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -81,7 +82,7 @@ public static void beforeClassSetUp() throws Exception {
     wrapper = new FileSystemTestWrapper(fs);
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClassTearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -91,7 +92,7 @@ public static void afterClassTearDown() throws Exception {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Create initial test files
     fs.mkdirs(linkParent);
@@ -100,14 +101,15 @@ public void setUp() throws Exception {
     wrapper.createSymlink(target, link, false);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Wipe out everything
     fs.delete(linkParent, true);
     fs.delete(targetParent, true);
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testDelete() throws Exception {
     fs.setPermission(linkParent, new FsPermission((short) 0555));
     doDeleteLinkParentNotWritable();
@@ -173,13 +175,14 @@ public Object run() throws IOException {
       }
     });
     // Make sure only the link was deleted
-    assertTrue("Target should not have been deleted!",
-        wrapper.exists(target));
-    assertFalse("Link should have been deleted!",
-        wrapper.exists(link));
+    assertTrue(wrapper.exists(target),
+        "Target should not have been deleted!");
+    assertFalse(wrapper.exists(link),
+        "Link should have been deleted!");
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testReadWhenTargetNotReadable() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0000));
     doReadTargetNotReadable();
@@ -212,7 +215,8 @@ public Object run() throws IOException {
     }
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testFileStatus() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0000));
     doGetFileLinkStatusTargetNotReadable();
@@ -235,17 +239,17 @@ private void doGetFileLinkStatusTargetNotReadable() throws Exception {
       public Object run() throws IOException {
         FileContext myfc = FileContext.getFileContext(conf);
         FileStatus stat = myfc.getFileLinkStatus(link);
-        assertEquals("Expected link's FileStatus path to match link!",
-            link.makeQualified(fs.getUri(), fs.getWorkingDirectory()), stat.getPath());
+        assertEquals(link.makeQualified(fs.getUri(), fs.getWorkingDirectory()), stat.getPath(),
+            "Expected link's FileStatus path to match link!");
         Path linkTarget = myfc.getLinkTarget(link);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget, "Expected link's target to match 
target!");
         return null;
       }
     });
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testRenameLinkTargetNotWritableFC() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
@@ -277,15 +281,15 @@ public Object run() throws IOException {
         Path newlink = new Path(linkParent, "newlink");
         myfc.rename(link, newlink, Rename.NONE);
         Path linkTarget = myfc.getLinkTarget(newlink);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget, "Expected link's target to match 
target!");
         return null;
       }
     });
-    assertTrue("Expected target to exist", wrapper.exists(target));
+    assertTrue(wrapper.exists(target), "Expected target to exist");
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testRenameSrcNotWritableFC() throws Exception {
     fs.setPermission(linkParent, new FsPermission((short) 0555));
     doRenameSrcNotWritableFC();
@@ -322,7 +326,8 @@ public Object run() throws IOException {
   // Need separate FileSystem tests since the server-side impl is different
   // See {@link ClientProtocol#rename} and {@link ClientProtocol#rename2}.
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testRenameLinkTargetNotWritableFS() throws Exception {
     fs.setPermission(target, new FsPermission((short) 0555));
     fs.setPermission(targetParent, new FsPermission((short) 0555));
@@ -354,15 +359,15 @@ public Object run() throws IOException {
         Path newlink = new Path(linkParent, "newlink");
         myfs.rename(link, newlink);
         Path linkTarget = myfs.getLinkTarget(newlink);
-        assertEquals("Expected link's target to match target!",
-            target, linkTarget);
+        assertEquals(target, linkTarget, "Expected link's target to match 
target!");
         return null;
       }
     });
-    assertTrue("Expected target to exist", wrapper.exists(target));
+    assertTrue(wrapper.exists(target), "Expected target to exist");
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testRenameSrcNotWritableFS() throws Exception {
     fs.setPermission(linkParent, new FsPermission((short) 0555));
     doRenameSrcNotWritableFS();
@@ -427,7 +432,7 @@ public FileContext run() throws IOException {
     } catch (AccessControlException ace) {
       // expected
       String message = ace.getMessage();
-      assertTrue(message, message.contains("is not a directory"));
+      assertTrue(message.contains("is not a directory"), message);
       assertTrue(message.contains(target.toString()));
       assertFalse(message.contains(badPath.toString()));
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
index 6806b6715b1..e2cae0214b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
@@ -19,9 +19,9 @@
 package org.apache.hadoop.security;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -52,10 +52,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestRefreshUserMappings {
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -100,7 +99,7 @@ public Set<String> getGroupsSet(String user) {
     }
   }
   
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     config = new Configuration();
     config.setClass("hadoop.security.group.mapping",
@@ -116,7 +115,7 @@ public void setUp() throws Exception {
     GenericTestUtils.setLogLevel(Groups.LOG, Level.DEBUG);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
@@ -144,7 +143,7 @@ public void testGroupMappingRefresh() throws Exception {
     List<String> g2 = groups.getGroups(user);
     LOG.debug(g2.toString());
     for(int i=0; i<g2.size(); i++) {
-      assertEquals("Should be same group ", g1.get(i), g2.get(i));
+      assertEquals(g1.get(i), g2.get(i), "Should be same group ");
     }
 
     // Test refresh command
@@ -153,8 +152,8 @@ public void testGroupMappingRefresh() throws Exception {
     List<String> g3 = groups.getGroups(user);
     LOG.debug(g3.toString());
     for(int i=0; i<g3.size(); i++) {
-      assertFalse("Should be different group: "
-              + g1.get(i) + " and " + g3.get(i), g1.get(i).equals(g3.get(i)));
+      assertFalse(g1.get(i).equals(g3.get(i)),
+          "Should be different group: " + g1.get(i) + " and " + g3.get(i));
     }
 
     // Test timeout
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
index acd29655071..00665464d10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
@@ -20,8 +20,8 @@
 
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -42,10 +42,9 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.JMXGet;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Startup and checkpoint tests
@@ -61,7 +60,7 @@ public class TestJMXGet {
   static final int blockSize = 4096;
   static final int fileSize = 8192;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
   }
@@ -69,7 +68,7 @@ public void setUp() throws Exception {
   /**
    * clean up
    */
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       if (cluster.isClusterUp()) {
@@ -101,14 +100,14 @@ public void testNameNode() throws Exception {
     String serviceName = "NameNode";
     jmx.setService(serviceName);
     jmx.init(); // default lists namenode mbeans only
-    assertTrue("error printAllValues", checkPrintAllValues(jmx));
+    assertTrue(checkPrintAllValues(jmx), "error printAllValues");
 
     //get some data from different source
     try {
       DFSTestUtil.waitForMetric(jmx, "NumLiveDataNodes", numDatanodes);
     } catch (TimeoutException e) {
-    assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, "NumLiveDataNodes"),numDatanodes, Integer.parseInt(
-        jmx.getValue("NumLiveDataNodes")));
+      assertEquals(numDatanodes, Integer.parseInt(jmx.getValue("NumLiveDataNodes")),
+          String.format(WRONG_METRIC_VALUE_ERROR_MSG, "NumLiveDataNodes"));
     }
     assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")),
                 getMetrics("FSNamesystem"));
@@ -117,7 +116,7 @@ public void testNameNode() throws Exception {
     MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
     ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
     Set<ObjectName> names = mbsc.queryNames(query, null);
-    assertTrue("No beans should be registered for " + serviceName, 
names.isEmpty());
+    assertTrue(names.isEmpty(), "No beans should be registered for " + 
serviceName);
   }
   
   private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
@@ -162,13 +161,14 @@ public void testDataNode() throws Exception {
     try {
       DFSTestUtil.waitForMetric(jmx, "BytesWritten", fileSize);
     } catch (TimeoutException e) {
-      assertEquals(String.format(WRONG_METRIC_VALUE_ERROR_MSG, "BytesWritten"), fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
+      assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")),
+          String.format(WRONG_METRIC_VALUE_ERROR_MSG, "BytesWritten"));
     }
 
     cluster.shutdown();
     MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
     ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
     Set<ObjectName> names = mbsc.queryNames(query, null);
-    assertTrue("No beans should be registered for " + serviceName, 
names.isEmpty());
+    assertTrue(names.isEmpty(), "No beans should be registered for " + 
serviceName);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
index a814035e0f5..466d761c8cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.tools;
 
-import static org.junit.Assert.*;
-
 import java.io.ByteArrayOutputStream;
 import java.io.PipedInputStream;
 import java.io.PipedOutputStream;
@@ -30,18 +28,21 @@
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ExitUtil.ExitException;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.thirdparty.com.google.common.io.ByteStreams;
 
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
 public class TestTools {
 
   private static final int PIPE_BUFFER_SIZE = 1024 * 5;
   private final static String INVALID_OPTION = "-invalidOption";
   private static final String[] OPTIONS = new String[2];
 
-  @BeforeClass
+  @BeforeAll
   public static void before() {
     ExitUtil.disableSystemExit();
     OPTIONS[1] = INVALID_OPTION;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
