This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new ad12f6944e8 HADOOP-19427. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-compat-bench part2. (#7948)
ad12f6944e8 is described below

commit ad12f6944e80e71cb067c03c8b5f9e1e9f0f84d3
Author: zhtttylz <hualon...@hotmail.com>
AuthorDate: Thu Sep 11 12:14:53 2025 +0800

    HADOOP-19427. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-compat-bench part2. (#7948)
    
    * HADOOP-19427. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-compat-bench part2.
    
    Signed-off-by: Shilun Fan <slfan1...@apache.org>
---
 hadoop-tools/hadoop-compat-bench/pom.xml           |  7 +--
 .../hadoop/fs/compat/cases/HdfsCompatAcl.java      | 31 ++++++------
 .../hadoop/fs/compat/cases/HdfsCompatCreate.java   | 20 +++++---
 .../fs/compat/cases/HdfsCompatDirectory.java       | 54 ++++++++++----------
 .../hadoop/fs/compat/cases/HdfsCompatFile.java     | 58 ++++++++++++----------
 .../hadoop/fs/compat/cases/HdfsCompatLocal.java    | 24 +++++----
 .../hadoop/fs/compat/cases/HdfsCompatServer.java   | 34 +++++++------
 .../hadoop/fs/compat/cases/HdfsCompatSnapshot.java | 19 ++++---
 .../fs/compat/cases/HdfsCompatStoragePolicy.java   |  9 ++--
 .../hadoop/fs/compat/cases/HdfsCompatSymlink.java  | 12 +++--
 .../hadoop/fs/compat/cases/HdfsCompatTpcds.java    | 23 +++++----
 .../hadoop/fs/compat/cases/HdfsCompatXAttr.java    | 23 +++++----
 12 files changed, 172 insertions(+), 142 deletions(-)

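For readers skimming the diff: the change in every file below is mechanical. Calls through JUnit 4's org.junit.Assert are replaced by the same assertions statically imported from JUnit 5's org.junit.jupiter.api.Assertions, and the build is rewired accordingly. A minimal sketch of the pattern (the class below is a hypothetical illustration, not part of this commit; it only assumes junit-jupiter-api on the compile classpath, which the pom.xml change below arranges):

    // JUnit 4 (before):
    //   import org.junit.Assert;
    //   Assert.assertEquals(2, keys.size());
    //   Assert.assertTrue(keys.contains("user.key1"));

    // JUnit 5 (after):
    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.Arrays;
    import java.util.List;

    public class JUnit5MigrationSketch {
      public static void main(String[] args) {
        List<String> keys = Arrays.asList("user.key1", "user.key2");
        assertEquals(2, keys.size());            // was Assert.assertEquals(...)
        assertTrue(keys.contains("user.key1"));  // was Assert.assertTrue(...)
      }
    }

One caveat for similar migrations: JUnit 4 assertions take an optional message as the first argument, while the JUnit 5 overloads take it as the last. The calls in this commit pass no messages, so they map one-to-one.
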
diff --git a/hadoop-tools/hadoop-compat-bench/pom.xml b/hadoop-tools/hadoop-compat-bench/pom.xml
index b6ef232850e..3c8d2e43591 100644
--- a/hadoop-tools/hadoop-compat-bench/pom.xml
+++ b/hadoop-tools/hadoop-compat-bench/pom.xml
@@ -40,11 +40,6 @@
       <artifactId>hadoop-hdfs</artifactId>
       <scope>provided</scope>
     </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>compile</scope>
-    </dependency>
 
     <!-- For test -->
     <dependency>
@@ -72,7 +67,7 @@
     <dependency>
       <groupId>org.junit.jupiter</groupId>
       <artifactId>junit-jupiter-api</artifactId>
-      <scope>test</scope>
+      <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.junit.jupiter</groupId>
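
A note on the two pom.xml hunks above: the HdfsCompat* cases changed in this commit live under src/main/java rather than src/test/java, so their assertions are referenced from the module's main sources. That is presumably why junit-jupiter-api is promoted from test to compile scope here, and why the standalone JUnit 4 artifact can be dropped outright.
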
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAcl.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAcl.java
index 010a15338ff..126535004f7 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAcl.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatAcl.java
@@ -24,11 +24,14 @@
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.util.List;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "ACL")
 public class HdfsCompatAcl extends AbstractHdfsCompatCase {
   private static final String INIT_FILE_ACL =
@@ -63,10 +66,10 @@ public void modifyAclEntries() throws IOException {
     for (AclEntry acl : acls) {
       if ("foo".equals(acl.getName())) {
         ++count;
-        Assert.assertEquals(FsAction.NONE, acl.getPermission());
+        assertEquals(FsAction.NONE, acl.getPermission());
       }
     }
-    Assert.assertEquals(1, count);
+    assertEquals(1, count);
   }
 
   @HdfsCompatCase
@@ -76,15 +79,15 @@ public void removeAclEntries() throws IOException {
     entries = AclEntry.parseAclSpec("user:foo:---", true);
     fs().removeAclEntries(file, entries);
     List<AclEntry> acls = fs().getAclStatus(file).getEntries();
-    Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
-    Assert.assertTrue(acls.stream().anyMatch(e -> "bar".equals(e.getName())));
+    assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
+    assertTrue(acls.stream().anyMatch(e -> "bar".equals(e.getName())));
   }
 
   @HdfsCompatCase
   public void removeDefaultAcl() throws IOException {
     fs().removeDefaultAcl(dir);
     List<AclEntry> acls = fs().getAclStatus(dir).getEntries();
-    Assert.assertTrue(acls.stream().noneMatch(
+    assertTrue(acls.stream().noneMatch(
         e -> (e.getScope() == AclEntryScope.DEFAULT)));
   }
 
@@ -92,29 +95,29 @@ public void removeDefaultAcl() throws IOException {
   public void removeAcl() throws IOException {
     fs().removeAcl(file);
     List<AclEntry> acls = fs().getAclStatus(file).getEntries();
-    Assert.assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
+    assertTrue(acls.stream().noneMatch(e -> "foo".equals(e.getName())));
   }
 
   @HdfsCompatCase
   public void setAcl() throws IOException {
     List<AclEntry> acls = fs().getAclStatus(file).getEntries();
-    Assert.assertTrue(acls.stream().anyMatch(e -> "foo".equals(e.getName())));
+    assertTrue(acls.stream().anyMatch(e -> "foo".equals(e.getName())));
   }
 
   @HdfsCompatCase
   public void getAclStatus() throws IOException {
     AclStatus status = fs().getAclStatus(dir);
-    Assert.assertFalse(status.getOwner().isEmpty());
-    Assert.assertFalse(status.getGroup().isEmpty());
+    assertFalse(status.getOwner().isEmpty());
+    assertFalse(status.getGroup().isEmpty());
     List<AclEntry> acls = status.getEntries();
-    Assert.assertTrue(acls.stream().anyMatch(e ->
+    assertTrue(acls.stream().anyMatch(e ->
         e.getScope() == AclEntryScope.DEFAULT));
 
     status = fs().getAclStatus(file);
-    Assert.assertFalse(status.getOwner().isEmpty());
-    Assert.assertFalse(status.getGroup().isEmpty());
+    assertFalse(status.getOwner().isEmpty());
+    assertFalse(status.getGroup().isEmpty());
     acls = status.getEntries();
-    Assert.assertTrue(acls.stream().anyMatch(e ->
+    assertTrue(acls.stream().anyMatch(e ->
         e.getScope() == AclEntryScope.ACCESS));
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatCreate.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatCreate.java
index 6dd907a0a1c..0f900fbf917 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatCreate.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatCreate.java
@@ -20,12 +20,16 @@
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.compat.common.*;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.concurrent.CompletableFuture;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
 @HdfsCompatCaseGroup(name = "Create")
 public class HdfsCompatCreate extends AbstractHdfsCompatCase {
   private Path path;
@@ -43,7 +47,7 @@ public void cleanup() {
   @HdfsCompatCase
   public void mkdirs() throws IOException {
     fs().mkdirs(path);
-    Assert.assertTrue(fs().exists(path));
+    assertTrue(fs().exists(path));
   }
 
   @HdfsCompatCase
@@ -51,7 +55,7 @@ public void create() throws IOException {
     FSDataOutputStream out = null;
     try {
       out = fs().create(path, true);
-      Assert.assertTrue(fs().exists(path));
+      assertTrue(fs().exists(path));
     } finally {
       IOUtils.closeStream(out);
     }
@@ -62,7 +66,7 @@ public void createNonRecursive() {
     Path file = new Path(path, "file-no-parent");
     try {
       fs().createNonRecursive(file, true, 1024, (short) 1, 1048576, null);
-      Assert.fail("Should fail since parent does not exist");
+      fail("Should fail since parent does not exist");
     } catch (IOException ignored) {
     }
   }
@@ -70,7 +74,7 @@ public void createNonRecursive() {
   @HdfsCompatCase
   public void createNewFile() throws IOException {
     HdfsCompatUtil.createFile(fs(), path, 0);
-    Assert.assertFalse(fs().createNewFile(path));
+    assertFalse(fs().createNewFile(path));
   }
 
   @HdfsCompatCase
@@ -84,7 +88,7 @@ public void append() throws IOException {
       out.close();
       out = null;
       FileStatus fileStatus = fs().getFileStatus(path);
-      Assert.assertEquals(128 + 64, fileStatus.getLen());
+      assertEquals(128 + 64, fileStatus.getLen());
     } finally {
       IOUtils.closeStream(out);
     }
@@ -101,7 +105,7 @@ public void createFile() throws IOException {
       out.write("Hello World!".getBytes(StandardCharsets.UTF_8));
       out.close();
       out = null;
-      Assert.assertTrue(fs().exists(file));
+      assertTrue(fs().exists(file));
     } finally {
       IOUtils.closeStream(out);
     }
@@ -119,7 +123,7 @@ public void appendFile() throws IOException {
       out.close();
       out = null;
       FileStatus fileStatus = fs().getFileStatus(path);
-      Assert.assertEquals(128 + 64, fileStatus.getLen());
+      assertEquals(128 + 64, fileStatus.getLen());
     } finally {
       IOUtils.closeStream(out);
     }
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatDirectory.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatDirectory.java
index 4e0f8235462..38659d3a0c2 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatDirectory.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatDirectory.java
@@ -19,12 +19,16 @@
 
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
 @HdfsCompatCaseGroup(name = "Directory")
 public class HdfsCompatDirectory extends AbstractHdfsCompatCase {
   private static final int FILE_LEN = 128;
@@ -45,101 +49,101 @@ public void cleanup() throws IOException {
 
   @HdfsCompatCase
   public void isDirectory() throws IOException {
-    Assert.assertTrue(fs().isDirectory(dir));
+    assertTrue(fs().isDirectory(dir));
   }
 
   @HdfsCompatCase
   public void listStatus() throws IOException {
     FileStatus[] files = fs().listStatus(dir);
-    Assert.assertNotNull(files);
-    Assert.assertEquals(1, files.length);
-    Assert.assertEquals(file.getName(), files[0].getPath().getName());
+    assertNotNull(files);
+    assertEquals(1, files.length);
+    assertEquals(file.getName(), files[0].getPath().getName());
   }
 
   @HdfsCompatCase
   public void globStatus() throws IOException {
     FileStatus[] files = fs().globStatus(new Path(dir, "*ile"));
-    Assert.assertNotNull(files);
-    Assert.assertEquals(1, files.length);
-    Assert.assertEquals(file.getName(), files[0].getPath().getName());
+    assertNotNull(files);
+    assertEquals(1, files.length);
+    assertEquals(file.getName(), files[0].getPath().getName());
   }
 
   @HdfsCompatCase
   public void listLocatedStatus() throws IOException {
     RemoteIterator<LocatedFileStatus> locatedFileStatuses =
         fs().listLocatedStatus(dir);
-    Assert.assertNotNull(locatedFileStatuses);
+    assertNotNull(locatedFileStatuses);
     List<LocatedFileStatus> files = new ArrayList<>();
     while (locatedFileStatuses.hasNext()) {
       files.add(locatedFileStatuses.next());
     }
-    Assert.assertEquals(1, files.size());
+    assertEquals(1, files.size());
     LocatedFileStatus fileStatus = files.get(0);
-    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
+    assertEquals(file.getName(), fileStatus.getPath().getName());
   }
 
   @HdfsCompatCase
   public void listStatusIterator() throws IOException {
     RemoteIterator<FileStatus> fileStatuses = fs().listStatusIterator(dir);
-    Assert.assertNotNull(fileStatuses);
+    assertNotNull(fileStatuses);
     List<FileStatus> files = new ArrayList<>();
     while (fileStatuses.hasNext()) {
       files.add(fileStatuses.next());
     }
-    Assert.assertEquals(1, files.size());
+    assertEquals(1, files.size());
     FileStatus fileStatus = files.get(0);
-    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
+    assertEquals(file.getName(), fileStatus.getPath().getName());
   }
 
   @HdfsCompatCase
   public void listFiles() throws IOException {
     RemoteIterator<LocatedFileStatus> iter = fs().listFiles(dir, true);
-    Assert.assertNotNull(iter);
+    assertNotNull(iter);
     List<LocatedFileStatus> files = new ArrayList<>();
     while (iter.hasNext()) {
       files.add(iter.next());
     }
-    Assert.assertEquals(1, files.size());
+    assertEquals(1, files.size());
   }
 
   @HdfsCompatCase
   public void listCorruptFileBlocks() throws IOException {
     RemoteIterator<Path> iter = fs().listCorruptFileBlocks(dir);
-    Assert.assertNotNull(iter);
-    Assert.assertFalse(iter.hasNext());  // No corrupted file
+    assertNotNull(iter);
+    assertFalse(iter.hasNext());  // No corrupted file
   }
 
   @HdfsCompatCase
   public void getContentSummary() throws IOException {
     ContentSummary summary = fs().getContentSummary(dir);
-    Assert.assertEquals(1, summary.getFileCount());
-    Assert.assertEquals(1, summary.getDirectoryCount());
-    Assert.assertEquals(FILE_LEN, summary.getLength());
+    assertEquals(1, summary.getFileCount());
+    assertEquals(1, summary.getDirectoryCount());
+    assertEquals(FILE_LEN, summary.getLength());
   }
 
   @HdfsCompatCase
   public void getUsed() throws IOException {
     long used = fs().getUsed(dir);
-    Assert.assertTrue(used >= FILE_LEN);
+    assertTrue(used >= FILE_LEN);
   }
 
   @HdfsCompatCase
   public void getQuotaUsage() throws IOException {
     QuotaUsage usage = fs().getQuotaUsage(dir);
-    Assert.assertEquals(2, usage.getFileAndDirectoryCount());
+    assertEquals(2, usage.getFileAndDirectoryCount());
   }
 
   @HdfsCompatCase
   public void setQuota() throws IOException {
     fs().setQuota(dir, 1048576L, 1073741824L);
     QuotaUsage usage = fs().getQuotaUsage(dir);
-    Assert.assertEquals(1048576L, usage.getQuota());
+    assertEquals(1048576L, usage.getQuota());
   }
 
   @HdfsCompatCase
   public void setQuotaByStorageType() throws IOException {
     fs().setQuotaByStorageType(dir, StorageType.DISK, 1048576L);
     QuotaUsage usage = fs().getQuotaUsage(dir);
-    Assert.assertEquals(1048576L, usage.getTypeQuota(StorageType.DISK));
+    assertEquals(1048576L, usage.getTypeQuota(StorageType.DISK));
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
index a76f95fb8d7..840fc2d175a 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatFile.java
@@ -24,12 +24,18 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.DataChecksum;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Random;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
 @HdfsCompatCaseGroup(name = "File")
 public class HdfsCompatFile extends AbstractHdfsCompatCase {
   private static final int FILE_LEN = 128;
@@ -53,42 +59,42 @@ public void cleanup() throws IOException {
   @HdfsCompatCase
   public void getFileStatus() throws IOException {
     FileStatus fileStatus = fs().getFileStatus(file);
-    Assert.assertNotNull(fileStatus);
-    Assert.assertEquals(file.getName(), fileStatus.getPath().getName());
+    assertNotNull(fileStatus);
+    assertEquals(file.getName(), fileStatus.getPath().getName());
   }
 
   @HdfsCompatCase
   public void exists() throws IOException {
-    Assert.assertTrue(fs().exists(file));
+    assertTrue(fs().exists(file));
   }
 
   @HdfsCompatCase
   public void isFile() throws IOException {
-    Assert.assertTrue(fs().isFile(file));
+    assertTrue(fs().isFile(file));
   }
 
   @HdfsCompatCase
   public void getLength() throws IOException {
-    Assert.assertEquals(FILE_LEN, fs().getLength(file));
+    assertEquals(FILE_LEN, fs().getLength(file));
   }
 
   @HdfsCompatCase(brief = "arbitrary blockSize")
   public void getBlockSize() throws IOException {
-    Assert.assertEquals(BLOCK_SIZE, fs().getBlockSize(file));
+    assertEquals(BLOCK_SIZE, fs().getBlockSize(file));
   }
 
   @HdfsCompatCase
   public void renameFile() throws IOException {
     Path dst = new Path(file.toString() + "_rename_dst");
     fs().rename(file, dst);
-    Assert.assertFalse(fs().exists(file));
-    Assert.assertTrue(fs().exists(dst));
+    assertFalse(fs().exists(file));
+    assertTrue(fs().exists(dst));
   }
 
   @HdfsCompatCase
   public void deleteFile() throws IOException {
     fs().delete(file, true);
-    Assert.assertFalse(fs().exists(file));
+    assertFalse(fs().exists(file));
   }
 
   @HdfsCompatCase
@@ -96,7 +102,7 @@ public void deleteOnExit() throws IOException {
     FileSystem newFs = FileSystem.newInstance(fs().getUri(), fs().getConf());
     newFs.deleteOnExit(file);
     newFs.close();
-    Assert.assertFalse(fs().exists(file));
+    assertFalse(fs().exists(file));
   }
 
   @HdfsCompatCase
@@ -105,7 +111,7 @@ public void cancelDeleteOnExit() throws IOException {
     newFs.deleteOnExit(file);
     newFs.cancelDeleteOnExit(file);
     newFs.close();
-    Assert.assertTrue(fs().exists(file));
+    assertTrue(fs().exists(file));
   }
 
   @HdfsCompatCase
@@ -117,7 +123,7 @@ public void truncate() throws IOException, InterruptedException {
       finished = fs().truncate(file, newLen);
     }
     FileStatus fileStatus = fs().getFileStatus(file);
-    Assert.assertEquals(newLen, fileStatus.getLen());
+    assertEquals(newLen, fileStatus.getLen());
   }
 
   @HdfsCompatCase
@@ -133,8 +139,8 @@ public void setOwner() throws Exception {
         }
     );
     FileStatus fileStatus = fs().getFileStatus(file);
-    Assert.assertEquals(owner, fileStatus.getOwner());
-    Assert.assertEquals(group, fileStatus.getGroup());
+    assertEquals(owner, fileStatus.getOwner());
+    assertEquals(group, fileStatus.getGroup());
   }
 
   @HdfsCompatCase
@@ -143,8 +149,8 @@ public void setTimes() throws IOException {
     final long mtime = atime - 1000;
     fs().setTimes(file, mtime, atime);
     FileStatus fileStatus = fs().getFileStatus(file);
-    Assert.assertEquals(mtime, fileStatus.getModificationTime());
-    Assert.assertEquals(atime, fileStatus.getAccessTime());
+    assertEquals(mtime, fileStatus.getModificationTime());
+    assertEquals(atime, fileStatus.getAccessTime());
   }
 
   @HdfsCompatCase
@@ -157,7 +163,7 @@ public void concat() throws IOException {
       HdfsCompatUtil.createFile(fs(), dst, 16);
       fs().concat(dst, new Path[]{src});
       FileStatus fileStatus = fs().getFileStatus(dst);
-      Assert.assertEquals(16 + 64, fileStatus.getLen());
+      assertEquals(16 + 64, fileStatus.getLen());
     } finally {
       HdfsCompatUtil.deleteQuietly(fs(), dir, true);
     }
@@ -166,29 +172,29 @@ public void concat() throws IOException {
   @HdfsCompatCase
   public void getFileChecksum() throws IOException {
     FileChecksum checksum = fs().getFileChecksum(file);
-    Assert.assertNotNull(checksum);
-    Assert.assertNotNull(checksum.getChecksumOpt());
+    assertNotNull(checksum);
+    assertNotNull(checksum.getChecksumOpt());
     DataChecksum.Type type = checksum.getChecksumOpt().getChecksumType();
-    Assert.assertNotEquals(DataChecksum.Type.NULL, type);
+    assertNotEquals(DataChecksum.Type.NULL, type);
   }
 
   @HdfsCompatCase
   public void getFileBlockLocations() throws IOException {
     BlockLocation[] locations = fs().getFileBlockLocations(file, 0, FILE_LEN);
-    Assert.assertTrue(locations.length >= 1);
+    assertTrue(locations.length >= 1);
     BlockLocation location = locations[0];
-    Assert.assertTrue(location.getLength() > 0);
+    assertTrue(location.getLength() > 0);
   }
 
   @HdfsCompatCase
   public void getReplication() throws IOException {
-    Assert.assertEquals(REPLICATION, fs().getReplication(file));
+    assertEquals(REPLICATION, fs().getReplication(file));
   }
 
   @HdfsCompatCase(brief = "arbitrary replication")
   public void setReplication() throws IOException {
     fs().setReplication(this.file, (short) 2);
-    Assert.assertEquals(2, fs().getReplication(this.file));
+    assertEquals(2, fs().getReplication(this.file));
   }
 
   @HdfsCompatCase
@@ -234,7 +240,7 @@ public void setPermission() throws IOException {
     fs().setPermission(file, FsPermission.createImmutable((short) 511));
     try {
       fs().access(file, FsAction.ALL);
-      Assert.fail("Should not have write permission");
+      fail("Should not have write permission");
     } catch (Throwable ignored) {
     }
   }
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatLocal.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatLocal.java
index e151c29fe70..4f902cb431c 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatLocal.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatLocal.java
@@ -21,11 +21,13 @@
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.util.Random;
 
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "Local")
 public class HdfsCompatLocal extends AbstractHdfsCompatCase {
   private static final int FILE_LEN = 128;
@@ -69,36 +71,36 @@ public void cleanup() {
   @HdfsCompatCase
   public void copyFromLocalFile() throws IOException {
     fs().copyFromLocalFile(localSrc, dst);
-    Assert.assertTrue(localFs.exists(localSrc));
-    Assert.assertTrue(fs().exists(dst));
+    assertTrue(localFs.exists(localSrc));
+    assertTrue(fs().exists(dst));
   }
 
   @HdfsCompatCase
   public void moveFromLocalFile() throws IOException {
     fs().moveFromLocalFile(localSrc, dst);
-    Assert.assertFalse(localFs.exists(localSrc));
-    Assert.assertTrue(fs().exists(dst));
+    assertFalse(localFs.exists(localSrc));
+    assertTrue(fs().exists(dst));
   }
 
   @HdfsCompatCase
   public void copyToLocalFile() throws IOException {
     fs().copyToLocalFile(src, localDst);
-    Assert.assertTrue(fs().exists(src));
-    Assert.assertTrue(localFs.exists(localDst));
+    assertTrue(fs().exists(src));
+    assertTrue(localFs.exists(localDst));
   }
 
   @HdfsCompatCase
   public void moveToLocalFile() throws IOException {
     fs().moveToLocalFile(src, localDst);
-    Assert.assertFalse(fs().exists(src));
-    Assert.assertTrue(localFs.exists(localDst));
+    assertFalse(fs().exists(src));
+    assertTrue(localFs.exists(localDst));
   }
 
   @HdfsCompatCase
   public void startLocalOutput() throws IOException {
     Path local = fs().startLocalOutput(dst, localDst);
     HdfsCompatUtil.createFile(localFs, local, 16);
-    Assert.assertTrue(localFs.exists(local));
+    assertTrue(localFs.exists(local));
   }
 
   @HdfsCompatCase
@@ -106,6 +108,6 @@ public void completeLocalOutput() throws IOException {
     Path local = fs().startLocalOutput(dst, localDst);
     HdfsCompatUtil.createFile(localFs, local, 16);
     fs().completeLocalOutput(dst, localDst);
-    Assert.assertTrue(fs().exists(dst));
+    assertTrue(fs().exists(dst));
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java
index aa988fba3e0..7ae944fc02d 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatServer.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -28,11 +27,16 @@
 import java.util.Collection;
 import java.util.List;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "Server")
 public class HdfsCompatServer extends AbstractHdfsCompatCase {
   private void isValid(String name) {
-    Assert.assertNotNull(name);
-    Assert.assertFalse(name.isEmpty());
+    assertNotNull(name);
+    assertFalse(name.isEmpty());
   }
 
   @HdfsCompatCase
@@ -89,7 +93,7 @@ public void resolvePath() throws IOException {
     HdfsCompatUtil.createFile(fs(), file, 0);
     fs().createSymlink(file, link, true);
     Path resolved = fs().resolvePath(link);
-    Assert.assertEquals(file.getName(), resolved.getName());
+    assertEquals(file.getName(), resolved.getName());
   }
 
   @HdfsCompatCase
@@ -103,7 +107,7 @@ public void setWorkingDirectory() throws IOException {
     FileSystem another = FileSystem.newInstance(fs().getUri(), fs().getConf());
     Path work = makePath("work");
     another.setWorkingDirectory(work);
-    Assert.assertEquals(work.getName(),
+    assertEquals(work.getName(),
         another.getWorkingDirectory().getName());
   }
 
@@ -121,17 +125,17 @@ public void close() throws IOException {
 
   @HdfsCompatCase
   public void getDefaultBlockSize() {
-    Assert.assertTrue(fs().getDefaultBlockSize(getBasePath()) >= 0);
+    assertTrue(fs().getDefaultBlockSize(getBasePath()) >= 0);
   }
 
   @HdfsCompatCase
   public void getDefaultReplication() {
-    Assert.assertTrue(fs().getDefaultReplication(getBasePath()) >= 0);
+    assertTrue(fs().getDefaultReplication(getBasePath()) >= 0);
   }
 
   @HdfsCompatCase
   public void getStorageStatistics() {
-    Assert.assertNotNull(fs().getStorageStatistics());
+    assertNotNull(fs().getStorageStatistics());
   }
 
   // @HdfsCompatCase
@@ -144,18 +148,18 @@ public void setWriteChecksum() {
 
   @HdfsCompatCase
   public void getDelegationToken() throws IOException {
-    Assert.assertNotNull(fs().getDelegationToken(getDelegationTokenRenewer()));
+    assertNotNull(fs().getDelegationToken(getDelegationTokenRenewer()));
   }
 
   @HdfsCompatCase
   public void getAdditionalTokenIssuers() throws IOException {
-    Assert.assertNotNull(fs().getAdditionalTokenIssuers());
+    assertNotNull(fs().getAdditionalTokenIssuers());
   }
 
   @HdfsCompatCase
   public void getServerDefaults() throws IOException {
     FsServerDefaults d = fs().getServerDefaults(getBasePath());
-    Assert.assertTrue(d.getBlockSize() >= 0);
+    assertTrue(d.getBlockSize() >= 0);
   }
 
   @HdfsCompatCase
@@ -166,7 +170,7 @@ public void msync() throws IOException {
   @HdfsCompatCase
   public void getStatus() throws IOException {
     FsStatus status = fs().getStatus();
-    Assert.assertTrue(status.getRemaining() > 0);
+    assertTrue(status.getRemaining() > 0);
   }
 
   @HdfsCompatCase
@@ -178,7 +182,7 @@ public void getTrashRoot() {
   @HdfsCompatCase
   public void getTrashRoots() {
     Collection<FileStatus> trashes = fs().getTrashRoots(true);
-    Assert.assertNotNull(trashes);
+    assertNotNull(trashes);
     for (FileStatus trash : trashes) {
       isValid(trash.getPath().toString());
     }
@@ -188,12 +192,12 @@ public void getTrashRoots() {
   public void getAllStoragePolicies() throws IOException {
     Collection<? extends BlockStoragePolicySpi> policies =
         fs().getAllStoragePolicies();
-    Assert.assertFalse(policies.isEmpty());
+    assertFalse(policies.isEmpty());
   }
 
   @HdfsCompatCase
   public void supportsSymlinks() {
-    Assert.assertTrue(fs().supportsSymlinks());
+    assertTrue(fs().supportsSymlinks());
   }
 
   @HdfsCompatCase
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSnapshot.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSnapshot.java
index 5ed1612f381..7dd56737bca 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSnapshot.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSnapshot.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -27,6 +26,10 @@
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "Snapshot")
 public class HdfsCompatSnapshot extends AbstractHdfsCompatCase {
   private static final Logger LOG = LoggerFactory.getLogger(HdfsCompatSnapshot.class);
@@ -114,24 +117,24 @@ public void cleanup() throws ReflectiveOperationException {
 
   @HdfsCompatCase
   public void createSnapshot() throws IOException {
-    Assert.assertNotEquals(snapshot.toString(), dir.toString());
-    Assert.assertTrue(fs().exists(snapshot));
-    Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
+    assertNotEquals(snapshot.toString(), dir.toString());
+    assertTrue(fs().exists(snapshot));
+    assertTrue(fs().exists(new Path(snapshot, fileName)));
   }
 
   @HdfsCompatCase
   public void renameSnapshot() throws IOException {
     fs().renameSnapshot(dir, snapshotName, "s-name2");
-    Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
+    assertFalse(fs().exists(new Path(snapshot, fileName)));
     snapshot = getSnapshotPath(dir, "s-name2");
-    Assert.assertTrue(fs().exists(new Path(snapshot, fileName)));
+    assertTrue(fs().exists(new Path(snapshot, fileName)));
     fs().renameSnapshot(dir, "s-name2", snapshotName);
   }
 
   @HdfsCompatCase
   public void deleteSnapshot() throws IOException {
     fs().deleteSnapshot(dir, snapshotName);
-    Assert.assertFalse(fs().exists(snapshot));
-    Assert.assertFalse(fs().exists(new Path(snapshot, fileName)));
+    assertFalse(fs().exists(snapshot));
+    assertFalse(fs().exists(new Path(snapshot, fileName)));
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java
index 38bdde9afba..d2ac84129ad 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatStoragePolicy.java
@@ -20,7 +20,6 @@
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,6 +28,8 @@
 import java.util.List;
 import java.util.Random;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 @HdfsCompatCaseGroup(name = "StoragePolicy")
 public class HdfsCompatStoragePolicy extends AbstractHdfsCompatCase {
   private static final Logger LOG =
@@ -79,7 +80,7 @@ public void cleanup() {
   public void setStoragePolicy() throws IOException {
     fs().setStoragePolicy(dir, policyName);
     BlockStoragePolicySpi policy = fs().getStoragePolicy(dir);
-    Assert.assertEquals(policyName, policy.getName());
+    assertEquals(policyName, policy.getName());
   }
 
   @HdfsCompatCase
@@ -88,7 +89,7 @@ public void unsetStoragePolicy() throws IOException {
     fs().unsetStoragePolicy(dir);
     BlockStoragePolicySpi policy = fs().getStoragePolicy(dir);
     String policyNameAfterUnset = (policy == null) ? null : policy.getName();
-    Assert.assertEquals(defaultPolicyName, policyNameAfterUnset);
+    assertEquals(defaultPolicyName, policyNameAfterUnset);
   }
 
   @HdfsCompatCase(ifDef = "org.apache.hadoop.fs.FileSystem#satisfyStoragePolicy")
@@ -101,6 +102,6 @@ public void satisfyStoragePolicy() throws IOException {
   public void getStoragePolicy() throws IOException {
     BlockStoragePolicySpi policy = fs().getStoragePolicy(file);
     String initialPolicyName = (policy == null) ? null : policy.getName();
-    Assert.assertEquals(defaultPolicyName, initialPolicyName);
+    assertEquals(defaultPolicyName, initialPolicyName);
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSymlink.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSymlink.java
index 45a7348777a..16fc3571302 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSymlink.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatSymlink.java
@@ -21,10 +21,12 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "Symlink")
 public class HdfsCompatSymlink extends AbstractHdfsCompatCase {
   private static final int FILE_LEN = 128;
@@ -52,19 +54,19 @@ public void cleanup() throws IOException {
 
   @HdfsCompatCase
   public void createSymlink() throws IOException {
-    Assert.assertTrue(fs().exists(link));
+    assertTrue(fs().exists(link));
   }
 
   @HdfsCompatCase
   public void getFileLinkStatus() throws IOException {
     FileStatus linkStatus = fs().getFileLinkStatus(link);
-    Assert.assertTrue(linkStatus.isSymlink());
-    Assert.assertEquals(target.getName(), linkStatus.getSymlink().getName());
+    assertTrue(linkStatus.isSymlink());
+    assertEquals(target.getName(), linkStatus.getSymlink().getName());
   }
 
   @HdfsCompatCase
   public void getLinkTarget() throws IOException {
     Path src = fs().getLinkTarget(link);
-    Assert.assertEquals(target.getName(), src.getName());
+    assertEquals(target.getName(), src.getName());
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java
index 421e6d4a618..f5faaeeb5cb 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatTpcds.java
@@ -19,13 +19,16 @@
 
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "TPCDS")
 public class HdfsCompatTpcds extends AbstractHdfsCompatCase {
   private static final int FILE_LEN = 8;
@@ -62,22 +65,22 @@ public void create() throws IOException {
 
   @HdfsCompatCase
   public void mkdirs() throws IOException {
-    Assert.assertTrue(fs().mkdirs(path));
+    assertTrue(fs().mkdirs(path));
   }
 
   @HdfsCompatCase
   public void getFileStatus() throws IOException {
     HdfsCompatUtil.createFile(fs(), path, FILE_LEN);
     FileStatus fileStatus = fs().getFileStatus(path);
-    Assert.assertEquals(FILE_LEN, fileStatus.getLen());
+    assertEquals(FILE_LEN, fileStatus.getLen());
   }
 
   @HdfsCompatCase
   public void listStatus() throws IOException {
     HdfsCompatUtil.createFile(fs(), new Path(path, "file"), FILE_LEN);
     FileStatus[] files = fs().listStatus(path);
-    Assert.assertEquals(1, files.length);
-    Assert.assertEquals(FILE_LEN, files[0].getLen());
+    assertEquals(1, files.length);
+    assertEquals(FILE_LEN, files[0].getLen());
   }
 
   @HdfsCompatCase
@@ -88,8 +91,8 @@ public void listLocatedStatus() throws IOException {
     while (it.hasNext()) {
       files.add(it.next());
     }
-    Assert.assertEquals(1, files.size());
-    Assert.assertEquals(FILE_LEN, files.get(0).getLen());
+    assertEquals(1, files.size());
+    assertEquals(FILE_LEN, files.get(0).getLen());
   }
 
   @HdfsCompatCase
@@ -106,16 +109,16 @@ public void delete() throws IOException {
 
   @HdfsCompatCase
   public void getServerDefaults() throws IOException {
-    Assert.assertNotNull(fs().getServerDefaults(path));
+    assertNotNull(fs().getServerDefaults(path));
   }
 
   @HdfsCompatCase
   public void getTrashRoot() throws IOException {
-    Assert.assertNotNull(fs().getTrashRoot(path));
+    assertNotNull(fs().getTrashRoot(path));
   }
 
   @HdfsCompatCase
   public void makeQualified() throws IOException {
-    Assert.assertNotNull(fs().makeQualified(path));
+    assertNotNull(fs().makeQualified(path));
   }
 }
\ No newline at end of file
diff --git a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatXAttr.java b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatXAttr.java
index 18db250cff1..1b14bd7f249 100644
--- a/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatXAttr.java
+++ b/hadoop-tools/hadoop-compat-bench/src/main/java/org/apache/hadoop/fs/compat/cases/HdfsCompatXAttr.java
@@ -19,7 +19,6 @@
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.compat.common.*;
-import org.junit.Assert;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -27,6 +26,10 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 @HdfsCompatCaseGroup(name = "XAttr")
 public class HdfsCompatXAttr extends AbstractHdfsCompatCase {
   private Path file;
@@ -48,7 +51,7 @@ public void setXAttr() throws IOException {
     final byte[] value = "value".getBytes(StandardCharsets.UTF_8);
     fs().setXAttr(file, key, value);
     Map<String, byte[]> attrs = fs().getXAttrs(file);
-    Assert.assertArrayEquals(value, attrs.getOrDefault(key, new byte[0]));
+    assertArrayEquals(value, attrs.getOrDefault(key, new byte[0]));
   }
 
   @HdfsCompatCase
@@ -57,7 +60,7 @@ public void getXAttr() throws IOException {
     final byte[] value = "value".getBytes(StandardCharsets.UTF_8);
     fs().setXAttr(file, key, value);
     byte[] attr = fs().getXAttr(file, key);
-    Assert.assertArrayEquals(value, attr);
+    assertArrayEquals(value, attr);
   }
 
   @HdfsCompatCase
@@ -69,9 +72,9 @@ public void getXAttrs() throws IOException {
     List<String> keys = new ArrayList<>();
     keys.add("user.key1");
     Map<String, byte[]> attrs = fs().getXAttrs(file, keys);
-    Assert.assertEquals(1, attrs.size());
+    assertEquals(1, attrs.size());
     byte[] attr = attrs.getOrDefault("user.key1", new byte[0]);
-    Assert.assertArrayEquals("value1".getBytes(StandardCharsets.UTF_8), attr);
+    assertArrayEquals("value1".getBytes(StandardCharsets.UTF_8), attr);
   }
 
   @HdfsCompatCase
@@ -81,9 +84,9 @@ public void listXAttrs() throws IOException {
     fs().setXAttr(file, "user.key2",
         "value2".getBytes(StandardCharsets.UTF_8));
     List<String> names = fs().listXAttrs(file);
-    Assert.assertEquals(2, names.size());
-    Assert.assertTrue(names.contains("user.key1"));
-    Assert.assertTrue(names.contains("user.key2"));
+    assertEquals(2, names.size());
+    assertTrue(names.contains("user.key1"));
+    assertTrue(names.contains("user.key2"));
   }
 
   @HdfsCompatCase
@@ -94,7 +97,7 @@ public void removeXAttr() throws IOException {
         "value2".getBytes(StandardCharsets.UTF_8));
     fs().removeXAttr(file, "user.key1");
     List<String> names = fs().listXAttrs(file);
-    Assert.assertEquals(1, names.size());
-    Assert.assertTrue(names.contains("user.key2"));
+    assertEquals(1, names.size());
+    assertTrue(names.contains("user.key2"));
   }
 }
\ No newline at end of file
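
For anyone wanting to verify a change like this locally, building just this module from the repository root should suffice, for example: mvn -pl hadoop-tools/hadoop-compat-bench -am clean package (standard Maven module-selection flags; the exact goals and profiles may vary with your environment).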

