This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
     new e08bb0b28ea HADOOP-19418. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-nfs. (#7635)
e08bb0b28ea is described below

commit e08bb0b28eac500320f4f831fc0ff0826a1f2eff
Author: slfan1989 <55643692+slfan1...@users.noreply.github.com>
AuthorDate: Wed Apr 23 23:21:52 2025 +0800

    HADOOP-19418. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-nfs. (#7635)

    * HADOOP-19418. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-nfs.

    Co-authored-by: He Xiaoqiao <hexiaoq...@apache.org>
    Reviewed-by: He Xiaoqiao <hexiaoq...@apache.org>
    Signed-off-by: Shilun Fan <slfan1...@apache.org>
---
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml        |  30 +++
 .../org/apache/hadoop/hdfs/nfs/TestMountd.java     |   4 +-
 .../hdfs/nfs/nfs3/TestClientAccessPrivilege.java   |  32 +--
 .../hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java   |  37 ++-
 .../hadoop/hdfs/nfs/nfs3/TestExportsTable.java     |  41 ++-
 .../hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java   |  18 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java |  39 +--
 .../hadoop/hdfs/nfs/nfs3/TestOffsetRange.java      |  31 ++-
 .../hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java |  21 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java   |  23 +-
 .../hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java   | 281 +++++++++++----------
 .../hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java   |  72 +++---
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java    | 166 ++++++------
 13 files changed, 432 insertions(+), 363 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index e3c4031b3fc..277e90fbf30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -173,6 +173,36 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>bcprov-jdk18on</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.assertj</groupId>
+      <artifactId>assertj-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.vintage</groupId>
+      <artifactId>junit-vintage-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.platform</groupId>
+      <artifactId>junit-platform-launcher</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <profiles>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
index fe92c9062b5..4411a4ee7ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
 import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
 import org.apache.hadoop.oncrpc.XDR;
 
-import org.junit.Test;
-import static org.junit.Assert.assertTrue;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestMountd {
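Note: the dependency block above pulls in the full JUnit 5 stack — junit-jupiter-api/params/engine for the migrated tests, junit-vintage-engine so any tests still on JUnit 4 keep running on the JUnit Platform, and junit-platform-launcher for the Maven test runner. The core change the rest of this patch repeats is the import swap visible in TestMountd; a minimal sketch of the resulting style (the test class and method here are hypothetical, for illustration only):

    import org.junit.jupiter.api.Test;                          // was org.junit.Test
    import static org.junit.jupiter.api.Assertions.assertTrue;  // was org.junit.Assert.assertTrue

    public class ExampleTest {
      @Test
      public void sanityCheck() {
        // JUnit 5 assertions take the failure message last, not first.
        assertTrue(1 + 1 == 2, "arithmetic sanity check");
      }
    }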
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
index 007803d9036..fc6f6351102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestClientAccessPrivilege.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -36,11 +36,13 @@
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestClientAccessPrivilege {
   static MiniDFSCluster cluster = null;
@@ -50,7 +52,7 @@ public class TestClientAccessPrivilege {
   static String testdir = "/tmp";
   static SecurityHandler securityHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
 
@@ -68,26 +70,27 @@ public static void setup() throws Exception {
     config.setInt("nfs3.mountd.port", 0);
     config.setInt("nfs3.server.port", 0);
 
-    securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(
+    securityHandler = mock(SecurityHandler.class);
+    when(securityHandler.getUser()).thenReturn(
         System.getProperty("user.name"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void createFiles() throws IllegalArgumentException, IOException {
     hdfs.delete(new Path(testdir), true);
     hdfs.mkdirs(new Path(testdir));
     DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testClientAccessPrivilegeForRemove() throws Exception {
     // Configure ro access for nfs1 service
     config.set("dfs.nfs.exports.allowed.hosts", "* ro");
@@ -113,9 +116,8 @@ public void testClientAccessPrivilegeForRemove() throws Exception {
         securityHandler, new InetSocketAddress("localhost", 1234));
 
     // Assert on return code
-    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
-        response.getStatus());
-
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response.getStatus(), "Incorrect return code");
   }
 }
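Note: this file shows the lifecycle and timeout conversions used throughout the patch: @BeforeClass/@AfterClass/@Before become @BeforeAll/@AfterAll/@BeforeEach, and @Test(timeout = 60000) becomes @Test plus @Timeout(value = 60). JUnit 4's timeout attribute is in milliseconds while JUnit 5's @Timeout defaults to seconds, hence 60000 -> 60. A sketch with hypothetical names:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class LifecycleExampleTest {
      @BeforeAll                      // was @BeforeClass; still must be static
      public static void setup() { }

      @BeforeEach                     // was @Before
      public void createFiles() { }

      @Test
      @Timeout(value = 60)            // was @Test(timeout = 60000)
      public void testSomething() { }

      @AfterAll                       // was @AfterClass
      public static void shutdown() { }
    }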
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
index d093f51b1ba..b5433e03ec4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
@@ -18,11 +18,10 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import java.io.IOException;
 
@@ -30,11 +29,11 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDFSClientCache {
-  @After
+  @AfterEach
   public void cleanup() {
     UserGroupInformation.reset();
   }
@@ -58,8 +57,8 @@ public void testEviction() throws IOException {
     cache.getDfsClient("test2", namenodeId);
 
     assertTrue(isDfsClientClose(c1));
-    assertTrue("cache size should be the max size or less",
-        cache.getClientCache().size() <= MAX_CACHE_SIZE);
+    assertTrue(cache.getClientCache().size() <= MAX_CACHE_SIZE,
+        "cache size should be the max size or less");
   }
 
   @Test
@@ -79,11 +78,10 @@ public void testGetUserGroupInformationSecure() throws IOException {
     UserGroupInformation ugiResult =
        cache.getUserGroupInformation(userName, currentUserUgi);
 
-    assertThat(ugiResult.getUserName(), is(userName));
-    assertThat(ugiResult.getRealUser(), is(currentUserUgi));
-    assertThat(
-        ugiResult.getAuthenticationMethod(),
-        is(UserGroupInformation.AuthenticationMethod.PROXY));
+    assertThat(ugiResult.getUserName()).isEqualTo(userName);
+    assertThat(ugiResult.getRealUser()).isEqualTo(currentUserUgi);
+    assertThat(ugiResult.getAuthenticationMethod()).isEqualTo(
+        UserGroupInformation.AuthenticationMethod.PROXY);
   }
 
   @Test
@@ -99,11 +97,10 @@ public void testGetUserGroupInformation() throws IOException {
     UserGroupInformation ugiResult =
        cache.getUserGroupInformation(userName, currentUserUgi);
 
-    assertThat(ugiResult.getUserName(), is(userName));
-    assertThat(ugiResult.getRealUser(), is(currentUserUgi));
-    assertThat(
-        ugiResult.getAuthenticationMethod(),
-        is(UserGroupInformation.AuthenticationMethod.PROXY));
+    assertThat(ugiResult.getUserName()).isEqualTo(userName);
+    assertThat(ugiResult.getRealUser()).isEqualTo(currentUserUgi);
+    assertThat(ugiResult.getAuthenticationMethod()).isEqualTo(
+        UserGroupInformation.AuthenticationMethod.PROXY);
   }
 
   private static boolean isDfsClientClose(DFSClient c) {
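Note: JUnit 5's Assertions class has no assertThat, so the Hamcrest-based assertions in this file move to AssertJ (the new assertj-core test dependency). The translation, sketched with an illustrative value:

    import static org.assertj.core.api.Assertions.assertThat;

    public class AssertJStyleExample {
      static void check(String userName) {
        // JUnit 4 + Hamcrest: assertThat(userName, is("alice"));
        assertThat(userName).isEqualTo("alice");  // "alice" is a made-up value
      }
    }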
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
index a5c3e7a5ebc..70729eeb68f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.nio.file.FileSystemException;
 
@@ -34,15 +32,13 @@
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestExportsTable {
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
   @Test
   public void testHdfsExportPoint() throws IOException {
     NfsConfiguration config = new NfsConfiguration();
@@ -224,13 +220,13 @@ public void testViewFsRootExportPoint() throws IOException {
       ConfigUtil.addLink(config, clusterName, "/hdfs2",
          hdfs2.makeQualified(base2).toUri());
 
-      exception.expect(FileSystemException.class);
-      exception.
-          expectMessage("Only HDFS is supported as underlyingFileSystem, "
-              + "fs scheme:viewfs");
-      // Start nfs
-      final Nfs3 nfsServer = new Nfs3(config);
-      nfsServer.startServiceInternal(false);
+      FileSystemException fileSystemException = assertThrows(FileSystemException.class, () -> {
+        // Start nfs
+        final Nfs3 nfsServer = new Nfs3(config);
+        nfsServer.startServiceInternal(false);
+      });
+      assertTrue(fileSystemException.getMessage().
+          contains("Only HDFS is supported as underlyingFileSystem, fs scheme:viewfs"));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -292,13 +288,14 @@ public void testInvalidFsExport() throws IOException {
       config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          FsConstants.LOCAL_FS_URI.toString());
 
-      exception.expect(FileSystemException.class);
-      exception.
-          expectMessage("Only HDFS is supported as underlyingFileSystem, "
-              + "fs scheme:file");
-      // Start nfs
-      final Nfs3 nfsServer = new Nfs3(config);
-      nfsServer.startServiceInternal(false);
+      FileSystemException fileSystemException =
+          assertThrows(FileSystemException.class, () -> {
+            // Start nfs
+            final Nfs3 nfsServer = new Nfs3(config);
+            nfsServer.startServiceInternal(false);
+          });
+      assertTrue(fileSystemException.getMessage().
+          contains("Only HDFS is supported as underlyingFileSystem, fs scheme:file"));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
index 952aae2b584..2794eb43e0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3HttpServer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.net.URL;
@@ -31,9 +31,9 @@
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestNfs3HttpServer {
   private static final String BASEDIR =
@@ -43,7 +43,7 @@ public class TestNfs3HttpServer {
   private static String keystoresDir;
   private static String sslConfDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
         HttpConfig.Policy.HTTP_AND_HTTPS.name());
@@ -64,7 +64,7 @@ public static void setUp() throws Exception {
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     FileUtil.fullyDelete(new File(BASEDIR));
     if (cluster != null) {
@@ -84,11 +84,11 @@ public void testHttpServer() throws Exception {
     // Check default servlets.
     String pageContents = DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
-    assertTrue("Bad contents: " + pageContents,
-        pageContents.contains("java.lang:type="));
+    assertTrue(pageContents.contains("java.lang:type="),
+        "Bad contents: " + pageContents);
     System.out.println("pc:" + pageContents);
 
     int port = infoServer.getSecurePort();
-    assertTrue("Can't get https port", port > 0);
+    assertTrue(port > 0, "Can't get https port");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
index 8380c3c9bb3..7a5fcf0d888 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestNfs3Utils.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -36,42 +36,45 @@ public void testGetAccessRightsForUserGroup() throws IOException {
     Mockito.when(attr.getGid()).thenReturn(3);
     Mockito.when(attr.getMode()).thenReturn(448); // 700
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
-    assertEquals("No access should be allowed as UID does not match attribute over mode 700",
-        0, Nfs3Utils.getAccessRightsForUserGroup(3, 3, null, attr));
+    assertEquals(0, Nfs3Utils.getAccessRightsForUserGroup(3, 3, null, attr),
+        "No access should be allowed as UID does not match attribute over mode 700");
 
     Mockito.when(attr.getUid()).thenReturn(2);
     Mockito.when(attr.getGid()).thenReturn(3);
     Mockito.when(attr.getMode()).thenReturn(56); // 070
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
-    assertEquals("No access should be allowed as GID does not match attribute over mode 070",
-        0, Nfs3Utils.getAccessRightsForUserGroup(2, 4, null, attr));
+    assertEquals(0, Nfs3Utils.getAccessRightsForUserGroup(2, 4, null, attr),
+        "No access should be allowed as GID does not match attribute over mode 070");
 
     Mockito.when(attr.getUid()).thenReturn(2);
     Mockito.when(attr.getGid()).thenReturn(3);
     Mockito.when(attr.getMode()).thenReturn(7); // 007
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
-    assertEquals("Access should be allowed as mode is 007 and UID/GID do not match",
-        61 /* RWX */, Nfs3Utils.getAccessRightsForUserGroup(1, 4, new int[] {5, 6}, attr));
+    assertEquals(61 /* RWX */, Nfs3Utils.getAccessRightsForUserGroup(1, 4, new int[] {5, 6}, attr),
+        "Access should be allowed as mode is 007 and UID/GID do not match");
 
     Mockito.when(attr.getUid()).thenReturn(2);
     Mockito.when(attr.getGid()).thenReturn(10);
     Mockito.when(attr.getMode()).thenReturn(288); // 440
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
-    assertEquals("Access should be allowed as mode is 440 and Aux GID does match",
-        1 /* R */, Nfs3Utils.getAccessRightsForUserGroup(3, 4, new int[] {5, 16, 10}, attr));
+    assertEquals(1 /* R */,
+        Nfs3Utils.getAccessRightsForUserGroup(3, 4, new int[] {5, 16, 10}, attr),
+        "Access should be allowed as mode is 440 and Aux GID does match");
 
     Mockito.when(attr.getUid()).thenReturn(2);
     Mockito.when(attr.getGid()).thenReturn(10);
     Mockito.when(attr.getMode()).thenReturn(448); // 700
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
-    assertEquals("Access should be allowed for dir as mode is 700 and UID does match",
-        31 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(2, 4, new int[] {5, 16, 10}, attr));
-    assertEquals("No access should be allowed for dir as mode is 700 even though GID does match",
-        0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
-    assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
-        0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
+    assertEquals(31 /* Lookup */,
+        Nfs3Utils.getAccessRightsForUserGroup(2, 4, new int[] {5, 16, 10}, attr),
+        "Access should be allowed for dir as mode is 700 and UID does match");
+    assertEquals(0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr),
+        "No access should be allowed for dir as mode is 700 even though GID does match");
+    assertEquals(0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr),
+        "No access should be allowed for dir as mode is 700 even though AuxGID does match");
 
     Mockito.when(attr.getUid()).thenReturn(2);
     Mockito.when(attr.getGid()).thenReturn(10);
     Mockito.when(attr.getMode()).thenReturn(457); // 711
     Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
-    assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
-        2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
+    assertEquals(2 /* Lookup */,
+        Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr),
+        "Access should be allowed for dir as mode is 711 and GID matches");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java
index 034ffcd2721..398dd2fe5b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOffsetRange.java
@@ -17,32 +17,41 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestOffsetRange {
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testConstructor1() throws IOException {
-    new OffsetRange(0, 0);
+    assertThrows(IllegalArgumentException.class, () -> {
+      new OffsetRange(0, 0);
+    });
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testConstructor2() throws IOException {
-    new OffsetRange(-1, 0);
+    assertThrows(IllegalArgumentException.class, () -> {
+      new OffsetRange(-1, 0);
+    });
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testConstructor3() throws IOException {
-    new OffsetRange(-3, -1);
+    assertThrows(IllegalArgumentException.class, () -> {
+      new OffsetRange(-3, -1);
+    });
  }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testConstructor4() throws IOException {
-    new OffsetRange(-3, 100);
+    assertThrows(IllegalArgumentException.class, () -> {
+      new OffsetRange(-3, 100);
+    });
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
index 29b00c5e980..d1bdaa219a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 
@@ -30,8 +32,7 @@
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.security.ShellBasedIdMapping;
-import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.jupiter.api.Test;
 
 public class TestOpenFileCtxCache {
   static boolean cleaned = false;
@@ -43,10 +44,10 @@ public void testEviction() throws IOException, InterruptedException {
     // Only two entries will be in the cache
     conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
 
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
+    DFSClient dfsClient = mock(DFSClient.class);
     Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
+    HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class);
+    when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
         dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
@@ -102,10 +103,10 @@ public void testScan() throws IOException, InterruptedException {
     // Only two entries will be in the cache
     conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
 
-    DFSClient dfsClient = Mockito.mock(DFSClient.class);
+    DFSClient dfsClient = mock(DFSClient.class);
     Nfs3FileAttributes attr = new Nfs3FileAttributes();
-    HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
-    Mockito.when(fos.getPos()).thenReturn((long) 0);
+    HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class);
+    when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
         dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
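Note: alongside the JUnit changes, the patch consistently replaces qualified Mockito.mock(...)/Mockito.when(...) calls with static imports. Purely cosmetic, but it shortens the heavily mocked setups; sketched with a hypothetical collaborator:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class MockitoStyleExample {
      interface Clock {
        long now();
      }

      static long readStubbedClock() {
        // Before: Clock c = Mockito.mock(Clock.class); Mockito.when(c.now()).thenReturn(42L);
        Clock c = mock(Clock.class);
        when(c.now()).thenReturn(42L);
        return c.now();  // returns the stubbed 42
      }
    }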
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
index 0af7cedefb4..531dfa9bb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestReaddir.java
@@ -18,7 +18,9 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -40,11 +42,10 @@
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test READDIR and READDIRPLUS request with zero, nonzero cookies
@@ -59,7 +60,7 @@ public class TestReaddir {
   static String testdir = "/tmp";
   static SecurityHandler securityHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
     config.set(
@@ -84,19 +85,19 @@ public static void setup() throws Exception {
 
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
-    securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(
+    securityHandler = mock(SecurityHandler.class);
+    when(securityHandler.getUser()).thenReturn(
        System.getProperty("user.name"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
   }
 
-  @Before
+  @BeforeEach
   public void createFiles() throws IllegalArgumentException, IOException {
     hdfs.delete(new Path(testdir), true);
     hdfs.mkdirs(new Path(testdir));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index 07954c00d64..b9307e9446b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.EOFException;
 import java.io.File;
@@ -93,13 +97,11 @@
 import org.apache.hadoop.security.IdMappingConstant;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * Tests for {@link RpcProgramNfs3}
@@ -121,7 +123,7 @@ public class TestRpcProgramNfs3 {
   private static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
       EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
 
@@ -159,22 +161,22 @@ public static void setup() throws Exception {
     DFSTestUtil.createKey(TEST_KEY, cluster, config);
 
     // Mock SecurityHandler which returns system user.name
-    securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
+    securityHandler = mock(SecurityHandler.class);
+    when(securityHandler.getUser()).thenReturn(currentUser);
 
     // Mock SecurityHandler which returns a dummy username "harry"
-    securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
+    securityHandlerUnpriviledged = mock(SecurityHandler.class);
+    when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void createFiles() throws IllegalArgumentException, IOException {
     hdfs.delete(new Path(testdir), true);
     hdfs.mkdirs(new Path(testdir));
@@ -182,7 +184,8 @@ public void createFiles() throws IllegalArgumentException, IOException {
     DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testGetattr() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -196,17 +199,18 @@
     GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code");
 
     // Attempt by a priviledged user should pass.
     GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testSetattr() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -222,17 +226,18 @@
     SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code");
 
     // Attempt by a priviledged user should pass.
     SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testLookup() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -246,17 +251,18 @@
     LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code");
 
     // Attempt by a priviledged user should pass.
     LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testAccess() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -270,17 +276,18 @@
     ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code");
 
     // Attempt by a priviledged user should pass.
     ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testReadlink() throws Exception {
     // Create a symlink first.
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
@@ -294,8 +301,8 @@
     SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response.getStatus(), "Incorrect return code:");
 
     // Now perform readlink operations.
     FileHandle handle2 = response.getObjFileHandle();
@@ -307,17 +314,18 @@
     READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRead() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -332,17 +340,18 @@
     READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testEncryptedReadWrite() throws Exception {
     final int len = 8192;
 
@@ -358,9 +367,9 @@
     final String encFile1 = "/zone/myfile";
     createFileUsingNfs(encFile1, buffer);
     commit(encFile1, len);
-    assertArrayEquals("encFile1 not equal",
+    assertArrayEquals(
         getFileContentsUsingNfs(encFile1, len),
-        getFileContentsUsingDfs(encFile1, len));
+        getFileContentsUsingDfs(encFile1, len), "encFile1 not equal");
 
     /*
      * Same thing except this time create the encrypted file using DFS.
@@ -368,9 +377,9 @@
     final String encFile2 = "/zone/myfile2";
     final Path encFile2Path = new Path(encFile2);
     DFSTestUtil.createFile(hdfs, encFile2Path, len, (short) 1, 0xFEED);
-    assertArrayEquals("encFile2 not equal",
+    assertArrayEquals(
         getFileContentsUsingNfs(encFile2, len),
-        getFileContentsUsingDfs(encFile2, len));
+        getFileContentsUsingDfs(encFile2, len), "encFile2 not equal");
   }
 
   private void createFileUsingNfs(String fileName, byte[] buffer)
@@ -390,7 +399,7 @@ private void createFileUsingNfs(String fileName, byte[] buffer)
     final WRITE3Response response = nfsd.write(xdr_req.asReadOnlyWrap(),
         null, 1, securityHandler,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect response: ", null, response);
+    assertEquals(null, response, "Incorrect response: ");
   }
 
   private byte[] getFileContentsUsingNfs(String fileName, int len)
@@ -406,9 +415,9 @@ private byte[] getFileContentsUsingNfs(String fileName, int len)
     final READ3Response response = nfsd.read(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code: ", Nfs3Status.NFS3_OK,
-        response.getStatus());
-    assertTrue("expected full read", response.isEof());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response.getStatus(), "Incorrect return code: ");
+    assertTrue(response.isEof(), "expected full read");
     return response.getData().array();
   }
 
@@ -419,7 +428,7 @@ private byte[] getFileContentsUsingDfs(String fileName, int len)
     in.readFully(ret);
     try {
       in.readByte();
-      Assert.fail("expected end of file");
+      fail("expected end of file");
     } catch (EOFException e) {
       // expected. Unfortunately there is no associated message to check
     }
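Note: the EOF probe above keeps its JUnit 4-era try/fail/catch shape (only Assert.fail becomes the statically imported fail). Under JUnit 5 the same check could be written more compactly with assertThrows — a hedged alternative, not what this patch does:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import java.io.DataInputStream;
    import java.io.EOFException;

    public class EofCheckExample {
      // Hypothetical helper: asserts the stream has been fully consumed.
      static void expectEof(DataInputStream in) {
        assertThrows(EOFException.class, () -> in.readByte());
      }
    }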
@@ -436,15 +445,16 @@ private void commit(String fileName, int len) throws Exception {
     final COMMIT3Request req = new COMMIT3Request(handle, 0, len);
     req.serialize(xdr_req);
 
-    Channel ch = Mockito.mock(Channel.class);
+    Channel ch = mock(Channel.class);
 
     COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1,
         securityHandler, new InetSocketAddress("localhost", 1234));
 
-    assertEquals("Incorrect COMMIT3Response:", null, response2);
+    assertEquals(null, response2, "Incorrect COMMIT3Response:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testWrite() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -465,17 +475,18 @@
     WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(),
         null, 1, securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(),
         null, 1, securityHandler,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect response:", null, response2);
+    assertEquals(null, response2, "Incorrect response:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testCreate() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -490,17 +501,18 @@
     CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testMkdir() throws Exception {//FixME
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -514,8 +526,8 @@
     MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     XDR xdr_req2 = new XDR();
     MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
     req2.serialize(xdr_req2);
 
@@ -524,11 +536,12 @@
     // Attempt to mkdir by a privileged user should pass.
     MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testSymlink() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -543,17 +556,18 @@
     SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a privileged user should pass.
     SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRemove() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -567,17 +581,18 @@
     REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     REMOVE3Response response2 = nfsd.remove(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRmdir() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -591,17 +606,18 @@
     RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a privileged user should pass.
     RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRename() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -615,17 +631,18 @@
     RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a privileged user should pass.
     RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testReaddir() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -639,17 +656,18 @@
     READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testReaddirplus() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
@@ -663,17 +681,18 @@
     READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a privileged user should pass.
     READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFsstat() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -687,17 +706,18 @@
     FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFsinfo() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -711,17 +731,18 @@
     FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
  }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testPathconf() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -735,17 +756,18 @@
     PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
-        response2.getStatus());
+    assertEquals(Nfs3Status.NFS3_OK,
+        response2.getStatus(), "Incorrect return code:");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testCommit() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
     long dirId = status.getFileId();
@@ -755,23 +777,24 @@
     COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
     req.serialize(xdr_req);
 
-    Channel ch = Mockito.mock(Channel.class);
+    Channel ch = mock(Channel.class);
 
     // Attempt by an unpriviledged user should fail.
     COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1,
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
-        response1.getStatus());
+    assertEquals(Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus(), "Incorrect return code:");
 
     // Attempt by a priviledged user should pass.
     COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(), ch, 1,
         securityHandler, new InetSocketAddress("localhost", 1234));
-    assertEquals("Incorrect COMMIT3Response:", null, response2);
+    assertEquals(null, response2, "Incorrect COMMIT3Response:");
   }
 
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testIdempotent() {
     Object[][] procedures = {
         { Nfs3Constant.NFSPROC3.NULL, 1 },
@@ -800,11 +823,9 @@ public void testIdempotent() {
       boolean idempotent = procedure[1].equals(Integer.valueOf(1));
       Nfs3Constant.NFSPROC3 proc = (Nfs3Constant.NFSPROC3)procedure[0];
       if (idempotent) {
-        Assert.assertTrue(("Procedure " + proc + " should be idempotent"),
-            proc.isIdempotent());
+        assertTrue(proc.isIdempotent(), ("Procedure " + proc + " should be idempotent"));
       } else {
-        Assert.assertFalse(("Procedure " + proc + " should be non-idempotent"),
-            proc.isIdempotent());
+        assertFalse(proc.isIdempotent(), ("Procedure " + proc + " should be non-idempotent"));
       }
     }
   }
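Note: the bulk of TestRpcProgramNfs3 is the mechanical JUnit 5 argument reorder — the failure message moves from the first parameter of assertEquals/assertTrue/assertFalse to the last. Sketched on the recurring return-code check (values are illustrative):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    public class MessageOrderExample {
      static void check(int expected, int actual) {
        // JUnit 4: assertEquals("Incorrect return code:", expected, actual);
        assertEquals(expected, actual, "Incorrect return code:");
        // JUnit 4: assertTrue("status must match", actual == expected);
        assertTrue(actual == expected, "status must match");
      }
    }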
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
index 4899d9bd460..0f2040be808 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestViewfsWithNfs3.java
@@ -48,17 +48,18 @@
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Assert;
-import org.mockito.Mockito;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.File;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
@@ -83,7 +84,7 @@ public class TestViewfsWithNfs3 {
   private static FileSystemTestHelper fsHelper;
   private static File testRootDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws Exception {
     String currentUser = System.getProperty("user.name");
 
@@ -144,8 +145,8 @@ public static void setup() throws Exception {
     mountd = (RpcProgramMountd) nfs.getMountd().getRpcProgram();
 
     // Mock SecurityHandler which returns system user.name
-    securityHandler = Mockito.mock(SecurityHandler.class);
-    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
+    securityHandler = mock(SecurityHandler.class);
+    when(securityHandler.getUser()).thenReturn(currentUser);
     viewFs.delete(new Path("/hdfs2/dir2"), true);
     viewFs.mkdirs(new Path("/hdfs2/dir2"));
     DFSTestUtil.createFile(viewFs, new Path("/hdfs1/file1"), 0, (short) 1, 0);
@@ -156,7 +157,7 @@ public static void setup() throws Exception {
         0, (short) 1, 0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -165,17 +166,17 @@ public static void shutdown() throws Exception {
 
   @Test
   public void testNumExports() throws Exception {
-    Assert.assertEquals(mountd.getExports().size(),
+    assertEquals(mountd.getExports().size(),
         viewFs.getChildFileSystems().length);
   }
 
   @Test
   public void testPaths() throws Exception {
-    Assert.assertEquals(hdfs1.resolvePath(new Path("/user1/file1")),
+    assertEquals(hdfs1.resolvePath(new Path("/user1/file1")),
         viewFs.resolvePath(new Path("/hdfs1/file1")));
-    Assert.assertEquals(hdfs1.resolvePath(new Path("/user1/file2")),
+    assertEquals(hdfs1.resolvePath(new Path("/user1/file2")),
         viewFs.resolvePath(new Path("/hdfs1/file2")));
-    Assert.assertEquals(hdfs2.resolvePath(new Path("/user2/dir2")),
+    assertEquals(hdfs2.resolvePath(new Path("/user2/dir2")),
         viewFs.resolvePath(new Path("/hdfs2/dir2")));
   }
 
@@ -183,11 +184,11 @@ public void testPaths() throws Exception {
   public void testFileStatus() throws Exception {
     HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file1");
     FileStatus st = viewFs.getFileStatus(new Path("/hdfs1/file1"));
-    Assert.assertEquals(st.isDirectory(), status.isDirectory());
+    assertEquals(st.isDirectory(), status.isDirectory());
 
     HdfsFileStatus status2 = nn2.getRpcServer().getFileInfo("/user2/dir2");
     FileStatus st2 = viewFs.getFileStatus(new Path("/hdfs2/dir2"));
-    Assert.assertEquals(st2.isDirectory(), status2.isDirectory());
+    assertEquals(st2.isDirectory(), status2.isDirectory());
   }
 
   // Test for getattr
@@ -199,25 +200,28 @@ private void testNfsGetAttrResponse(long fileId, int namenodeId,
     req.serialize(xdrReq);
     GETATTR3Response response = nfsd.getattr(xdrReq.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
-    Assert.assertEquals("Incorrect return code",
-        expectedStatus, response.getStatus());
+    assertEquals(expectedStatus, response.getStatus(),
+        "Incorrect return code");
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsAccessNN1() throws Exception {
     HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file1");
     int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
     testNfsGetAttrResponse(status.getFileId(), namenodeId, Nfs3Status.NFS3_OK);
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsAccessNN2() throws Exception {
     HdfsFileStatus status = nn2.getRpcServer().getFileInfo("/user2/dir2");
     int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri());
     testNfsGetAttrResponse(status.getFileId(), namenodeId, Nfs3Status.NFS3_OK);
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testWrongNfsAccess() throws Exception {
     DFSTestUtil.createFile(viewFs, new Path("/hdfs1/file3"), 0, (short) 1, 0);
     HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/file3");
@@ -245,17 +249,19 @@ private void testNfsWriteResponse(long dirId, int namenodeId)
     WRITE3Response response = nfsd.write(xdrReq.asReadOnlyWrap(),
         null, 1, securityHandler,
         new InetSocketAddress("localhost", 1234));
-    Assert.assertEquals("Incorrect response:", null, response);
+    assertEquals(null, response, "Incorrect response:");
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsWriteNN1() throws Exception {
     HdfsFileStatus status = nn1.getRpcServer().getFileInfo("/user1/write1");
     int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
     testNfsWriteResponse(status.getFileId(), namenodeId);
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsWriteNN2() throws Exception {
     HdfsFileStatus status = nn2.getRpcServer().getFileInfo("/user2/write2");
     int namenodeId = Nfs3Utils.getNamenodeId(config, hdfs2.getUri());
@@ -277,7 +283,8 @@ private void testNfsRename(FileHandle fromDirHandle, String fromFileName,
     assertEquals(expectedStatus, response.getStatus());
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsRenameMultiNN() throws Exception {
     HdfsFileStatus fromFileStatus = nn1.getRpcServer().getFileInfo("/user1");
     int fromNNId = Nfs3Utils.getNamenodeId(config, hdfs1.getUri());
@@ -290,20 +297,21 @@ public void testNfsRenameMultiNN() throws Exception {
 
     HdfsFileStatus statusBeforeRename =
         nn1.getRpcServer().getFileInfo("/user1/renameMultiNN");
-    Assert.assertEquals(statusBeforeRename.isDirectory(), false);
+    assertEquals(statusBeforeRename.isDirectory(), false);
 
     testNfsRename(fromHandle, "renameMultiNN",
         toHandle, "renameMultiNNFail", Nfs3Status.NFS3ERR_INVAL);
 
     HdfsFileStatus statusAfterRename =
         nn2.getRpcServer().getFileInfo("/user2/renameMultiNNFail");
-    Assert.assertEquals(statusAfterRename, null);
+    assertEquals(statusAfterRename, null);
 
     statusAfterRename = nn1.getRpcServer().getFileInfo("/user1/renameMultiNN");
-    Assert.assertEquals(statusAfterRename.isDirectory(), false);
+    assertEquals(statusAfterRename.isDirectory(), false);
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testNfsRenameSingleNN() throws Exception {
     DFSTestUtil.createFile(viewFs, new Path("/hdfs1/renameSingleNN"),
         0, (short) 1, 0);
@@ -314,7 +322,7 @@ public void testNfsRenameSingleNN() throws Exception {
 
     HdfsFileStatus statusBeforeRename =
         nn1.getRpcServer().getFileInfo("/user1/renameSingleNN");
-    Assert.assertEquals(statusBeforeRename.isDirectory(), false);
+    assertEquals(statusBeforeRename.isDirectory(), false);
 
     Path successFilePath = new Path("/user1/renameSingleNNSucess");
     hdfs1.delete(successFilePath, false);
@@ -323,10 +331,10 @@ public void testNfsRenameSingleNN() throws Exception {
 
     HdfsFileStatus statusAfterRename =
         nn1.getRpcServer().getFileInfo("/user1/renameSingleNNSucess");
-    Assert.assertEquals(statusAfterRename.isDirectory(), false);
+    assertEquals(statusAfterRename.isDirectory(), false);
 
     statusAfterRename = nn1.getRpcServer().getFileInfo("/user1/renameSingleNN");
-    Assert.assertEquals(statusAfterRename, null);
+    assertEquals(statusAfterRename, null);
   }
 }
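Note: one more recurring shape — null checks are ported verbatim as assertEquals(null, response, msg) or assertEquals(statusAfterRename, null). That compiles fine under JUnit 5, but assertNull is the more direct idiom; an equivalent alternative, not what the patch does:

    import static org.junit.jupiter.api.Assertions.assertNull;

    public class NullCheckExample {
      static void check(Object response) {
        // Ported form: assertEquals(null, response, "Incorrect response:");
        assertNull(response, "Incorrect response:");
      }
    }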
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.hdfs.nfs.nfs3; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; import java.net.InetSocketAddress; @@ -53,9 +55,7 @@ import org.apache.hadoop.security.ShellBasedIdMapping; import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; +import org.junit.jupiter.api.Test; public class TestWrites { @Test @@ -77,7 +77,7 @@ public void testAlterWriteRequest() throws IOException { request.getStableHow(), request.getData(), null, 1, false, WriteCtx.DataState.NO_DUMP); - Assert.assertTrue(writeCtx1.getData().array().length == originalCount); + assertTrue(writeCtx1.getData().array().length == originalCount); // Now change the write request OpenFileCtx.alterWriteRequest(request, 12); @@ -89,12 +89,12 @@ public void testAlterWriteRequest() throws IOException { int position = appendedData.position(); int limit = appendedData.limit(); - Assert.assertTrue(position == 12); - Assert.assertTrue(limit - position == 8); - Assert.assertTrue(appendedData.get(position) == (byte) 12); - Assert.assertTrue(appendedData.get(position + 1) == (byte) 13); - Assert.assertTrue(appendedData.get(position + 2) == (byte) 14); - Assert.assertTrue(appendedData.get(position + 7) == (byte) 19); + assertTrue(position == 12); + assertTrue(limit - position == 8); + assertTrue(appendedData.get(position) == (byte) 12); + assertTrue(appendedData.get(position + 1) == (byte) 13); + assertTrue(appendedData.get(position + 2) == (byte) 14); + assertTrue(appendedData.get(position + 7) == (byte) 19); // Test current file write offset is at boundaries buffer.position(0); @@ -107,10 +107,10 @@ public void testAlterWriteRequest() throws IOException { appendedData = writeCtx3.getData(); position = appendedData.position(); limit = appendedData.limit(); - Assert.assertTrue(position == 1); - Assert.assertTrue(limit - position == 19); - Assert.assertTrue(appendedData.get(position) == (byte) 1); - Assert.assertTrue(appendedData.get(position + 18) == (byte) 19); + assertTrue(position == 1); + assertTrue(limit - position == 19); + assertTrue(appendedData.get(position) == (byte) 1); + assertTrue(appendedData.get(position + 18) == (byte) 19); // Reset buffer position before test another boundary buffer.position(0); @@ -123,9 +123,9 @@ public void testAlterWriteRequest() throws IOException { appendedData = writeCtx4.getData(); position = appendedData.position(); limit = appendedData.limit(); - Assert.assertTrue(position == 19); - Assert.assertTrue(limit - position == 1); - Assert.assertTrue(appendedData.get(position) == (byte) 19); + assertTrue(position == 19); + assertTrue(limit - position == 1); + assertTrue(appendedData.get(position) == (byte) 19); } @Test @@ -133,10 +133,10 @@ public void testAlterWriteRequest() throws IOException { // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, // 
COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC. public void testCheckCommit() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); - Mockito.when(fos.getPos()).thenReturn((long) 0); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); + when(fos.getPos()).thenReturn((long) 0); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); @@ -147,62 +147,62 @@ public void testCheckCommit() throws IOException { // Test inactive open file context ctx.setActiveStatusForTest(false); - Channel ch = Mockito.mock(Channel.class); + Channel ch = mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); + assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); + assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); - Mockito.when(fos.getPos()).thenReturn((long) 10); + when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest(10); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); + assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); status = ctx.checkCommitInternal(10, ch, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); + assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); - Assert.assertTrue(commits.size() == 0); + assertTrue(commits.size() == 0); ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); - Assert.assertTrue(commits.size() == 1); + assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); + assertTrue(commits.size() == 1); long key = commits.firstKey(); - Assert.assertTrue(key == 11); + assertTrue(key == 11); // Test request with zero commit offset commits.remove(new Long(11)); // There is one pending write [5,10] ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); - Assert.assertTrue(commits.size() == 1); + assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT); + assertTrue(commits.size() == 1); key = commits.firstKey(); - Assert.assertTrue(key == 9); + assertTrue(key == 9); // Empty pending writes ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); } @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS with // 
large file upload option. public void testCheckCommitLargeFileUpload() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); - Mockito.when(fos.getPos()).thenReturn((long) 0); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); + when(fos.getPos()).thenReturn((long) 0); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true); @@ -213,66 +213,66 @@ public void testCheckCommitLargeFileUpload() throws IOException { // Test inactive open file context ctx.setActiveStatusForTest(false); - Channel ch = Mockito.mock(Channel.class); + Channel ch = mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); + assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX); ctx.getPendingWritesForTest().put(new OffsetRange(10, 15), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); + assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE); // Test request with non zero commit offset ctx.setActiveStatusForTest(true); - Mockito.when(fos.getPos()).thenReturn((long) 8); + when(fos.getPos()).thenReturn((long) 8); ctx.setNextOffsetForTest(10); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); + assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); // Do_SYNC state will be updated to FINISHED after data sync ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); // Test commit sequential writes status = ctx.checkCommitInternal(10, ch, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); + assertTrue(status == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); + assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); // Test commit non-sequential writes ConcurrentNavigableMap<Long, CommitCtx> commits = ctx .getPendingCommitsForTest(); - Assert.assertTrue(commits.size() == 1); + assertTrue(commits.size() == 1); ret = ctx.checkCommit(dfsClient, 16, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS); - Assert.assertTrue(commits.size() == 1); + assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS); + assertTrue(commits.size() == 1); // Test request with zero commit offset commits.remove(new Long(10)); // There is one pending write [10,15] ret = ctx.checkCommitInternal(0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); + assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); ret = ctx.checkCommitInternal(9, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); - Assert.assertTrue(commits.size() == 2); + assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); + assertTrue(commits.size() == 2); // Empty pending writes. 
nextOffset=10, flushed pos=8 ctx.getPendingWritesForTest().remove(new OffsetRange(10, 15)); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); + assertTrue(ret == COMMIT_STATUS.COMMIT_SPECIAL_WAIT); // Empty pending writes ctx.setNextOffsetForTest((long) 8); // flushed pos = 8 ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false); - Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED); } @Test public void testCheckCommitAixCompatMode() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); NfsConfiguration conf = new NfsConfiguration(); conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); @@ -282,18 +282,18 @@ public void testCheckCommitAixCompatMode() throws IOException { // Test fall-through to pendingWrites check in the event that commitOffset // is greater than the number of bytes we've so far flushed. - Mockito.when(fos.getPos()).thenReturn((long) 2); + when(fos.getPos()).thenReturn((long) 2); COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED); + assertTrue(status == COMMIT_STATUS.COMMIT_FINISHED); // Test the case when we actually have received more bytes than we're trying // to commit. ctx.getPendingWritesForTest().put(new OffsetRange(0, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null)); - Mockito.when(fos.getPos()).thenReturn((long) 10); + when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest((long)10); status = ctx.checkCommitInternal(5, null, 1, attr, false); - Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); + assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC); } @Test @@ -301,10 +301,10 @@ public void testCheckCommitAixCompatMode() throws IOException { // includes COMMIT_FINISHED, COMMIT_WAIT, COMMIT_INACTIVE_CTX, // COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC. 
public void testCheckCommitFromRead() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); - Mockito.when(fos.getPos()).thenReturn((long) 0); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); + when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); @@ -318,7 +318,7 @@ public void testCheckCommitFromRead() throws IOException { // Test inactive open file context ctx.setActiveStatusForTest(false); - Channel ch = Mockito.mock(Channel.class); + Channel ch = mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); @@ -331,7 +331,7 @@ public void testCheckCommitFromRead() throws IOException { // Test request with non zero commit offset ctx.setActiveStatusForTest(true); - Mockito.when(fos.getPos()).thenReturn((long) 10); + when(fos.getPos()).thenReturn((long) 10); ctx.setNextOffsetForTest((long)10); COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false); assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status); @@ -371,10 +371,10 @@ public void testCheckCommitFromRead() throws IOException { @Test // Validate all the commit check return codes OpenFileCtx.COMMIT_STATUS with large file upload option public void testCheckCommitFromReadLargeFileUpload() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); - Mockito.when(fos.getPos()).thenReturn((long) 0); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); + when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, true); @@ -388,7 +388,7 @@ public void testCheckCommitFromReadLargeFileUpload() throws IOException { // Test inactive open file context ctx.setActiveStatusForTest(false); - Channel ch = Mockito.mock(Channel.class); + Channel ch = mock(Channel.class); ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, true); assertEquals( COMMIT_STATUS.COMMIT_INACTIVE_CTX, ret); assertEquals(Nfs3Status.NFS3_OK, wm.commitBeforeRead(dfsClient, h, 0)); @@ -401,7 +401,7 @@ public void testCheckCommitFromReadLargeFileUpload() throws IOException { // Test request with non zero commit offset ctx.setActiveStatusForTest(true); - Mockito.when(fos.getPos()).thenReturn((long) 6); + when(fos.getPos()).thenReturn((long) 6); ctx.setNextOffsetForTest((long)10); COMMIT_STATUS status = ctx.checkCommitInternal(5, ch, 1, attr, false); assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC, status); @@ -463,8 +463,8 @@ public void testWriteStableHow() throws IOException, InterruptedException { DFSClient client = null; MiniDFSCluster cluster = null; RpcProgramNfs3 nfsd; - SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class); - Mockito.when(securityHandler.getUser()).thenReturn( + SecurityHandler securityHandler = mock(SecurityHandler.class); + when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); String currentUser = System.getProperty("user.name"); config.set( @@ -572,8 +572,8 @@ public void testOOOWrites() throws 
IOException, InterruptedException { RpcProgramNfs3 nfsd; final int bufSize = 32; final int numOOO = 3; - SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class); - Mockito.when(securityHandler.getUser()).thenReturn( + SecurityHandler securityHandler = mock(SecurityHandler.class); + when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); String currentUser = System.getProperty("user.name"); config.set( @@ -649,8 +649,8 @@ public void testOverlappingWrites() throws IOException, InterruptedException { MiniDFSCluster cluster = null; RpcProgramNfs3 nfsd; final int bufSize = 32; - SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class); - Mockito.when(securityHandler.getUser()).thenReturn( + SecurityHandler securityHandler = mock(SecurityHandler.class); + when(securityHandler.getUser()).thenReturn( System.getProperty("user.name")); String currentUser = System.getProperty("user.name"); config.set( @@ -736,10 +736,10 @@ securityHandler, new InetSocketAddress("localhost", config.getInt( @Test public void testCheckSequential() throws IOException { - DFSClient dfsClient = Mockito.mock(DFSClient.class); + DFSClient dfsClient = mock(DFSClient.class); Nfs3FileAttributes attr = new Nfs3FileAttributes(); - HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class); - Mockito.when(fos.getPos()).thenReturn((long) 0); + HdfsDataOutputStream fos = mock(HdfsDataOutputStream.class); + when(fos.getPos()).thenReturn((long) 0); NfsConfiguration config = new NfsConfiguration(); config.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false); --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
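For readers following this migration, below is a minimal, self-contained sketch of the JUnit 4 -> JUnit 5 idioms the patch applies throughout: @BeforeAll/@AfterAll lifecycle annotations, @Timeout replacing @Test(timeout = ...), the failure message moving to the last assertion argument, and statically imported Mockito helpers. The class name and the mocked Supplier are hypothetical illustrations, not code from the patch; it assumes only the junit-jupiter dependencies added in the pom.xml above plus Mockito on the test classpath.

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.function.Supplier;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class JUnit5MigrationSketch {

  private static String currentUser;

  @BeforeAll // JUnit 4: @BeforeClass
  public static void setup() {
    currentUser = System.getProperty("user.name");
  }

  @AfterAll // JUnit 4: @AfterClass
  public static void shutdown() {
    currentUser = null;
  }

  @Test
  @Timeout(value = 60) // JUnit 4: @Test(timeout = 60000); @Timeout defaults to seconds
  @SuppressWarnings("unchecked")
  public void testMigratedIdioms() {
    // Static imports replace the Mockito.mock(...) / Mockito.when(...) prefix style.
    Supplier<String> userSupplier = mock(Supplier.class);
    when(userSupplier.get()).thenReturn(currentUser);

    // JUnit 5 moves the failure message from the first argument to the last.
    assertEquals(currentUser, userSupplier.get(), "Incorrect user");
  }
}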