[ 
https://issues.apache.org/jira/browse/HADOOP-11452?focusedWorklogId=476144&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-476144
 ]

ASF GitHub Bot logged work on HADOOP-11452:
-------------------------------------------

                Author: ASF GitHub Bot
            Created on: 29/Aug/20 18:39
            Start Date: 29/Aug/20 18:39
    Worklog Time Spent: 10m 
      Work Description: steveloughran commented on a change in pull request 
#743:
URL: https://github.com/apache/hadoop/pull/743#discussion_r479677745



##########
File path: 
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
##########
@@ -195,99 +214,82 @@ public void testWDAbsolute() throws IOException {
     Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
     fSys.mkdirs(absoluteDir);
     fSys.setWorkingDirectory(absoluteDir);
-    Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
+    assertEquals(absoluteDir, fSys.getWorkingDirectory());
   }
   
   @Test
   public void testMkdirs() throws Exception {
     Path testDir = getTestRootPath(fSys, "test/hadoop");
-    Assert.assertFalse(exists(fSys, testDir));
-    Assert.assertFalse(isFile(fSys, testDir));
+    assertFalse(exists(fSys, testDir));
+    assertFalse(isFile(fSys, testDir));
 
     fSys.mkdirs(testDir);
 
-    Assert.assertTrue(exists(fSys, testDir));
-    Assert.assertFalse(isFile(fSys, testDir));
+    assertTrue(exists(fSys, testDir));
+    assertFalse(isFile(fSys, testDir));
     
     fSys.mkdirs(testDir);
 
-    Assert.assertTrue(exists(fSys, testDir));
-    Assert.assertFalse(isFile(fSys, testDir));
+    assertTrue(exists(fSys, testDir));
+    assertFalse(isFile(fSys, testDir));
 
     Path parentDir = testDir.getParent();
-    Assert.assertTrue(exists(fSys, parentDir));
-    Assert.assertFalse(isFile(fSys, parentDir));
+    assertTrue(exists(fSys, parentDir));
+    assertFalse(isFile(fSys, parentDir));
 
     Path grandparentDir = parentDir.getParent();
-    Assert.assertTrue(exists(fSys, grandparentDir));
-    Assert.assertFalse(isFile(fSys, grandparentDir));
+    assertTrue(exists(fSys, grandparentDir));
+    assertFalse(isFile(fSys, grandparentDir));
     
   }
   
   @Test
   public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
     Path testDir = getTestRootPath(fSys, "test/hadoop");
-    Assert.assertFalse(exists(fSys, testDir));
+    assertFalse(exists(fSys, testDir));
     fSys.mkdirs(testDir);
-    Assert.assertTrue(exists(fSys, testDir));
+    assertTrue(exists(fSys, testDir));
     
     createFile(getTestRootPath(fSys, "test/hadoop/file"));
     
     Path testSubDir = getTestRootPath(fSys, "test/hadoop/file/subdir");
-    try {
-      fSys.mkdirs(testSubDir);
-      Assert.fail("Should throw IOException.");
-    } catch (IOException e) {
-      // expected
-    }
-    Assert.assertFalse(exists(fSys, testSubDir));
+    intercept(IOException.class, () ->
+      fSys.mkdirs(testSubDir));
+    assertFalse(exists(fSys, testSubDir));
     
     Path testDeepSubDir = getTestRootPath(fSys, 
"test/hadoop/file/deep/sub/dir");
-    try {
-      fSys.mkdirs(testDeepSubDir);
-      Assert.fail("Should throw IOException.");
-    } catch (IOException e) {
-      // expected
-    }
-    Assert.assertFalse(exists(fSys, testDeepSubDir));
+    intercept(IOException.class, () ->
+        fSys.mkdirs(testDeepSubDir));
+    assertFalse(exists(fSys, testDeepSubDir));
     
   }
   
   @Test
-  public void testGetFileStatusThrowsExceptionForNonExistentFile() 
-    throws Exception {
-    try {
-      fSys.getFileStatus(getTestRootPath(fSys, "test/hadoop/file"));
-      Assert.fail("Should throw FileNotFoundException");
-    } catch (FileNotFoundException e) {
-      // expected
-    }
-  } 
+  public void testGetFileStatusThrowsExceptionForNonExistentFile()
+      throws Exception {
+    intercept(FileNotFoundException.class, () ->
+      fSys.getFileStatus(getTestRootPath(fSys, "test/hadoop/file")));
+  }
   
   @Test
   public void testListStatusThrowsExceptionForNonExistentFile()
-  throws Exception {
-    try {
-      fSys.listStatus(getTestRootPath(fSys, "test/hadoop/file"));
-      Assert.fail("Should throw FileNotFoundException");
-    } catch (FileNotFoundException fnfe) {
-      // expected
-    }
+      throws Exception {
+    intercept(FileNotFoundException.class, () ->
+      fSys.listStatus(getTestRootPath(fSys, "test/hadoop/file")));
   }
 
   @Test
   public void testListStatusThrowsExceptionForUnreadableDir()
   throws Exception {
+    assumePermissionsSupported();
     Path testRootDir = getTestRootPath(fSys, "test/hadoop/dir");
     Path obscuredDir = new Path(testRootDir, "foo");
     Path subDir = new Path(obscuredDir, "bar"); //so foo is non-empty
     fSys.mkdirs(subDir);
     fSys.setPermission(obscuredDir, new FsPermission((short)0)); //no access
     try {
-      fSys.listStatus(obscuredDir);
-      Assert.fail("Should throw IOException");
-    } catch (IOException ioe) {
-      // expected
+      intercept(IOException.class, () ->

Review comment:
       Now that's interesting. I wonder what unix does there. Because HDFS 
should really be doing the same. If I'm in dir /a and don't have exec perms for 
/a/b, I should be able to call stat /a/b, just not ls(/a/b). (Pauses to check. 
OK, on the command line, removing x perms from a dir means ls() returns an 
empty list, not an error.)
   
   (and the hadoop fs -ls -R command returns an empty list too)
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 476144)
    Time Spent: 20m  (was: 10m)

> Make FileSystem.rename(path, path, options) public, specified, tested
> ---------------------------------------------------------------------
>
>                 Key: HADOOP-11452
>                 URL: https://issues.apache.org/jira/browse/HADOOP-11452
>             Project: Hadoop Common
>          Issue Type: Task
>          Components: fs
>    Affects Versions: 2.7.3
>            Reporter: Yi Liu
>            Assignee: Steve Loughran
>            Priority: Major
>              Labels: pull-request-available
>         Attachments: HADOOP-11452-001.patch, HADOOP-11452-002.patch, 
> HADOOP-14452-004.patch, HADOOP-14452-branch-2-003.patch
>
>          Time Spent: 20m
>  Remaining Estimate: 0h
>
> Currently in {{FileSystem}}, {{rename}} with _Rename options_ is protected 
> and with _deprecated_ annotation. And the default implementation is not 
> atomic.
> So this method is not able to be used outside. On the other hand, HDFS has a 
> good and atomic implementation. (Also an interesting thing in {{DFSClient}}, 
> the _deprecated_ annotations for these two methods are opposite).
> It makes sense to make public for {{rename}} with _Rename options_, since 
> it's atomic for rename+overwrite, also it saves RPC calls if user desires 
> rename+overwrite.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to