HDFS-11067. DFS#listStatusIterator(..) should throw FileNotFoundException if the directory deleted before fetching next batch of entries. Contributed by Vinayakumar B.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dbd53ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dbd53ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dbd53ef

Branch: refs/heads/HADOOP-13345
Commit: 8dbd53ef9f34e3e05b159e4f5378e9c2c52c59c5
Parents: b649519
Author: Vinayakumar B <[email protected]>
Authored: Thu Jun 22 17:35:40 2017 +0530
Committer: Vinayakumar B <[email protected]>
Committed: Thu Jun 22 17:37:08 2017 +0530

----------------------------------------------------------------------
 .../src/site/markdown/filesystem/filesystem.md  |  4 +++
 .../main/java/org/apache/hadoop/fs/Hdfs.java    |  2 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |  2 +-
 .../org/apache/hadoop/hdfs/TestFileStatus.java  | 27 ++++++++++++++++++--
 4 files changed, 31 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index b464941..b56666c 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -1185,6 +1185,10 @@ on (possibly remote) filesystems. These filesystems are invariably accessed
 concurrently; the state of the filesystem MAY change between a `hasNext()`
 probe and the invocation of the `next()` call.
 
+During iteration through a `RemoteIterator`, if the directory is deleted on
+remote filesystem, then `hasNext()` or `next()` call may throw
+`FileNotFoundException`.
+
 Accordingly, a robust iteration through a `RemoteIterator` would catch and
 discard `NoSuchElementException` exceptions raised during the process, which
 could be done through the `while(true)` iteration example above, or

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 645f1ad..cd870ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -232,7 +232,7 @@ public class Hdfs extends AbstractFileSystem {
           thisListing = dfs.listPaths(src, thisListing.getLastName(),
               needLocation);
           if (thisListing == null) {
-            return false; // the directory is deleted
+            throw new FileNotFoundException("File " + src + " does not exist.");
           }
           i = 0;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3e09804..f8af4ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1168,7 +1168,7 @@ public class DistributedFileSystem extends FileSystem {
             needLocation);
         statistics.incrementReadOps(1);
         if (thisListing == null) {
-          return false;
+          throw new FileNotFoundException("File " + p + " does not exist.");
         }
         i = 0;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dbd53ef/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index c74bb63..31007dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -317,8 +317,31 @@ public class TestFileStatus {
     assertEquals(file3.toString(), itor.next().getPath().toString());
     assertFalse(itor.hasNext());
-
-    fs.delete(dir, true);
+    itor = fs.listStatusIterator(dir);
+    assertEquals(dir3.toString(), itor.next().getPath().toString());
+    assertEquals(dir4.toString(), itor.next().getPath().toString());
+    fs.delete(dir.getParent(), true);
+    try {
+      itor.hasNext();
+      fail("FileNotFoundException expected");
+    } catch (FileNotFoundException fnfe) {
+    }
+
+    fs.mkdirs(file2);
+    fs.mkdirs(dir3);
+    fs.mkdirs(dir4);
+    fs.mkdirs(dir5);
+    itor = fs.listStatusIterator(dir);
+    int count = 0;
+    try {
+      fs.delete(dir.getParent(), true);
+      while (itor.next() != null) {
+        count++;
+      }
+      fail("FileNotFoundException expected");
+    } catch (FileNotFoundException fnfe) {
+    }
+    assertEquals(2, count);
   }
 }
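
----------------------------------------------------------------------
For readers of the filesystem.md change above, here is a minimal sketch of the
robust-iteration pattern it describes: with this commit, `hasNext()`/`next()`
on the iterator returned by `listStatusIterator(..)` throw
`FileNotFoundException` (instead of silently ending the listing) when the
directory is deleted before the next batch of entries is fetched. The class
name `ListDirRobustly`, the helper `countEntries`, and the `/tmp/some-dir`
path are illustrative placeholders and are not part of this commit.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListDirRobustly {

  /**
   * Iterate over the entries of a directory, tolerating its deletion by
   * another client while the listing is in progress. The FileNotFoundException
   * may surface from either hasNext() or next() once the directory is gone.
   */
  static int countEntries(FileSystem fs, Path dir) throws IOException {
    int entries = 0;
    try {
      RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
      while (it.hasNext()) {
        FileStatus status = it.next();
        entries++;
        System.out.println(status.getPath());
      }
    } catch (FileNotFoundException fnfe) {
      // The directory was deleted before or during iteration; treat the
      // listing as incomplete rather than as a successful (empty) result.
      System.out.println(dir + " disappeared during listing after "
          + entries + " entries");
    }
    return entries;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical usage: the default-configured FileSystem and the path
    // below are placeholders for a real client setup.
    FileSystem fs = FileSystem.get(new Configuration());
    countEntries(fs, new Path("/tmp/some-dir"));
  }
}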
