Author: brandonli
Date: Sun Oct 13 06:29:17 2013
New Revision: 1531619

URL: http://svn.apache.org/r1531619
Log:
HDFS-5329. Merging change 1531618 from branch-2
Modified:
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1531619&r1=1531618&r2=1531619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Oct 13 06:29:17 2013
@@ -36,6 +36,9 @@ Release 2.2.1 - UNRELEASED
     HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA
     clusters. (jing9)
 
+    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
+    token. (brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1531619&r1=1531618&r2=1531619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sun Oct 13 06:29:17 2013
@@ -123,6 +123,7 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -3873,11 +3874,27 @@ public class FSNamesystem implements Nam
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    String startAfterString = new String(startAfter);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
 
+      // Get file name when startAfter is an INodePath
+      if (FSDirectory.isReservedName(startAfterString)) {
+        byte[][] startAfterComponents = FSDirectory
+            .getPathComponentsForReservedPath(startAfterString);
+        try {
+          String tmp = FSDirectory.resolvePath(src, startAfterComponents, dir);
+          byte[][] regularPath = INode.getPathComponents(tmp);
+          startAfter = regularPath[regularPath.length - 1];
+        } catch (IOException e) {
+          // Possibly the inode is deleted
+          throw new DirectoryListingStartAfterNotFoundException(
+              "Can't find startAfter " + startAfterString);
+        }
+      }
+
       if (isPermissionEnabled) {
         if (dir.isDir(src)) {
           checkPathAccess(pc, src, FsAction.READ_EXECUTE);

Modified: hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1531619&r1=1531618&r2=1531619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/branch-2.2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sun Oct 13 06:29:17 2013
@@ -31,6 +31,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -920,7 +922,55 @@ public class TestINodeFile {
       assertTrue(parentId == status.getFileId());
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
+  @Test
+  public void testFilesInGetListingOps() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+
+      hdfs.mkdirs(new Path("/tmp"));
Path("/tmp")); + DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0); + DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0); + DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0); + + DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp", + HdfsFileStatus.EMPTY_NAME, false); + assertTrue(dl.getPartialListing().length == 3); + + String f2 = new String("f2"); + dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false); + assertTrue(dl.getPartialListing().length == 1); + + INode f2INode = fsdir.getINode("/tmp/f2"); + String f2InodePath = "/.reserved/.inodes/" + f2INode.getId(); + dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(), + false); + assertTrue(dl.getPartialListing().length == 1); + + // Test the deleted startAfter file + hdfs.delete(new Path("/tmp/f2"), false); + try { + dl = cluster.getNameNodeRpc().getListing("/tmp", + f2InodePath.getBytes(), false); + fail("Didn't get exception for the deleted startAfter token."); + } catch (IOException e) { + assertTrue(e instanceof DirectoryListingStartAfterNotFoundException); + } + + } finally { + if (cluster != null) { + cluster.shutdown(); + } } } }