HDFS-10744. Internally optimize path component resolution. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03dea65e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03dea65e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03dea65e

Branch: refs/heads/YARN-2915
Commit: 03dea65e0b17ca2f9460bb6110f6ab3a321b8bf2
Parents: d714030
Author: Kihwal Lee <kih...@apache.org>
Authored: Mon Aug 15 16:44:18 2016 -0500
Committer: Kihwal Lee <kih...@apache.org>
Committed: Mon Aug 15 16:45:44 2016 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  18 ++--
 .../hdfs/server/namenode/FSDirAppendOp.java     |   4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |  22 ++--
 .../hdfs/server/namenode/FSDirDeleteOp.java     |   5 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   8 +-
 .../server/namenode/FSDirErasureCodingOp.java   |   8 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  12 +--
 .../server/namenode/FSDirStatAndListingOp.java  |  27 ++---
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  13 +--
 .../hdfs/server/namenode/FSDirectory.java       | 100 +++++++++----------
 .../hdfs/server/namenode/FSNamesystem.java      |  16 +--
 .../hdfs/server/namenode/TestINodeFile.java     |  53 +++++-----
 16 files changed, 118 insertions(+), 193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0c572b5..296bed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -39,8 +39,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -65,8 +64,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -90,8 +88,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -115,8 +112,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -135,9 +131,8 @@ class FSDirAclOp {
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -155,8 +150,7 @@ class FSDirAclOp {
       FSDirectory fsd, String src) throws IOException {
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     String srcs = FSDirectory.normalizePath(src);
     fsd.readLock();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index 88d706b..daf5c71 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -82,14 +82,12 @@ final class FSDirAppendOp {
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();
 
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
     final String src;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       // Verify that the destination does not exist as a directory already
       final INode inode = iip.getLastINode();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 06942f8..3dc1c30 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -57,11 +57,10 @@ public class FSDirAttrOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedSetPermission(fsd, src, permission);
@@ -79,11 +78,10 @@ public class FSDirAttrOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
@@ -106,12 +104,11 @@ public class FSDirAttrOp {
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
@@ -139,10 +136,9 @@ public class FSDirAttrOp {
     bm.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -187,11 +183,10 @@ public class FSDirAttrOp {
           DFS_STORAGE_POLICY_ENABLED_KEY));
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(src, fsd);
       iip = fsd.getINodesInPath4Write(src);
 
       if (fsd.isPermissionEnabled()) {
@@ -214,11 +209,9 @@ public class FSDirAttrOp {
   static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
       String path) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(path);
     fsd.readLock();
     try {
-      path = fsd.resolvePath(pc, path, pathComponents);
+      path = fsd.resolvePath(pc, path);
       final INodesInPath iip = fsd.getINodesInPath(path, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -237,10 +230,9 @@ public class FSDirAttrOp {
   static long getPreferredBlockSize(FSDirectory fsd, String src)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 6db2ce8..58f7a61 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -97,9 +97,8 @@ class FSDirDeleteOp {
       throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
 
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
     if (!recursive && fsd.isNonEmptyDirectory(iip)) {
       throw new PathIsNotEmptyDirectoryException(src + " is non empty");
@@ -109,7 +108,7 @@ class FSDirDeleteOp {
                           FsAction.ALL, true);
     }
     if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, fsd.normalizePath(src));
+      checkProtectedDescendants(fsd, src);
     }
 
     return deleteInternal(fsn, src, iip, logRetryCache);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index f3e4d46..ba9e9d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -146,8 +146,6 @@ final class FSDirEncryptionZoneOp {
   static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final CipherSuite suite = CipherSuite.convert(cipher);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     final String src;
@@ -157,7 +155,7 @@ final class FSDirEncryptionZoneOp {
 
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(src, suite,
           version, keyName);
       xAttrs.add(ezXAttr);
@@ -180,14 +178,12 @@ final class FSDirEncryptionZoneOp {
   static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final EncryptionZone ret;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath(src, true);
       if (iip.getLastINode() == null) {
         throw new FileNotFoundException("Path not found: " + iip.getPath());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 7405e1f..d46342b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -71,11 +71,9 @@ final class FSDirErasureCodingOp {
 
     String src = srcArg;
     FSPermissionChecker pc = null;
-    byte[][] pathComponents = null;
-    pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     pc = fsn.getPermissionChecker();
     FSDirectory fsd = fsn.getFSDirectory();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip;
     List<XAttr> xAttrs;
     fsd.writeLock();
@@ -224,11 +222,9 @@ final class FSDirErasureCodingOp {
   private static INodesInPath getINodesInPath(final FSNamesystem fsn,
       final String srcArg) throws IOException {
     String src = srcArg;
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(src);
     final FSDirectory fsd = fsn.getFSDirectory();
     final FSPermissionChecker pc = fsn.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsn.isPermissionEnabled()) {
       fsn.getFSDirectory().checkPathAccess(pc, iip, FsAction.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index c74faca..1141422 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -50,10 +50,9 @@ class FSDirMkdirOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c64dfea..e9514ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -64,11 +64,9 @@ class FSDirRenameOp {
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     HdfsFileStatus resultingStat = null;
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     @SuppressWarnings("deprecation")
     final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
@@ -239,11 +237,9 @@ class FSDirRenameOp {
     }
     final FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
     INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
     HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 9fbc9f4..40efc8a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -52,24 +52,20 @@ import static org.apache.hadoop.util.Time.now;
 class FSDirStatAndListingOp {
   static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
       byte[] startAfter, boolean needLocation) throws IOException {
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String startAfterString = DFSUtil.bytes2String(startAfter);
     String src = null;
 
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
     } else {
-      src = FSDirectory.resolvePath(srcArg, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }
 
     // Get file name when startAfter is an INodePath
     if (FSDirectory.isReservedName(startAfterString)) {
-      byte[][] startAfterComponents = FSDirectory
-          .getPathComponentsForReservedPath(startAfterString);
       try {
-        String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
+        String tmp = FSDirectory.resolvePath(startAfterString, fsd);
         byte[][] regularPath = INode.getPathComponents(tmp);
         startAfter = regularPath[regularPath.length - 1];
       } catch (IOException e) {
@@ -110,14 +106,13 @@ class FSDirStatAndListingOp {
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
       fsd.checkPermission(pc, iip, false, null, null, null, null, false);
     } else {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }
     return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
                        resolveLink);
@@ -128,8 +123,7 @@ class FSDirStatAndListingOp {
    */
   static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkTraverse(pc, iip);
@@ -139,9 +133,8 @@ class FSDirStatAndListingOp {
 
   static ContentSummary getContentSummary(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, null, null,
@@ -164,11 +157,10 @@ class FSDirStatAndListingOp {
         "Negative length is not supported. File: " + src);
     CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean isReservedName = FSDirectory.isReservedRawName(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, true);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (fsd.isPermissionEnabled()) {
@@ -628,12 +620,11 @@ class FSDirStatAndListingOp {
 
   static QuotaUsage getQuotaUsage(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPermission(pc, iip, false, null, null, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 44a171a..e78c7b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -53,11 +53,10 @@ class FSDirSymlinkOp {
     }
 
     FSPermissionChecker pc = fsn.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      link = fsd.resolvePath(pc, link, pathComponents);
+      link = fsd.resolvePath(pc, link);
       iip = fsd.getINodesInPath4Write(link, false);
       if (!createParent) {
         fsd.verifyParentDir(iip, link);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 29c663a..8ac59f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -71,15 +71,13 @@ final class FSDirTruncateOp {
     assert fsn.hasWriteLock();
 
     FSDirectory fsd = fsn.getFSDirectory();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final boolean onBlockBoundary;
     Block truncateBlock = null;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath4Write(src, true);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 2e5893f..0f55875 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -119,8 +119,7 @@ class FSDirWriteFileOp {
   static void abandonBlock(
       FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
       String src, String holder) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
 
     final INode inode;
     final INodesInPath iip;
@@ -185,8 +184,7 @@ class FSDirWriteFileOp {
     String clientMachine;
     final boolean isStriped;
 
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
                                            previous, onRetryBlock);
     if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -359,8 +357,7 @@ class FSDirWriteFileOp {
 
     boolean isRawPath = FSDirectory.isReservedRawName(src);
     FSDirectory fsd = fsn.getFSDirectory();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
 
     // Verify that the destination does not exist as a directory already.
@@ -469,9 +466,8 @@ class FSDirWriteFileOp {
       FSPermissionChecker pc, String src,
       CryptoProtocolVersion[] supportedVersions)
       throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSDirectory fsd = fsn.getFSDirectory();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
     // Nothing to do if the path is not within an EZ
     final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
@@ -738,8 +734,7 @@ class FSDirWriteFileOp {
                                         src + " for " + holder);
     }
     checkBlock(fsn, last);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     return completeFileInternal(fsn, src, holder,
         ExtendedBlock.getLocalBlock(last), fileId);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 92686c5..668e9e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -67,8 +67,7 @@ class FSDirXAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
@@ -95,8 +94,7 @@ class FSDirXAttrOp {
     if (!getAll) {
       XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
     }
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -136,8 +134,7 @@ class FSDirXAttrOp {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
     final FSPermissionChecker pc = fsd.getPermissionChecker();
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -164,15 +161,13 @@ class FSDirXAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-        src);
 
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 11a7899..8321fda 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -124,6 +124,8 @@ public class FSDirectory implements Closeable {
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
+  private final static byte[] DOT_DOT =
+      DFSUtil.string2Bytes("..");
 
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
@@ -517,7 +519,6 @@ public class FSDirectory implements Closeable {
    *
    * @param pc The permission checker used when resolving path.
    * @param path The path to resolve.
-   * @param pathComponents path components corresponding to the path
    * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
    *         in {@code src} as is. If the path refers to a path in the "raw"
@@ -525,12 +526,12 @@ public class FSDirectory implements Closeable {
    * @throws FileNotFoundException
    * @throws AccessControlException
    */
-  String resolvePath(FSPermissionChecker pc, String path, byte[][] 
pathComponents)
+  String resolvePath(FSPermissionChecker pc, String path)
       throws FileNotFoundException, AccessControlException {
     if (isReservedRawName(path) && isPermissionEnabled) {
       pc.checkSuperuserPrivilege();
     }
-    return resolvePath(path, pathComponents, this);
+    return resolvePath(path, this);
   }
 
   /**
@@ -1289,13 +1290,6 @@ public class FSDirectory implements Closeable {
     return components.toArray(new byte[components.size()][]);
   }
 
-  /**
-   * @return path components for reserved path, else null.
-   */
-  static byte[][] getPathComponentsForReservedPath(String src) {
-    return !isReservedName(src) ? null : INode.getPathComponents(src);
-  }
-
   /** Check if a given inode name is reserved */
   public static boolean isReservedName(INode inode) {
     return CHECK_RESERVED_FILE_NAMES
@@ -1321,6 +1315,12 @@ public class FSDirectory implements Closeable {
         Path.SEPARATOR + DOT_INODES_STRING);
   }
 
+  static boolean isReservedName(byte[][] components) {
+    return (components.length > 2) &&
+            Arrays.equals(INodeDirectory.ROOT_NAME, components[0]) &&
+            Arrays.equals(DOT_RESERVED, components[1]);
+  }
+
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
    * <p/>
@@ -1340,7 +1340,6 @@ public class FSDirectory implements Closeable {
    * unencrypted file).
    * 
    * @param src path that is being processed
-   * @param pathComponents path components corresponding to the path
    * @param fsd FSDirectory
    * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
@@ -1348,45 +1347,37 @@ public class FSDirectory implements Closeable {
    *         directory, return the non-raw pathname.
    * @throws FileNotFoundException if inodeid is invalid
    */
-  static String resolvePath(String src, byte[][] pathComponents,
+  static String resolvePath(String src,
       FSDirectory fsd) throws FileNotFoundException {
-    final int nComponents = (pathComponents == null) ?
-        0 : pathComponents.length;
-    if (nComponents <= 2) {
-      return src;
-    }
-    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) {
+    byte[][] pathComponents = INode.getPathComponents(src);
+    final int nComponents = pathComponents.length;
+    if (!isReservedName(pathComponents)) {
       /* This is not a /.reserved/ path so do nothing. */
-      return src;
-    }
-
-    if (Arrays.equals(DOT_INODES, pathComponents[2])) {
+    } else if (Arrays.equals(DOT_INODES, pathComponents[2])) {
       /* It's a /.reserved/.inodes path. */
       if (nComponents > 3) {
-        return resolveDotInodesPath(src, pathComponents, fsd);
-      } else {
-        return src;
+        pathComponents = resolveDotInodesPath(pathComponents, fsd);
       }
     } else if (Arrays.equals(RAW, pathComponents[2])) {
       /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
       if (nComponents == 3) {
-        return Path.SEPARATOR;
+        pathComponents = new byte[][]{INodeDirectory.ROOT_NAME};
       } else {
         if (nComponents == 4
             && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
           /* It's /.reserved/raw/.reserved so don't strip */
-          return src;
         } else {
-          return constructRemainingPath("", pathComponents, 3);
+          pathComponents = constructRemainingPath(
+              new byte[][]{INodeDirectory.ROOT_NAME}, pathComponents, 3);
         }
       }
-    } else {
-      /* It's some sort of /.reserved/<unknown> path. Ignore it. */
-      return src;
     }
+    // this double conversion will be unnecessary when resolving returns
+    // INodesInPath (needs components byte[][])
+    return DFSUtil.byteArray2PathString(pathComponents);
   }
 
-  private static String resolveDotInodesPath(String src,
+  private static byte[][] resolveDotInodesPath(
       byte[][] pathComponents, FSDirectory fsd)
       throws FileNotFoundException {
     final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
@@ -1394,48 +1385,47 @@ public class FSDirectory implements Closeable {
     try {
       id = Long.parseLong(inodeId);
     } catch (NumberFormatException e) {
-      throw new FileNotFoundException("Invalid inode path: " + src);
+      throw new FileNotFoundException("Invalid inode path: " +
+          DFSUtil.byteArray2PathString(pathComponents));
     }
     if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) {
-      return Path.SEPARATOR;
+      return new byte[][]{INodeDirectory.ROOT_NAME};
     }
     INode inode = fsd.getInode(id);
     if (inode == null) {
       throw new FileNotFoundException(
-          "File for given inode path does not exist: " + src);
+          "File for given inode path does not exist: " +
+              DFSUtil.byteArray2PathString(pathComponents));
     }
-    
+
     // Handle single ".." for NFS lookup support.
     if ((pathComponents.length > 4)
-        && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
+        && Arrays.equals(pathComponents[4], DOT_DOT)) {
       INode parent = inode.getParent();
       if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
         // inode is root, or its parent is root.
-        return Path.SEPARATOR;
-      } else {
-        return parent.getFullPathName();
+        return new byte[][]{INodeDirectory.ROOT_NAME};
       }
+      return parent.getPathComponents();
     }
-
-    String path = "";
-    if (id != INodeId.ROOT_INODE_ID) {
-      path = inode.getFullPathName();
-    }
-    return constructRemainingPath(path, pathComponents, 4);
+    return constructRemainingPath(
+        inode.getPathComponents(), pathComponents, 4);
   }
 
-  private static String constructRemainingPath(String pathPrefix,
-      byte[][] pathComponents, int startAt) {
-
-    StringBuilder path = new StringBuilder(pathPrefix);
-    for (int i = startAt; i < pathComponents.length; i++) {
-      path.append(Path.SEPARATOR).append(
-          DFSUtil.bytes2String(pathComponents[i]));
+  private static byte[][] constructRemainingPath(byte[][] components,
+      byte[][] extraComponents, int startAt) {
+    int remainder = extraComponents.length - startAt;
+    if (remainder > 0) {
+      // grow the array and copy in the remaining components
+      int pos = components.length;
+      components = Arrays.copyOf(components, pos + remainder);
+      System.arraycopy(extraComponents, startAt, components, pos, remainder);
     }
     if (NameNode.LOG.isDebugEnabled()) {
-      NameNode.LOG.debug("Resolved path is " + path);
+      NameNode.LOG.debug(
+          "Resolved path is " + DFSUtil.byteArray2PathString(components));
     }
-    return path.toString();
+    return components;
   }
 
   INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index fb87d4c..be084c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1790,8 +1790,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
     logAuditEvent(true, "open", srcArg);
 
     if (!isInSafeMode() && res.updateAccessTime()) {
-      byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-          srcArg);
       String src = srcArg;
       writeLock();
       final long now = now();
@@ -1815,7 +1813,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
          * HDFS-7463. A better fix is to change the edit log of SetTime to
          * use inode id instead of a path.
          */
-        src = dir.resolvePath(pc, srcArg, pathComponents);
+        src = dir.resolvePath(pc, srcArg);
         final INodesInPath iip = dir.getINodesInPath(src, true);
         INode inode = iip.getLastINode();
         boolean updateAccessTime = inode != null &&
@@ -2309,12 +2307,11 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
     boolean skipSync = false;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot recover the lease of " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (!inode.isUnderConstruction()) {
@@ -2553,14 +2550,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
     final List<DatanodeStorageInfo> chosen;
     final boolean isStriped;
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       //check safe mode
       checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + 
blk);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
 
       //check lease
       final INode inode;
@@ -3073,14 +3069,13 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + 
clientName);
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 
     FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot fsync file " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INode inode;
       if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
@@ -6878,11 +6873,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
   void checkAccess(String src, FsAction mode) throws IOException {
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = FSDirectory.resolvePath(src, dir);
       final INodesInPath iip = dir.getINodesInPath(src, true);
       INode inode = iip.getLastINode();
       if (inode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03dea65e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 731ff95..3492c2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -857,9 +857,9 @@ public class TestINodeFile {
     byte[][] actual = FSDirectory.getPathComponents(inode);
     DFSTestUtil.checkComponentsEquals(expected, actual);
   }
-  
+
   /**
-   * Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   * Tests for {@link FSDirectory#resolvePath(String, FSDirectory)}
    */
   @Test
   public void testInodePath() throws IOException {
@@ -869,60 +869,53 @@ public class TestINodeFile {
     // For an any inode look up return inode corresponding to "c" from /a/b/c
     FSDirectory fsd = Mockito.mock(FSDirectory.class);
     Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
-    
-    // Null components
-    assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
-    
+
     // Tests for FSDirectory#resolvePath()
     // Non inode regular path
-    byte[][] components = INode.getPathComponents(path);
-    String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String resolvedPath = FSDirectory.resolvePath(path, fsd);
     assertEquals(path, resolvedPath);
-    
+
     // Inode path with no trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String testPath = "/.reserved/.inodes/1";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-    
+
     // Inode path with trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1/");
+    testPath = "/.reserved/.inodes/1/";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-    
+
     // Inode relative path
-    components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    testPath = "/.reserved/.inodes/1/d/e/f";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/a/b/c/d/e/f", resolvedPath);
-    
+
     // A path with just .inodes  returns the path as is
-    String testPath = "/.reserved/.inodes";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    testPath = "/.reserved/.inodes";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(testPath, resolvedPath);
-    
+
     // Root inode path
     testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/", resolvedPath);
-    
+
     // An invalid inode path should remain unresolved
     testPath = "/.invalid/.inodes/1";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(testPath, resolvedPath);
-    
+
     // Test path with nonexistent(deleted or wrong id) inode
     Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
     testPath = "/.reserved/.inodes/1234";
-    components = INode.getPathComponents(testPath);
     try {
-      String realPath = FSDirectory.resolvePath(testPath, components, fsd);
+      String realPath = FSDirectory.resolvePath(testPath, fsd);
       fail("Path should not be resolved:" + realPath);
     } catch (IOException e) {
       assertTrue(e instanceof FileNotFoundException);
     }
   }
-  
+
   private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
       throws IOException {
     final String dirStr = dir.toString();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to