Repository: hadoop
Updated Branches:
refs/heads/branch-2.8 3dac73080 -> 69bdcd9b1
HDFS-10745. Directly resolve paths into INodesInPath. Contributed by Daryn Sharp.
(cherry picked from commit 922d045e1194e5290c24ec9dc8735f0be4efb953)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69bdcd9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69bdcd9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69bdcd9b
Branch: refs/heads/branch-2.8
Commit: 69bdcd9b1e5704b7b2b9e46bf7d944c10c61e388
Parents: 3dac730
Author: Kihwal Lee <[email protected]>
Authored: Fri Sep 2 11:44:01 2016 -0500
Committer: Kihwal Lee <[email protected]>
Committed: Fri Sep 2 11:44:01 2016 -0500
----------------------------------------------------------------------
.../hadoop/hdfs/server/namenode/FSDirAclOp.java | 29 +++--
.../hdfs/server/namenode/FSDirAppendOp.java | 4 +-
.../hdfs/server/namenode/FSDirAttrOp.java | 23 ++--
.../hdfs/server/namenode/FSDirDeleteOp.java | 4 +-
.../server/namenode/FSDirEncryptionZoneOp.java | 7 +-
.../hdfs/server/namenode/FSDirMkdirOp.java | 4 +-
.../hdfs/server/namenode/FSDirRenameOp.java | 41 +++----
.../server/namenode/FSDirStatAndListingOp.java | 51 ++++-----
.../hdfs/server/namenode/FSDirSymlinkOp.java | 4 +-
.../hdfs/server/namenode/FSDirTruncateOp.java | 4 +-
.../hdfs/server/namenode/FSDirWriteFileOp.java | 84 ++++-----------
.../hdfs/server/namenode/FSDirXAttrOp.java | 25 ++---
.../hdfs/server/namenode/FSDirectory.java | 107 +++++++++++++++----
.../hdfs/server/namenode/FSNamesystem.java | 49 +++------
.../hdfs/server/namenode/INodesInPath.java | 8 ++
.../hadoop/hdfs/server/namenode/TestFsck.java | 3 +-
16 files changed, 226 insertions(+), 221 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 296bed2..2153f02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import java.io.IOException;
@@ -39,11 +38,11 @@ class FSDirAclOp {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
@@ -64,11 +63,11 @@ class FSDirAclOp {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
@@ -88,11 +87,11 @@ class FSDirAclOp {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
@@ -112,11 +111,11 @@ class FSDirAclOp {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
unprotectedRemoveAcl(fsd, iip);
} finally {
@@ -132,11 +131,11 @@ class FSDirAclOp {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
List<AclEntry> newAcl = unprotectedSetAcl(fsd, src, aclSpec, false);
fsd.getEditLog().logSetAcl(src, newAcl);
@@ -150,17 +149,15 @@ class FSDirAclOp {
FSDirectory fsd, String src) throws IOException {
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
- String srcs = FSDirectory.normalizePath(src);
fsd.readLock();
try {
+ INodesInPath iip = fsd.resolvePath(pc, src);
+ src = iip.getPath();
// There is no real inode for the path ending in ".snapshot", so return a
// non-null, unpopulated AclStatus. This is similar to getFileInfo.
- if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) &&
- fsd.getINode4DotSnapshot(srcs) != null) {
+ if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
return new AclStatus.Builder().owner("").group("").build();
}
- INodesInPath iip = fsd.getINodesInPath(srcs, true);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
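The pattern repeated throughout FSDirAclOp (and in most of the files below) is the point of the patch: instead of resolving the path to a string before taking the lock and then re-resolving that string into inodes inside the lock, the path is now resolved exactly once, under the lock, directly into an INodesInPath. A minimal before/after sketch of the caller shape, with the operation body elided:

    // Before: two resolutions, the first outside the lock.
    src = fsd.resolvePath(pc, src);                 // string-level resolution
    fsd.writeLock();
    try {
      iip = fsd.getINodesInPath4Write(src, true);   // resolved again, to inodes
      // ... operation ...
    } finally {
      fsd.writeUnlock();
    }

    // After: one resolution, under the lock, straight to inodes.
    fsd.writeLock();
    try {
      iip = fsd.resolvePathForWrite(pc, src);       // reserved-path checks + resolution
      src = iip.getPath();                          // canonical path for edit log/audit
      // ... operation ...
    } finally {
      fsd.writeUnlock();
    }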
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index f96cf69..f0cbb30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -87,8 +87,8 @@ final class FSDirAppendOp {
final String src;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, srcArg);
- final INodesInPath iip = fsd.getINodesInPath4Write(src);
+ final INodesInPath iip = fsd.resolvePathForWrite(pc, srcArg);
+ src = iip.getPath();
// Verify that the destination does not exist as a directory already
final INode inode = iip.getLastINode();
final String path = iip.getPath();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index e56d131..cd937b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -60,8 +60,8 @@ public class FSDirAttrOp {
INodesInPath iip;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
unprotectedSetPermission(fsd, src, permission);
} finally {
@@ -81,8 +81,8 @@ public class FSDirAttrOp {
INodesInPath iip;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
fsd.checkOwner(pc, iip);
if (!pc.isSuperUser()) {
if (username != null && !pc.getUser().equals(username)) {
@@ -108,8 +108,8 @@ public class FSDirAttrOp {
INodesInPath iip;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
// Write access is required to set access and modification times
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -138,8 +138,8 @@ public class FSDirAttrOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath4Write(src);
+ final INodesInPath iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
@@ -211,8 +211,7 @@ public class FSDirAttrOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.readLock();
try {
- path = fsd.resolvePath(pc, path);
- final INodesInPath iip = fsd.getINodesInPath(path, false);
+ final INodesInPath iip = fsd.resolvePath(pc, path, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
}
@@ -232,8 +231,8 @@ public class FSDirAttrOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.readLock();
try {
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, false);
+ final INodesInPath iip = fsd.resolvePath(pc, src, false);
+ src = iip.getPath();
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 38951e7..8eb3a40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -98,8 +98,8 @@ class FSDirDeleteOp {
FSDirectory fsd = fsn.getFSDirectory();
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
+ final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
+ src = iip.getPath();
if (!recursive && fsd.isNonEmptyDirectory(iip)) {
throw new PathIsNotEmptyDirectoryException(src + " is non empty");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index ba9e9d1..7501fc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -155,7 +155,8 @@ final class FSDirEncryptionZoneOp {
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, srcArg);
+ final INodesInPath iip = fsd.resolvePath(pc, srcArg);
+ src = iip.getPath();
final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(src, suite,
version, keyName);
xAttrs.add(ezXAttr);
@@ -178,13 +179,11 @@ final class FSDirEncryptionZoneOp {
static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
throws IOException {
- final String src;
final INodesInPath iip;
final EncryptionZone ret;
fsd.readLock();
try {
- src = fsd.resolvePath(pc, srcArg);
- iip = fsd.getINodesInPath(src, true);
+ iip = fsd.resolvePath(pc, srcArg);
if (iip.getLastINode() == null) {
throw new FileNotFoundException("Path not found: " + iip.getPath());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 1141422..8aac1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -52,8 +52,8 @@ class FSDirMkdirOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- INodesInPath iip = fsd.getINodesInPath4Write(src);
+ INodesInPath iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 9dbf555..e98d57a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -65,12 +65,14 @@ class FSDirRenameOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
HdfsFileStatus resultingStat = null;
- src = fsd.resolvePath(pc, src);
- dst = fsd.resolvePath(pc, dst);
+ // Rename does not operate on link targets
+ // Do not resolveLink when checking permissions of src and dst
+ INodesInPath srcIIP = fsd.resolvePathForWrite(pc, src, false);
+ INodesInPath dstIIP = fsd.resolvePathForWrite(pc, dst, false);
@SuppressWarnings("deprecation")
- final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
+ final boolean status = renameTo(fsd, pc, srcIIP, dstIIP, logRetryCache);
if (status) {
- INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
+ dstIIP = fsd.getINodesInPath(dstIIP.getPath(), false);
resultingStat = fsd.getAuditFileInfo(dstIIP);
}
return new RenameOldResult(status, resultingStat);
@@ -238,9 +240,8 @@ class FSDirRenameOp {
final FSPermissionChecker pc = fsd.getPermissionChecker();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
- src = fsd.resolvePath(pc, src);
- dst = fsd.resolvePath(pc, dst);
- renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
+ // returns resolved path
+ dst = renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);
@@ -252,11 +253,13 @@ class FSDirRenameOp {
* @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
* INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
*/
- static void renameTo(FSDirectory fsd, FSPermissionChecker pc, String src,
+ static String renameTo(FSDirectory fsd, FSPermissionChecker pc, String src,
String dst, BlocksMapUpdateInfo collectedBlocks, boolean logRetryCache,
Options.Rename... options) throws IOException {
- final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
- final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false);
+ final INodesInPath srcIIP = fsd.resolvePathForWrite(pc, src, false);
+ final INodesInPath dstIIP = fsd.resolvePathForWrite(pc, dst, false);
+ src = srcIIP.getPath();
+ dst = dstIIP.getPath();
if (fsd.isPermissionEnabled()) {
// Rename does not operate on link targets
// Do not resolveLink when checking permissions of src and dst
@@ -283,6 +286,7 @@ class FSDirRenameOp {
fsd.writeUnlock();
}
fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options);
+ return dst;
}
/**
@@ -441,16 +445,17 @@ class FSDirRenameOp {
@Deprecated
@SuppressWarnings("deprecation")
private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc,
- String src, String dst, boolean logRetryCache) throws IOException {
- // Rename does not operate on link targets
- // Do not resolveLink when checking permissions of src and dst
- // Check write access to parent of src
- final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false);
+ INodesInPath srcIIP, INodesInPath dstIIP, boolean logRetryCache)
+ throws IOException {
+ String src = srcIIP.getPath();
+ String dst = dstIIP.getPath();
// Note: We should not be doing this. This is move() not renameTo().
- final String actualDst = fsd.isDir(dst) ?
- dst + Path.SEPARATOR + new Path(src).getName() : dst;
- final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false);
+ if (fsd.isDir(dst)) {
+ dstIIP = INodesInPath.append(dstIIP, null, srcIIP.getLastLocalName());
+ }
+ final String actualDst = dstIIP.getPath();
if (fsd.isPermissionEnabled()) {
+ // Check write access to parent of src
fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
false);
// Check write access to ancestor of dst
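In the deprecated rename path above, the old code re-parsed the destination string to compute the implicit move-into-directory target; the new code derives it from the already-resolved IIP instead. A hypothetical illustration, assuming /user/a/file is renamed onto an existing directory /user/b:

    // srcIIP.getLastLocalName() is the byte form of "file". Appending a null
    // inode under that name to dstIIP ("/user/b") yields an IIP whose
    // getPath() is "/user/b/file" and whose last inode slot is null, since
    // the actual rename target does not exist yet.
    dstIIP = INodesInPath.append(dstIIP, null, srcIIP.getLastLocalName());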
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index e54db70..e0416b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -53,11 +53,14 @@ class FSDirStatAndListingOp {
final String startAfterString = DFSUtil.bytes2String(startAfter);
String src = null;
+ final INodesInPath iip;
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, srcArg);
+ iip = fsd.resolvePath(pc, srcArg);
+ src = iip.getPath();
} else {
src = FSDirectory.resolvePath(srcArg, fsd);
+ iip = fsd.getINodesInPath(src, true);
}
// Get file name when startAfter is an INodePath
@@ -73,7 +76,6 @@ class FSDirStatAndListingOp {
}
}
- final INodesInPath iip = fsd.getINodesInPath(src, true);
boolean isSuperUser = true;
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -106,8 +108,8 @@ class FSDirStatAndListingOp {
}
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, srcArg);
- final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
+ final INodesInPath iip = fsd.resolvePath(pc, srcArg, resolveLink);
+ src = iip.getPath();
fsd.checkPermission(pc, iip, false, null, null, null, null, false);
} else {
src = FSDirectory.resolvePath(srcArg, fsd);
@@ -121,8 +123,7 @@ class FSDirStatAndListingOp {
*/
static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, true);
+ final INodesInPath iip = fsd.resolvePath(pc, src);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
@@ -132,8 +133,7 @@ class FSDirStatAndListingOp {
static ContentSummary getContentSummary(
FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, false);
+ final INodesInPath iip = fsd.resolvePath(pc, src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null,
FsAction.READ_EXECUTE);
@@ -158,8 +158,8 @@ class FSDirStatAndListingOp {
boolean isReservedName = FSDirectory.isReservedRawName(src);
fsd.readLock();
try {
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, true);
+ final INodesInPath iip = fsd.resolvePath(pc, src);
+ src = iip.getPath();
final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -383,24 +383,20 @@ class FSDirStatAndListingOp {
static HdfsFileStatus getFileInfo(
FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
throws IOException {
- String srcs = FSDirectory.normalizePath(src);
- if (FSDirectory.isExactReservedName(src)) {
- return FSDirectory.DOT_RESERVED_STATUS;
- }
-
- if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
- if (fsd.getINode4DotSnapshot(srcs) != null) {
- return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
- HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
- HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
- }
- return null;
- }
-
fsd.readLock();
try {
- final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
- return getFileInfo(fsd, src, iip, isRawPath, true);
+ HdfsFileStatus status = null;
+ final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
+ if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
+ status = FSDirectory.DOT_RESERVED_STATUS;
+ } else if (iip.isDotSnapshotDir()) {
+ if (fsd.getINode4DotSnapshot(iip) != null) {
+ status = FSDirectory.DOT_SNAPSHOT_DIR_STATUS;
+ }
+ } else {
+ status = getFileInfo(fsd, src, iip, isRawPath, true);
+ }
+ return status;
} finally {
fsd.readUnlock();
}
@@ -609,8 +605,7 @@ class FSDirStatAndListingOp {
final INodesInPath iip;
fsd.readLock();
try {
- src = fsd.resolvePath(pc, src);
- iip = fsd.getINodesInPath(src, false);
+ iip = fsd.resolvePath(pc, src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null,
FsAction.READ_EXECUTE);
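getFileInfo now makes its special-case decisions on the resolved IIP, under the read lock, rather than by string inspection up front. The decision order of the hunk above, restated as a sketch:

    // Evaluated against the resolved INodesInPath:
    if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
      return FSDirectory.DOT_RESERVED_STATUS;       // path is exactly /.reserved
    } else if (iip.isDotSnapshotDir()) {            // path ends in /.snapshot
      return fsd.getINode4DotSnapshot(iip) != null
          ? FSDirectory.DOT_SNAPSHOT_DIR_STATUS : null;
    } else {
      return getFileInfo(fsd, src, iip, isRawPath, true);
    }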
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index e78c7b5..4d32993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -56,8 +56,8 @@ class FSDirSymlinkOp {
INodesInPath iip;
fsd.writeLock();
try {
- link = fsd.resolvePath(pc, link);
- iip = fsd.getINodesInPath4Write(link, false);
+ iip = fsd.resolvePathForWrite(pc, link, false);
+ link = iip.getPath();
if (!createParent) {
fsd.verifyParentDir(iip, link);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 164538f..55a7119 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -76,8 +76,8 @@ final class FSDirTruncateOp {
Block truncateBlock = null;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, srcArg);
- iip = fsd.getINodesInPath4Write(src, true);
+ iip = fsd.resolvePathForWrite(pc, srcArg);
+ src = iip.getPath();
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 3534634..8b51a00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -117,25 +116,10 @@ class FSDirWriteFileOp {
static void abandonBlock(
FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
String src, String holder) throws IOException {
- src = fsd.resolvePath(pc, src);
-
- final INode inode;
- final INodesInPath iip;
- if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
- // Older clients may not have given us an inode ID to work with.
- // In this case, we have to try to resolve the path and hope it
- // hasn't changed or been deleted since the file was opened for write.
- iip = fsd.getINodesInPath(src, true);
- inode = iip.getLastINode();
- } else {
- inode = fsd.getInode(fileId);
- iip = INodesInPath.fromINode(inode);
- if (inode != null) {
- src = iip.getPath();
- }
- }
+ final INodesInPath iip = fsd.resolvePath(pc, src, fileId);
+ src = iip.getPath();
FSNamesystem fsn = fsd.getFSNamesystem();
- final INodeFile file = fsn.checkLease(src, holder, inode, fileId);
+ final INodeFile file = fsn.checkLease(iip, holder, fileId);
Preconditions.checkState(file.isUnderConstruction());
Block localBlock = ExtendedBlock.getLocalBlock(b);
@@ -178,8 +162,8 @@ class FSDirWriteFileOp {
final byte storagePolicyID;
String clientMachine;
- src = fsn.dir.resolvePath(pc, src);
- FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
+ INodesInPath iip = fsn.dir.resolvePath(pc, src, fileId);
+ FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
previous, onRetryBlock);
if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. No need to generate new locations.
@@ -229,7 +213,8 @@ class FSDirWriteFileOp {
// Run the full analysis again, since things could have changed
// while chooseTarget() was executing.
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
- FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
+ INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
+ FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
previous, onRetryBlock);
final INodeFile pendingFile = fileState.inode;
src = fileState.path;
@@ -340,8 +325,8 @@ class FSDirWriteFileOp {
boolean isRawPath = FSDirectory.isReservedRawName(src);
FSDirectory fsd = fsn.getFSDirectory();
- src = fsd.resolvePath(pc, src);
- INodesInPath iip = fsd.getINodesInPath4Write(src);
+ INodesInPath iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
// Verify that the destination does not exist as a directory already.
final INode inode = iip.getLastINode();
@@ -450,8 +435,7 @@ class FSDirWriteFileOp {
CryptoProtocolVersion[] supportedVersions)
throws IOException {
FSDirectory fsd = fsn.getFSDirectory();
- src = fsd.resolvePath(pc, src);
- INodesInPath iip = fsd.getINodesInPath4Write(src);
+ INodesInPath iip = fsd.resolvePathForWrite(pc, src);
// Nothing to do if the path is not within an EZ
final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
if (zone == null) {
@@ -587,11 +571,11 @@ class FSDirWriteFileOp {
}
private static FileState analyzeFileState(
- FSNamesystem fsn, String src, long fileId, String clientName,
+ FSNamesystem fsn, INodesInPath iip, long fileId, String clientName,
ExtendedBlock previous, LocatedBlock[] onRetryBlock)
throws IOException {
assert fsn.hasReadLock();
-
+ String src = iip.getPath();
checkBlock(fsn, previous);
onRetryBlock[0] = null;
fsn.checkNameNodeSafeMode("Cannot add block to " + src);
@@ -600,24 +584,7 @@ class FSDirWriteFileOp {
fsn.checkFsObjectLimit();
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
- final INode inode;
- final INodesInPath iip;
- if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
- // Older clients may not have given us an inode ID to work with.
- // In this case, we have to try to resolve the path and hope it
- // hasn't changed or been deleted since the file was opened for write.
- iip = fsn.dir.getINodesInPath4Write(src);
- inode = iip.getLastINode();
- } else {
- // Newer clients pass the inode ID, so we can just get the inode
- // directly.
- inode = fsn.dir.getInode(fileId);
- iip = INodesInPath.fromINode(inode);
- if (inode != null) {
- src = iip.getPath();
- }
- }
- final INodeFile file = fsn.checkLease(src, clientName, inode, fileId);
+ final INodeFile file = fsn.checkLease(iip, clientName, fileId);
BlockInfo lastBlockInFile = file.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
@@ -696,8 +663,8 @@ class FSDirWriteFileOp {
src + " for " + holder);
}
checkBlock(fsn, last);
- src = fsn.dir.resolvePath(pc, src);
- boolean success = completeFileInternal(fsn, src, holder,
+ INodesInPath iip = fsn.dir.resolvePath(pc, src, fileId);
+ boolean success = completeFileInternal(fsn, iip, holder,
ExtendedBlock.getLocalBlock(last),
fileId);
if (success) {
@@ -708,27 +675,16 @@ class FSDirWriteFileOp {
}
private static boolean completeFileInternal(
- FSNamesystem fsn, String src, String holder, Block last, long fileId)
+ FSNamesystem fsn, INodesInPath iip,
+ String holder, Block last, long fileId)
throws IOException {
assert fsn.hasWriteLock();
+ final String src = iip.getPath();
final INodeFile pendingFile;
- final INodesInPath iip;
INode inode = null;
try {
- if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
- // Older clients may not have given us an inode ID to work with.
- // In this case, we have to try to resolve the path and hope it
- // hasn't changed or been deleted since the file was opened for write.
- iip = fsn.dir.getINodesInPath(src, true);
- inode = iip.getLastINode();
- } else {
- inode = fsn.dir.getInode(fileId);
- iip = INodesInPath.fromINode(inode);
- if (inode != null) {
- src = iip.getPath();
- }
- }
- pendingFile = fsn.checkLease(src, holder, inode, fileId);
+ inode = iip.getLastINode();
+ pendingFile = fsn.checkLease(iip, holder, fileId);
} catch (LeaseExpiredException lee) {
if (inode != null && inode.isFile() &&
!inode.asFile().isUnderConstruction()) {
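abandonBlock, getAdditionalBlock, completeFile and analyzeFileState all previously carried an identical if/else on HdfsConstants.GRANDFATHER_INODE_ID; that logic now lives in one place, FSDirectory.resolvePath(pc, src, fileId). A sketch of the consolidated caller shape, using the names from the hunks above:

    // One call covers both client generations:
    //  - fileId == GRANDFATHER_INODE_ID: legacy client, so resolve the path
    //    and hope it has not changed since the file was opened for write
    //  - otherwise: resolve by inode id; the client-supplied path may be stale
    INodesInPath iip = fsd.resolvePath(pc, src, fileId);
    src = iip.getPath();                    // may differ from the client's src
    INodeFile file = fsn.checkLease(iip, holder, fileId);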
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 668e9e8..746fdb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -67,13 +67,13 @@ class FSDirXAttrOp {
FSPermissionChecker pc = fsd.getPermissionChecker();
XAttrPermissionFilter.checkPermissionForApi(
pc, xAttr, FSDirectory.isReservedRawName(src));
- src = fsd.resolvePath(pc, src);
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
xAttrs.add(xAttr);
INodesInPath iip;
fsd.writeLock();
try {
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
checkXAttrChangeAccess(fsd, iip, xAttr, pc);
unprotectedSetXAttrs(fsd, src, xAttrs, flag);
} finally {
@@ -94,12 +94,11 @@ class FSDirXAttrOp {
if (!getAll) {
XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
}
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, true);
+ final INodesInPath iip = fsd.resolvePath(pc, src);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
}
- List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, src);
+ List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
List<XAttr> filteredAll = XAttrPermissionFilter.
filterXAttrsForApi(pc, all, isRawPath);
@@ -134,13 +133,12 @@ class FSDirXAttrOp {
FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
final FSPermissionChecker pc = fsd.getPermissionChecker();
final boolean isRawPath = FSDirectory.isReservedRawName(src);
- src = fsd.resolvePath(pc, src);
- final INodesInPath iip = fsd.getINodesInPath(src, true);
+ final INodesInPath iip = fsd.resolvePath(pc, src);
if (fsd.isPermissionEnabled()) {
/* To access xattr names, you need EXECUTE in the owning directory. */
fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
}
- final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, src);
+ final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, iip);
return XAttrPermissionFilter.
filterXAttrsForApi(pc, all, isRawPath);
}
@@ -167,8 +165,8 @@ class FSDirXAttrOp {
INodesInPath iip;
fsd.writeLock();
try {
- src = fsd.resolvePath(pc, src);
- iip = fsd.getINodesInPath4Write(src);
+ iip = fsd.resolvePathForWrite(pc, src);
+ src = iip.getPath();
checkXAttrChangeAccess(fsd, iip, xAttr, pc);
List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
@@ -427,12 +425,11 @@ class FSDirXAttrOp {
}
}
- private static List<XAttr> getXAttrs(FSDirectory fsd,
- String src) throws IOException {
- String srcs = FSDirectory.normalizePath(src);
+ private static List<XAttr> getXAttrs(FSDirectory fsd, INodesInPath iip)
+ throws IOException {
fsd.readLock();
try {
- INodesInPath iip = fsd.getINodesInPath(srcs, true);
+ String src = iip.getPath();
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getPathSnapshotId();
return XAttrStorage.readINodeXAttrs(fsd.getAttributes(src,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c8acdfb..b339966 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -129,6 +129,11 @@ public class FSDirectory implements Closeable {
null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+ public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
+ new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+ HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+ HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+
INodeDirectory rootDir;
private final FSNamesystem namesystem;
private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -486,12 +491,66 @@ public class FSDirectory implements Closeable {
* @throws FileNotFoundException
* @throws AccessControlException
*/
- String resolvePath(FSPermissionChecker pc, String path)
- throws FileNotFoundException, AccessControlException {
- if (isReservedRawName(path) && isPermissionEnabled) {
+ INodesInPath resolvePath(FSPermissionChecker pc, String src)
+ throws UnresolvedLinkException, FileNotFoundException,
+ AccessControlException {
+ return resolvePath(pc, src, true);
+ }
+
+ INodesInPath resolvePath(FSPermissionChecker pc, String src,
+ boolean resolveLink) throws UnresolvedLinkException,
+ FileNotFoundException, AccessControlException {
+ byte[][] components = INode.getPathComponents(src);
+ if (isPermissionEnabled && pc != null && isReservedRawName(components)) {
pc.checkSuperuserPrivilege();
}
- return resolvePath(path, this);
+ components = resolveComponents(components, this);
+ return INodesInPath.resolve(rootDir, components, resolveLink);
+ }
+
+ INodesInPath resolvePathForWrite(FSPermissionChecker pc, String src)
+ throws UnresolvedLinkException, FileNotFoundException,
+ AccessControlException {
+ return resolvePathForWrite(pc, src, true);
+ }
+
+ INodesInPath resolvePathForWrite(FSPermissionChecker pc, String src,
+ boolean resolveLink) throws UnresolvedLinkException,
+ FileNotFoundException, AccessControlException {
+ INodesInPath iip = resolvePath(pc, src, resolveLink);
+ if (iip.isSnapshot()) {
+ throw new SnapshotAccessControlException(
+ "Modification on a read-only snapshot is disallowed");
+ }
+ return iip;
+ }
+
+ INodesInPath resolvePath(FSPermissionChecker pc, String src, long fileId)
+ throws UnresolvedLinkException, FileNotFoundException,
+ AccessControlException {
+ // Older clients may not have given us an inode ID to work with.
+ // In this case, we have to try to resolve the path and hope it
+ // hasn't changed or been deleted since the file was opened for write.
+ INodesInPath iip;
+ if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
+ iip = resolvePath(pc, src);
+ } else {
+ INode inode = getInode(fileId);
+ if (inode == null) {
+ iip = INodesInPath.fromComponents(INode.getPathComponents(src));
+ } else {
+ iip = INodesInPath.fromINode(inode);
+ }
+ }
+ return iip;
+ }
+
+ // this method can be removed after IIP is used more extensively
+ static String resolvePath(String src,
+ FSDirectory fsd) throws FileNotFoundException {
+ byte[][] pathComponents = INode.getPathComponents(src);
+ pathComponents = resolveComponents(pathComponents, fsd);
+ return DFSUtil.byteArray2PathString(pathComponents);
}
/**
@@ -1265,6 +1324,12 @@ public class FSDirectory implements Closeable {
return CHECK_RESERVED_FILE_NAMES && src.equals(DOT_RESERVED_PATH_PREFIX);
}
+ public static boolean isExactReservedName(byte[][] components) {
+ return CHECK_RESERVED_FILE_NAMES &&
+ (components.length == 2) &&
+ isReservedName(components);
+ }
+
static boolean isReservedRawName(String src) {
return src.startsWith(DOT_RESERVED_PATH_PREFIX +
Path.SEPARATOR + RAW_STRING);
@@ -1276,11 +1341,17 @@ public class FSDirectory implements Closeable {
}
static boolean isReservedName(byte[][] components) {
- return (components.length > 2) &&
+ return (components.length > 1) &&
Arrays.equals(INodeDirectory.ROOT_NAME, components[0]) &&
Arrays.equals(DOT_RESERVED, components[1]);
}
+ static boolean isReservedRawName(byte[][] components) {
+ return (components.length > 2) &&
+ isReservedName(components) &&
+ Arrays.equals(RAW, components[2]);
+ }
+
/**
* Resolve a /.reserved/... path to a non-reserved path.
* <p/>
@@ -1299,19 +1370,18 @@ public class FSDirectory implements Closeable {
* /.reserved/raw/a/b/c is equivalent (they both refer to the same
* unencrypted file).
*
- * @param src path that is being processed
+ * @param pathComponents to be resolved
* @param fsd FSDirectory
* @return if the path indicates an inode, return path after replacing up to
* <inodeid> with the corresponding path of the inode, else the path
- * in {@code src} as is. If the path refers to a path in the "raw"
- * directory, return the non-raw pathname.
+ * in {@code pathComponents} as is. If the path refers to a path in
+ * the "raw" directory, return the non-raw pathname.
* @throws FileNotFoundException if inodeid is invalid
*/
- static String resolvePath(String src,
+ static byte[][] resolveComponents(byte[][] pathComponents,
FSDirectory fsd) throws FileNotFoundException {
- byte[][] pathComponents = INode.getPathComponents(src);
final int nComponents = pathComponents.length;
- if (!isReservedName(pathComponents)) {
+ if (nComponents < 3 || !isReservedName(pathComponents)) {
/* This is not a /.reserved/ path so do nothing. */
} else if (Arrays.equals(DOT_INODES, pathComponents[2])) {
/* It's a /.reserved/.inodes path. */
@@ -1332,9 +1402,7 @@ public class FSDirectory implements Closeable {
}
}
}
- // this double conversion will be unnecessary when resolving returns
- // INodesInPath (needs components byte[][])
- return DFSUtil.byteArray2PathString(pathComponents);
+ return pathComponents;
}
private static byte[][] resolveDotInodesPath(
@@ -1388,15 +1456,12 @@ public class FSDirectory implements Closeable {
return components;
}
- INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
+ INode getINode4DotSnapshot(INodesInPath iip) throws UnresolvedLinkException {
Preconditions.checkArgument(
- src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
- "%s does not end with %s", src,
- HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
-
- final String dirPath = normalizePath(src.substring(0,
- src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
- final INode node = this.getINode(dirPath);
+ iip.isDotSnapshotDir(), "%s does not end with %s",
+ iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
+ final INode node = iip.getINode(-2);
if (node != null && node.isDirectory()
&& node.asDirectory().isSnapshottable()) {
return node;
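For reference, these are the resolution entry points FSDirectory exposes after this patch, collected from the hunks above (throws clauses elided). The instance methods check superuser privilege for /.reserved/raw paths when permissions are enabled, and the ForWrite variants additionally reject read-only snapshot paths:

    INodesInPath resolvePath(pc, src)                        // follows symlinks
    INodesInPath resolvePath(pc, src, resolveLink)
    INodesInPath resolvePath(pc, src, fileId)                // inode-id aware
    INodesInPath resolvePathForWrite(pc, src)                // disallows snapshots
    INodesInPath resolvePathForWrite(pc, src, resolveLink)
    static String resolvePath(src, fsd)                      // legacy, to be removed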
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7496007..d2f85fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1835,8 +1835,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
* HDFS-7463. A better fix is to change the edit log of SetTime to
* use inode id instead of a path.
*/
- src = dir.resolvePath(pc, srcArg);
- final INodesInPath iip = dir.getINodesInPath(src, true);
+ final INodesInPath iip = dir.resolvePath(pc, srcArg);
+ src = iip.getPath();
+
INode inode = iip.getLastINode();
boolean updateAccessTime = inode != null &&
now > inode.getAccessTime() + dir.getAccessTimePrecision();
@@ -2317,8 +2318,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot recover the lease of " + src);
- src = dir.resolvePath(pc, src);
- final INodesInPath iip = dir.getINodesInPath4Write(src);
+ final INodesInPath iip = dir.resolvePathForWrite(pc, src);
+ src = iip.getPath();
final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
if (!inode.isUnderConstruction()) {
return true;
@@ -2569,20 +2570,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
checkOperation(OperationCategory.READ);
//check safe mode
checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" +
blk);
- src = dir.resolvePath(pc, src);
+ final INodesInPath iip = dir.resolvePath(pc, src, fileId);
+ src = iip.getPath();
//check lease
- final INode inode;
- if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
- // Older clients may not have given us an inode ID to work with.
- // In this case, we have to try to resolve the path and hope it
- // hasn't changed or been deleted since the file was opened for write.
- inode = dir.getINode(src);
- } else {
- inode = dir.getInode(fileId);
- if (inode != null) src = inode.getFullPathName();
- }
- final INodeFile file = checkLease(src, clientName, inode, fileId);
+ final INodeFile file = checkLease(iip, clientName, fileId);
clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
preferredblocksize = file.getPreferredBlockSize();
@@ -2640,8 +2632,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
: "Holder " + holder + " does not have any open files.");
}
- INodeFile checkLease(String src, String holder, INode inode, long fileId)
+ INodeFile checkLease(INodesInPath iip, String holder, long fileId)
throws LeaseExpiredException, FileNotFoundException {
+ String src = iip.getPath();
+ INode inode = iip.getLastINode();
assert hasReadLock();
if (inode == null) {
throw new FileNotFoundException("File does not exist: "
@@ -3128,18 +3122,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot fsync file " + src);
- src = dir.resolvePath(pc, src);
- final INode inode;
- if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
- // Older clients may not have given us an inode ID to work with.
- // In this case, we have to try to resolve the path and hope it
- // hasn't changed or been deleted since the file was opened for write.
- inode = dir.getINode(src);
- } else {
- inode = dir.getInode(fileId);
- if (inode != null) src = inode.getFullPathName();
- }
- final INodeFile pendingFile = checkLease(src, clientName, inode, fileId);
+ INodesInPath iip = dir.resolvePath(pc, src, fileId);
+ src = iip.getPath();
+ final INodeFile pendingFile = checkLease(iip, clientName, fileId);
if (lastBlockLength > 0) {
pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
pendingFile, lastBlockLength);
@@ -7463,17 +7448,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
void checkAccess(String src, FsAction mode) throws IOException {
checkOperation(OperationCategory.READ);
+ FSPermissionChecker pc = getPermissionChecker();
readLock();
try {
checkOperation(OperationCategory.READ);
- src = FSDirectory.resolvePath(src, dir);
- final INodesInPath iip = dir.getINodesInPath(src, true);
+ final INodesInPath iip = dir.resolvePath(pc, src);
+ src = iip.getPath();
INode inode = iip.getLastINode();
if (inode == null) {
throw new FileNotFoundException("Path not found");
}
if (isPermissionEnabled) {
- FSPermissionChecker pc = getPermissionChecker();
dir.checkPathAccess(pc, iip, mode);
}
} catch (AccessControlException e) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 7ca82aa..071bb35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -73,6 +73,10 @@ public class INodesInPath {
return new INodesInPath(inodes, path);
}
+ static INodesInPath fromComponents(byte[][] components) {
+ return new INodesInPath(new INode[components.length], components);
+ }
+
/**
* Given some components, create a path name.
* @param components The path components
@@ -434,6 +438,10 @@ public class INodesInPath {
return this.isSnapshot;
}
+ boolean isDotSnapshotDir() {
+ return isDotSnapshotDir(getLastLocalName());
+ }
+
private static String toString(INode inode) {
return inode == null? null: inode.getLocalName();
}
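fromComponents gives resolvePath(pc, src, fileId) something to return when an inode-id lookup misses: an IIP carrying the textual path components but no resolved inodes. A small sketch of the resulting object, assuming INode.getPathComponents splits the path on "/":

    byte[][] comps = INode.getPathComponents("/user/gone");  // hypothetical path
    INodesInPath iip = INodesInPath.fromComponents(comps);
    // iip.getPath() rebuilds "/user/gone" from the components; every inode
    // slot, including iip.getLastINode(), is null, so a later
    // checkLease(iip, ...) fails with FileNotFoundException as before.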
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69bdcd9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index f1c09ef..6883a7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -25,7 +25,6 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
@@ -1200,7 +1199,7 @@ public class TestFsck {
when(fsName.getBlockManager()).thenReturn(blockManager);
when(fsName.getFSDirectory()).thenReturn(fsd);
when(fsd.getFSNamesystem()).thenReturn(fsName);
- when(fsd.getINodesInPath(anyString(), anyBoolean())).thenReturn(iip);
+ when(fsd.resolvePath(any(FSPermissionChecker.class), anyString())).thenReturn(iip);
when(blockManager.getDatanodeManager()).thenReturn(dnManager);
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,