Author: cnauroth
Date: Tue Feb 11 06:00:56 2014
New Revision: 1566988

URL: http://svn.apache.org/r1566988
Log:
Merge trunk to HDFS-4685.
Added:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
      - copied unchanged from r1566986, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
      - copied unchanged from r1566986, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeduplicationMap.java
Modified:
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1566365-1566986

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java Tue Feb 11 06:00:56 2014
@@ -545,7 +545,8 @@ public class RpcProgramNfs3 extends RpcP
       return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
     }
     if (MAX_READ_TRANSFER_SIZE < target.getBytes().length) {
-      return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, null);
+      return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr,
+          new byte[0]);
     }

     return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr,
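The HDFS-5886 hunk above replaces a null symlink payload with an empty byte array so that later serialization of the error response cannot dereference null. A minimal sketch of the idea; the response class and serializer below are hypothetical stand-ins, not the actual NFS3 classes:

    import java.nio.ByteBuffer;

    // Hypothetical sketch: why an empty array is safer than null for an error payload.
    class LinkResponse {
      private final int status;
      private final byte[] path; // never null by construction

      LinkResponse(int status, byte[] path) {
        this.status = status;
        // Normalize: an error response carries no usable path, but the
        // serializer below can still read path.length without a null check.
        this.path = (path != null) ? path : new byte[0];
      }

      ByteBuffer serialize() {
        ByteBuffer buf = ByteBuffer.allocate(8 + path.length);
        buf.putInt(status);
        buf.putInt(path.length); // would throw NullPointerException if path were null
        buf.put(path);
        buf.flip();
        return buf;
      }
    }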
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Feb 11 06:00:56 2014
@@ -335,6 +335,9 @@ Trunk (Unreleased)
     HDFS-5911. The id of a CacheDirective instance does not get serialized in
     the protobuf-fsimage. (Haohui Mai via jing9)

+    HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
+    (Haohui Mai via cnauroth)
+
 Release 2.4.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -360,6 +363,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-4911. Reduce PeerCache timeout to be commensurate with
     dfs.datanode.socket.reuse.keepalive (cmccabe)

+    HDFS-4370. Fix typo Blanacer in DataNode. (Chu Tong via shv)
+
   OPTIMIZATIONS

     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -402,6 +407,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5900. Cannot set cache pool limit of "unlimited" via CacheAdmin.
     (wang)

+    HDFS-5886. Potential null pointer deference in RpcProgramNfs3#readlink()
+    (brandonli)
+
 Release 2.3.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -940,6 +948,12 @@ Release 2.3.0 - UNRELEASED
     HDFS-5873. dfs.http.policy should have higher precedence over
     dfs.https.enable. (Haohui Mai via jing9)

+    HDFS-5837. dfs.namenode.replication.considerLoad should consider
+    decommissioned nodes. (Tao Luo via shv)
+
+    HDFS-5921. Cannot browse file system via NN web UI if any directory has
+    the sticky bit set. (atm)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS

     HDFS-4985. Add storage type to the protocol and expose it in block report
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1566365-1566986

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Feb 11 06:00:56 2014
@@ -633,9 +633,11 @@ public class BlockPlacementPolicyDefault
     // check the communication traffic of the target machine
     if (considerLoad) {
       double avgLoad = 0;
-      int size = clusterMap.getNumOfLeaves();
-      if (size != 0 && stats != null) {
-        avgLoad = (double)stats.getTotalLoad()/size;
+      if (stats != null) {
+        int size = stats.getNumDatanodesInService();
+        if (size != 0) {
+          avgLoad = (double)stats.getTotalLoad()/size;
+        }
       }
       if (node.getXceiverCount() > (2.0 * avgLoad)) {
         logNodeIsNotChosen(storage, "the node is too busy ");
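The HDFS-5837 hunk above changes the denominator of the average-load calculation from the number of topology leaves to the number of in-service datanodes, so decommissioned nodes that carry little or no load no longer drag the average down and cause healthy nodes to be rejected as "too busy". A self-contained sketch of the effect, with made-up numbers:

    // Standalone sketch of the load check above; all counts are illustrative.
    public class ConsiderLoadSketch {
      public static void main(String[] args) {
        int totalLoad = 480;            // sum of xceiver counts across live datanodes
        int numLeaves = 12;             // all nodes in the topology, incl. decommissioned
        int numInService = 8;           // alive and not decommissioned
        int candidateXceiverCount = 90; // load on the node being considered

        double oldAvg = (double) totalLoad / numLeaves;    // 40.0 -> threshold 80
        double newAvg = (double) totalLoad / numInService; // 60.0 -> threshold 120

        // Same rule as BlockPlacementPolicyDefault: reject a target that carries
        // more than twice the average load.
        System.out.println("old rule rejects: " + (candidateXceiverCount > 2.0 * oldAvg)); // true
        System.out.println("new rule rejects: " + (candidateXceiverCount > 2.0 * newAvg)); // false
      }
    }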
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Tue Feb 11 06:00:56 2014
@@ -2494,7 +2494,7 @@ public class DataNode extends Configured
   /**
    * Get current value of the max balancer bandwidth in bytes per second.
    *
-   * @return bandwidth Blanacer bandwidth in bytes per second for this datanode.
+   * @return Balancer bandwidth in bytes per second for this datanode.
    */
   public Long getBalancerBandwidth() {
     DataXceiverServer dxcs =

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java Tue Feb 11 06:00:56 2014
@@ -42,6 +42,12 @@ public interface FSClusterStats {
    *         for writing targets, and false otherwise.
    */
   public boolean isAvoidingStaleDataNodesForWrite();
+
+  /**
+   * Indicates number of datanodes that are in service.
+   * @return Number of datanodes that are both alive and not decommissioned.
+   */
+  public int getNumDatanodesInService();
 }
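The new FSClusterStats#getNumDatanodesInService() accessor lets placement code compute a per-node average without reaching into the network topology. A minimal test-style stand-in is sketched below; the other two methods shown are the ones this diff references elsewhere, and the exact interface shape beyond that is an assumption:

    // Sketch of a trivial FSClusterStats stand-in for unit tests; values are illustrative.
    class StubClusterStats /* implements FSClusterStats */ {
      private final int totalLoad;
      private final int liveNodes;
      private final int decommissionedLiveNodes;

      StubClusterStats(int totalLoad, int liveNodes, int decommissionedLiveNodes) {
        this.totalLoad = totalLoad;
        this.liveNodes = liveNodes;
        this.decommissionedLiveNodes = decommissionedLiveNodes;
      }

      public int getTotalLoad() {
        return totalLoad;
      }

      public boolean isAvoidingStaleDataNodesForWrite() {
        return false;
      }

      // Mirrors the FSNamesystem implementation later in this diff:
      // live datanodes minus those that are decommissioned but still alive.
      public int getNumDatanodesInService() {
        return liveNodes - decommissionedLiveNodes;
      }
    }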
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java Tue Feb 11 06:00:56 2014
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.StringMap;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
@@ -208,7 +208,7 @@ public final class FSImageFormatPBINode
       case FILE:
         return loadINodeFile(n);
       case DIRECTORY:
-        return loadINodeDirectory(n, parent.getStringTable());
+        return loadINodeDirectory(n, parent.getLoaderContext().getStringTable());
       case SYMLINK:
         return loadINodeSymlink(n);
       default:
@@ -228,7 +228,7 @@ public final class FSImageFormatPBINode
         blocks[i] = new BlockInfo(PBHelper.convert(bp.get(i)), replication);
       }
       final PermissionStatus permissions = loadPermission(f.getPermission(),
-          parent.getStringTable());
+          parent.getLoaderContext().getStringTable());

       final INodeFile file = new INodeFile(n.getId(),
           n.getName().toByteArray(), permissions, f.getModificationTime(),
@@ -253,13 +253,14 @@ public final class FSImageFormatPBINode
       assert n.getType() == INodeSection.INode.Type.SYMLINK;
       INodeSection.INodeSymlink s = n.getSymlink();
       final PermissionStatus permissions = loadPermission(s.getPermission(),
-          parent.getStringTable());
+          parent.getLoaderContext().getStringTable());
       return new INodeSymlink(n.getId(), n.getName().toByteArray(), permissions,
           0, 0, s.getTarget().toStringUtf8());
     }

     private void loadRootINode(INodeSection.INode p) {
-      INodeDirectory root = loadINodeDirectory(p, parent.getStringTable());
+      INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext()
+          .getStringTable());
       final Quota.Counts q = root.getQuotaCounts();
       final long nsQuota = q.get(Quota.NAMESPACE);
       final long dsQuota = q.get(Quota.DISKSPACE);
@@ -273,16 +274,17 @@ public final class FSImageFormatPBINode
   public final static class Saver {
     private static long buildPermissionStatus(INodeAttributes n,
-        final StringMap stringMap) {
-      long userId = stringMap.getStringId(n.getUserName());
-      long groupId = stringMap.getStringId(n.getGroupName());
+        final SaverContext.DeduplicationMap<String> stringMap) {
+      long userId = stringMap.getId(n.getUserName());
+      long groupId = stringMap.getId(n.getGroupName());
       return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
           | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
          | n.getFsPermissionShort();
     }

     public static INodeSection.INodeFile.Builder buildINodeFile(
-        INodeFileAttributes file, final StringMap stringMap) {
+        INodeFileAttributes file,
+        final SaverContext.DeduplicationMap<String> stringMap) {
       INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
           .setAccessTime(file.getAccessTime())
           .setModificationTime(file.getModificationTime())
@@ -293,7 +295,8 @@ public final class FSImageFormatPBINode
     }

     public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
-        INodeDirectoryAttributes dir, final StringMap stringMap) {
+        INodeDirectoryAttributes dir,
+        final SaverContext.DeduplicationMap<String> stringMap) {
       Quota.Counts quota = dir.getQuotaCounts();
       INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
           .newBuilder().setModificationTime(dir.getModificationTime())
@@ -416,7 +419,7 @@ public final class FSImageFormatPBINode
     private void save(OutputStream out, INodeDirectory n) throws IOException {
       INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
-          parent.getStringMap());
+          parent.getSaverContext().getStringMap());
       INodeSection.INode r = buildINodeCommon(n)
           .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
       r.writeDelimitedTo(out);
@@ -424,7 +427,7 @@ public final class FSImageFormatPBINode
     private void save(OutputStream out, INodeFile n) throws IOException {
       INodeSection.INodeFile.Builder b = buildINodeFile(n,
-          parent.getStringMap());
+          parent.getSaverContext().getStringMap());

       for (Block block : n.getBlocks()) {
         b.addBlocks(PBHelper.convert(block));
@@ -447,7 +450,7 @@ public final class FSImageFormatPBINode
     private void save(OutputStream out, INodeSymlink n) throws IOException {
       INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
           .newBuilder()
-          .setPermission(buildPermissionStatus(n, parent.getStringMap()))
+          .setPermission(buildPermissionStatus(n, parent.getSaverContext().getStringMap()))
          .setTarget(ByteString.copyFrom(n.getSymlink()));
       INodeSection.INode r = buildINodeCommon(n)
           .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
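buildPermissionStatus() above packs two string-table ids and the 16-bit mode into a single long. A standalone sketch of that packing follows; the mask and offset values used here (24-bit ids, user at bit 40, group at bit 16) are assumptions for illustration, since the constants themselves are not part of this diff:

    // Sketch of the permission encoding used by FSImageFormatPBINode.Saver.buildPermissionStatus().
    // The constants below are illustrative assumptions, not copied from the Hadoop source.
    public class PermissionPackingSketch {
      static final long USER_GROUP_STRID_MASK = (1 << 24) - 1; // assumed 24-bit string ids
      static final int USER_STRID_OFFSET = 40;                 // assumed
      static final int GROUP_STRID_OFFSET = 16;                // assumed

      static long pack(long userId, long groupId, short mode) {
        return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
            | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
            | mode;
      }

      public static void main(String[] args) {
        long packed = pack(1, 2, (short) 0755);
        System.out.println(Long.toHexString(packed)); // prints 100000201ed

        // Unpacking reverses the shifts; the ids index into the string table
        // written by saveStringTableSection().
        long userId = (packed >>> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK;   // 1
        long groupId = (packed >>> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK; // 2
        System.out.println(userId + " " + groupId);
      }
    }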
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java Tue Feb 11 06:00:56 2014
@@ -73,12 +73,56 @@ import com.google.protobuf.CodedOutputSt
 public final class FSImageFormatProtobuf {
   private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class);

+  public static final class LoaderContext {
+    private String[] stringTable;
+
+    public String[] getStringTable() {
+      return stringTable;
+    }
+  }
+
+  public static final class SaverContext {
+    public static class DeduplicationMap<E> {
+      private final Map<E, Integer> map = Maps.newHashMap();
+      private DeduplicationMap() {}
+
+      static <T> DeduplicationMap<T> newMap() {
+        return new DeduplicationMap<T>();
+      }
+
+      int getId(E value) {
+        if (value == null) {
+          return 0;
+        }
+        Integer v = map.get(value);
+        if (v == null) {
+          int nv = map.size() + 1;
+          map.put(value, nv);
+          return nv;
+        }
+        return v;
+      }
+
+      int size() {
+        return map.size();
+      }
+
+      Set<Entry<E, Integer>> entrySet() {
+        return map.entrySet();
+      }
+    }
+    private final DeduplicationMap<String> stringMap = DeduplicationMap.newMap();
+
+    public DeduplicationMap<String> getStringMap() {
+      return stringMap;
+    }
+  }
+
   public static final class Loader implements FSImageFormat.AbstractLoader {
     static final int MINIMUM_FILE_LENGTH = 8;
     private final Configuration conf;
     private final FSNamesystem fsn;
-
-    private String[] stringTable;
+    private final LoaderContext ctx;

     /** The MD5 sum of the loaded file */
     private MD5Hash imgDigest;
@@ -88,6 +132,7 @@ public final class FSImageFormatProtobuf
     Loader(Configuration conf, FSNamesystem fsn) {
       this.conf = conf;
       this.fsn = fsn;
+      this.ctx = new LoaderContext();
     }

     @Override
@@ -100,8 +145,8 @@ public final class FSImageFormatProtobuf
       return imgTxId;
     }

-    public String[] getStringTable() {
-      return stringTable;
+    public LoaderContext getLoaderContext() {
+      return ctx;
     }

     void load(File file) throws IOException {
@@ -226,11 +271,11 @@ public final class FSImageFormatProtobuf
     private void loadStringTableSection(InputStream in) throws IOException {
       StringTableSection s = StringTableSection.parseDelimitedFrom(in);
-      stringTable = new String[s.getNumEntry() + 1];
+      ctx.stringTable = new String[s.getNumEntry() + 1];
       for (int i = 0; i < s.getNumEntry(); ++i) {
         StringTableSection.Entry e = StringTableSection.Entry
             .parseDelimitedFrom(in);
-        stringTable[e.getId()] = e.getStr();
+        ctx.stringTable[e.getId()] = e.getStr();
       }
     }
@@ -269,9 +314,10 @@ public final class FSImageFormatProtobuf
   public static final class Saver {
     private final SaveNamespaceContext context;
+    private final SaverContext saverContext;
+
     private long currentOffset = FSImageUtil.MAGIC_HEADER.length;
     private MD5Hash savedDigest;
-    private StringMap stringMap = new StringMap();

     private FileChannel fileChannel;
     // OutputStream for the section data
@@ -282,6 +328,7 @@ public final class FSImageFormatProtobuf
     Saver(SaveNamespaceContext context) {
       this.context = context;
+      this.saverContext = new SaverContext();
     }

     public MD5Hash getSavedDigest() {
@@ -292,6 +339,10 @@ public final class FSImageFormatProtobuf
       return context;
     }

+    public SaverContext getSaverContext() {
+      return saverContext;
+    }
+
     public void commitSection(FileSummary.Builder summary, SectionName name)
         throws IOException {
       long oldOffset = currentOffset;
@@ -465,48 +516,15 @@ public final class FSImageFormatProtobuf
         throws IOException {
       OutputStream out = sectionOutputStream;
       StringTableSection.Builder b = StringTableSection.newBuilder()
-          .setNumEntry(stringMap.size());
+          .setNumEntry(saverContext.stringMap.size());
       b.build().writeDelimitedTo(out);
-      for (Entry<String, Integer> e : stringMap.entrySet()) {
+      for (Entry<String, Integer> e : saverContext.stringMap.entrySet()) {
         StringTableSection.Entry.Builder eb = StringTableSection.Entry
             .newBuilder().setId(e.getValue()).setStr(e.getKey());
         eb.build().writeDelimitedTo(out);
       }
       commitSection(summary, SectionName.STRING_TABLE);
     }
-
-    public StringMap getStringMap() {
-      return stringMap;
-    }
-  }
-
-  public static class StringMap {
-    private final Map<String, Integer> stringMap;
-
-    public StringMap() {
-      stringMap = Maps.newHashMap();
-    }
-
-    int getStringId(String str) {
-      if (str == null) {
-        return 0;
-      }
-      Integer v = stringMap.get(str);
-      if (v == null) {
-        int nv = stringMap.size() + 1;
-        stringMap.put(str, nv);
-        return nv;
-      }
-      return v;
-    }
-
-    int size() {
-      return stringMap.size();
-    }
-
-    Set<Entry<String, Integer>> entrySet() {
-      return stringMap.entrySet();
-    }
   }

   /**
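The HDFS-5915 refactoring above replaces the string-specific StringMap with a generic SaverContext.DeduplicationMap and moves the loader's string table behind a LoaderContext, but the id scheme is unchanged: null maps to 0, and real values get 1-based ids in insertion order, which is why the loader allocates numEntry + 1 slots. A standalone sketch of that contract (re-implemented here for illustration, not the Hadoop class itself):

    import java.util.HashMap;
    import java.util.Map;

    // Minimal re-implementation of the DeduplicationMap id contract.
    class DedupMapSketch<E> {
      private final Map<E, Integer> map = new HashMap<>();

      int getId(E value) {
        if (value == null) {
          return 0;                // reserved id for "no value"
        }
        Integer v = map.get(value);
        if (v == null) {
          int nv = map.size() + 1; // first real entry gets id 1
          map.put(value, nv);
          return nv;
        }
        return v;
      }

      int size() {
        return map.size();
      }

      public static void main(String[] args) {
        DedupMapSketch<String> users = new DedupMapSketch<>();
        System.out.println(users.getId("hdfs"));   // 1
        System.out.println(users.getId("mapred")); // 2
        System.out.println(users.getId("hdfs"));   // 1 again: deduplicated
        System.out.println(users.getId(null));     // 0

        // Loader side: ids are 1-based, so the lookup table needs size() + 1 slots,
        // matching "new String[s.getNumEntry() + 1]" in loadStringTableSection().
        String[] table = new String[users.size() + 1];
        System.out.println(table.length);          // 3
      }
    }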
Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Feb 11 06:00:56 2014
@@ -6847,7 +6847,12 @@ public class FSNamesystem implements Nam
     return this.blockManager.getDatanodeManager()
         .shouldAvoidStaleDataNodesForWrite();
   }
-
+
+  @Override // FSClusterStats
+  public int getNumDatanodesInService() {
+    return getNumLiveDataNodes() - getNumDecomLiveDataNodes();
+  }
+
   public SnapshotManager getSnapshotManager() {
     return snapshotManager;
   }

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java Tue Feb 11 06:00:56 2014
@@ -115,7 +115,7 @@ public class FSImageFormatPBSnapshot {
       SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
           .parseDelimitedFrom(in);
       INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
-          parent.getStringTable());
+          parent.getLoaderContext().getStringTable());
       int sid = pbs.getSnapshotId();
       INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
           .getInode(root.getId()).asDirectory();
@@ -162,7 +162,8 @@ public class FSImageFormatPBSnapshot {
       if (pbf.hasSnapshotCopy()) {
         INodeSection.INodeFile fileInPb = pbf.getSnapshotCopy();
         PermissionStatus permission = loadPermission(
-            fileInPb.getPermission(), parent.getStringTable());
+            fileInPb.getPermission(), parent.getLoaderContext()
+                .getStringTable());
         copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
             .toByteArray(), permission, null, fileInPb.getModificationTime(),
             fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
@@ -249,8 +250,9 @@ public class FSImageFormatPBSnapshot {
       }else if (diffInPb.hasSnapshotCopy()) {
         INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
         final byte[] name = diffInPb.getName().toByteArray();
-        PermissionStatus permission = loadPermission(dirCopyInPb
-            .getPermission(), parent.getStringTable());
+        PermissionStatus permission = loadPermission(
+            dirCopyInPb.getPermission(), parent.getLoaderContext()
+                .getStringTable());
         long modTime = dirCopyInPb.getModificationTime();
         boolean noQuota = dirCopyInPb.getNsQuota() == -1
             && dirCopyInPb.getDsQuota() == -1;
@@ -312,7 +314,7 @@ public class FSImageFormatPBSnapshot {
       SnapshotSection.Snapshot.Builder sb = SnapshotSection.Snapshot
           .newBuilder().setSnapshotId(s.getId());
       INodeSection.INodeDirectory.Builder db = buildINodeDirectory(sroot,
-          parent.getStringMap());
+          parent.getSaverContext().getStringMap());
       INodeSection.INode r = INodeSection.INode.newBuilder()
           .setId(sroot.getId())
           .setType(INodeSection.INode.Type.DIRECTORY)
@@ -370,7 +372,7 @@ public class FSImageFormatPBSnapshot {
       INodeFileAttributes copy = diff.snapshotINode;
       if (copy != null) {
         fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
-            .setSnapshotCopy(buildINodeFile(copy, parent.getStringMap()));
+            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext().getStringMap()));
       }
       fb.build().writeDelimitedTo(out);
     }
@@ -411,7 +413,7 @@ public class FSImageFormatPBSnapshot {
       if (!diff.isSnapshotRoot() && copy != null) {
         db.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
             .setSnapshotCopy(
-                buildINodeDirectory(copy, parent.getStringMap()));
+                buildINodeDirectory(copy, parent.getSaverContext().getStringMap()));
       }
       // process created list and deleted list
       List<INode> created = diff.getChildrenDiff()

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1566042-1566986

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1566042-1566986

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1566042-1566986

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js Tue Feb 11 06:00:56 2014
@@ -35,8 +35,8 @@
       }

       if (sticky) {
-        var exec = ((parms.perm % 10) & 1) == 1;
-        res[res.length - 1] = exec ? 't' : 'T';
+        var otherExec = ((ctx.current().permission % 10) & 1) == 1;
+        res = res.substr(0, res.length - 1) + (otherExec ? 't' : 'T');
       }

       chunk.write(dir + res);
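The explorer.js fix above (HDFS-5921) renders the sticky bit from the directory's own permission value, using a lowercase 't' when others also have execute and an uppercase 'T' when they do not, and rebuilds the string with substr instead of assigning into it; the old code appears to have read a variable that is not defined at that point and to have relied on in-place string indexing. The same rendering rule, sketched in Java for clarity (the helper name and signature are made up):

    // Hypothetical helper mirroring the explorer.js rule: append the sticky-bit
    // character based on the "others" execute bit of an octal permission.
    public class StickyBitSketch {
      static String applyStickyBit(String rwxString, int octalPerm, boolean sticky) {
        if (!sticky) {
          return rwxString;
        }
        boolean otherExec = ((octalPerm % 10) & 1) == 1; // last octal digit, execute bit
        return rwxString.substring(0, rwxString.length() - 1) + (otherExec ? 't' : 'T');
      }

      public static void main(String[] args) {
        System.out.println(applyStickyBit("rwxrwxrwx", 777, true));  // rwxrwxrwt
        System.out.println(applyStickyBit("rwxrwxrw-", 776, true));  // rwxrwxrwT
        System.out.println(applyStickyBit("rwxrwxrwx", 777, false)); // rwxrwxrwx
      }
    }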
Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1566042-1566986

Propchange: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1566042-1566986

Modified: hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java?rev=1566988&r1=1566987&r2=1566988&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java (original)
+++ hadoop/common/branches/HDFS-4685/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageStorageInspector.java Tue Feb 11 06:00:56 2014
@@ -27,17 +27,12 @@ import static org.junit.Assert.assertTru
 import java.io.File;
 import java.io.IOException;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.junit.Test;

 public class TestFSImageStorageInspector {
-  private static final Log LOG = LogFactory.getLog(
-      TestFSImageStorageInspector.class);
-
   /**
    * Simple test with image, edits, and inprogress edits
    */