Author: arp
Date: Tue Nov 19 17:26:23 2013
New Revision: 1543510

URL: http://svn.apache.org/r1543510
Log:
Merging r1543111 through r1543509 from trunk to branch HDFS-2832
Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/
      - copied from r1543509, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2/
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
      - copied unchanged from r1543509, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1543111-1543509

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Nov 19 17:26:23 2013
@@ -375,6 +375,12 @@ Trunk (Unreleased)
 
     HDFS-5320. Add datanode caching metrics. (wang)
 
+    HDFS-5520. loading cache path directives from edit log doesn't update
+    nextEntryId (cmccabe)
+
+    HDFS-5512. CacheAdmin -listPools fails with NPE when user lacks permissions
+    to view all pools (awang via cmccabe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -496,6 +502,10 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5489. Use TokenAspect in WebHDFSFileSystem. (Haohui Mai via jing9)
 
+    HDFS-5393. Serve bootstrap and jQuery locally. (Haohui Mai via jing9)
+
+    HDFS-5073. TestListCorruptFileBlocks fails intermittently. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -566,6 +576,9 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5502. Fix HTTPS support in HsftpFileSystem. (Haohui Mai via jing9)
 
+    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart
+    leads nn safemode. (jing9)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml Tue Nov 19 17:26:23 2013
@@ -547,8 +547,10 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <exclude>src/main/docs/releasenotes.html</exclude>
             <exclude>src/contrib/**</exclude>
             <exclude>src/site/resources/images/*</exclude>
+            <exclude>src/main/webapps/static/bootstrap-3.0.2/**</exclude>
             <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
             <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
+            <exclude>src/main/webapps/static/jquery-1.10.2.min.js</exclude>
             <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
             <exclude>src/main/webapps/hdfs/explorer-block-info.dust.html</exclude>
             <exclude>src/main/webapps/hdfs/explorer.dust.html</exclude>

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1543111-1543509

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java Tue Nov 19 17:26:23 2013
@@ -249,7 +249,7 @@ public final class CacheManager {
 
   private long getNextEntryId() throws IOException {
     assert namesystem.hasWriteLock();
-    if (nextEntryId == Long.MAX_VALUE) {
+    if (nextEntryId >= Long.MAX_VALUE - 1) {
       throw new IOException("No more available IDs.");
     }
     return nextEntryId++;
@@ -357,6 +357,17 @@ public final class CacheManager {
       // We are loading an entry from the edit log.
       // Use the ID from the edit log.
       id = directive.getId();
+      if (id <= 0) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is not positive.");
+      }
+      if (id >= Long.MAX_VALUE) {
+        throw new InvalidRequestException("can't add an ID " +
+            "of " + id + ": it is too big.");
+      }
+      if (nextEntryId <= id) {
+        nextEntryId = id + 1;
+      }
     } else {
       // Add a new entry with the next available ID.
       id = getNextEntryId();
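The two CacheManager hunks above are the HDFS-5520 fix: an ID replayed from the edit log must push the allocator past itself, otherwise a later getNextEntryId() could hand out a duplicate ID. A minimal standalone sketch of that invariant follows; EntryIdAllocator is a hypothetical class for illustration, not the real CacheManager API.

import java.io.IOException;

/**
 * Sketch of the ID-allocation invariant enforced above: IDs replayed from
 * the edit log advance nextEntryId, so fresh allocations never collide.
 */
class EntryIdAllocator {
  private long nextEntryId = 1;

  /** Non-replay path: hand out the next fresh ID. */
  long allocate() throws IOException {
    if (nextEntryId >= Long.MAX_VALUE - 1) {
      throw new IOException("No more available IDs.");
    }
    return nextEntryId++;
  }

  /** Replay path: accept an ID read from the log and bump the allocator. */
  void recordReplayed(long id) throws IOException {
    if (id <= 0 || id >= Long.MAX_VALUE) {
      throw new IOException("can't add an ID of " + id);
    }
    if (nextEntryId <= id) {
      nextEntryId = id + 1; // the missing update that HDFS-5520 adds
    }
  }
}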
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Tue Nov 19 17:26:23 2013
@@ -32,6 +32,7 @@ import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
@@ -678,6 +680,12 @@ public class FSImageFormat {
       if (underConstruction) {
         clientName = FSImageSerialization.readString(in);
         clientMachine = FSImageSerialization.readString(in);
+        // convert the last block to BlockUC
+        if (blocks != null && blocks.length > 0) {
+          BlockInfo lastBlk = blocks[blocks.length - 1];
+          blocks[blocks.length - 1] = new BlockInfoUnderConstruction(
+              lastBlk, replication);
+        }
       }
     }
   }
@@ -690,10 +698,15 @@
       }
       final INodeFile file = new INodeFile(inodeId, localName, permissions,
           modificationTime, atime, blocks, replication, blockSize);
-      return fileDiffs != null? new INodeFileWithSnapshot(file, fileDiffs)
-          : underConstruction? new INodeFileUnderConstruction(
-              file, clientName, clientMachine, null)
-          : file;
+      if (underConstruction) {
+        INodeFileUnderConstruction fileUC = new INodeFileUnderConstruction(
+            file, clientName, clientMachine, null);
+        return fileDiffs == null ? fileUC :
+            new INodeFileUnderConstructionWithSnapshot(fileUC, fileDiffs);
+      } else {
+        return fileDiffs == null ? file :
+            new INodeFileWithSnapshot(file, fileDiffs);
+      }
     } else if (numBlocks == -1) {
       //directory
@@ -821,8 +834,20 @@
 
       // verify that file exists in namespace
       String path = cons.getLocalName();
-      final INodesInPath iip = fsDir.getLastINodeInPath(path);
-      INodeFile oldnode = INodeFile.valueOf(iip.getINode(0), path);
+      INodeFile oldnode = null;
+      boolean inSnapshot = false;
+      if (path != null && FSDirectory.isReservedName(path) &&
+          LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+        // TODO: for HDFS-5428, we use reserved path for those INodeFileUC in
+        // snapshot. If we support INode ID in the layout version, we can use
+        // the inode id to find the oldnode.
+        oldnode = namesystem.dir.getInode(cons.getId()).asFile();
+        inSnapshot = true;
+      } else {
+        final INodesInPath iip = fsDir.getLastINodeInPath(path);
+        oldnode = INodeFile.valueOf(iip.getINode(0), path);
+      }
+
       cons.setLocalName(oldnode.getLocalNameBytes());
       INodeReference parentRef = oldnode.getParentReference();
       if (parentRef != null) {
@@ -833,11 +858,23 @@ public class FSImageFormat {
 
       if (oldnode instanceof INodeFileWithSnapshot) {
         cons = new INodeFileUnderConstructionWithSnapshot(cons,
-            ((INodeFileWithSnapshot)oldnode).getDiffs());
+            ((INodeFileWithSnapshot) oldnode).getDiffs());
       }
 
-      fsDir.replaceINodeFile(path, oldnode, cons);
-      namesystem.leaseManager.addLease(cons.getClientName(), path);
+      if (!inSnapshot) {
+        fsDir.replaceINodeFile(path, oldnode, cons);
+        namesystem.leaseManager.addLease(cons.getClientName(), path);
+      } else {
+        if (parentRef != null) {
+          // replace oldnode with cons
+          parentRef.setReferredINode(cons);
+        } else {
+          // replace old node in its parent's children list and deleted list
+          oldnode.getParent().replaceChildFileInSnapshot(oldnode, cons);
+          namesystem.dir.addToInodeMap(cons);
+          updateBlocksMap(cons);
+        }
+      }
     }
   }
@@ -916,6 +953,9 @@
     /** The MD5 checksum of the file that was written */
     private MD5Hash savedDigest;
     private final ReferenceMap referenceMap = new ReferenceMap();
+
+    private final Map<Long, INodeFileUnderConstruction> snapshotUCMap =
+        new HashMap<Long, INodeFileUnderConstruction>();
 
     /** @throws IllegalStateException if the instance has not yet saved an image */
     private void checkSaved() {
@@ -992,14 +1032,22 @@
       // save the root
       saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
       // save the rest of the nodes
-      saveImage(fsDir.rootDir, out, true, counter);
+      saveImage(fsDir.rootDir, out, true, false, counter);
       prog.endStep(Phase.SAVING_CHECKPOINT, step);
       // Now that the step is finished, set counter equal to total to adjust
       // for possible under-counting due to reference inodes.
       prog.setCount(Phase.SAVING_CHECKPOINT, step,
           fsDir.rootDir.numItemsInTree());
       // save files under construction
-      sourceNamesystem.saveFilesUnderConstruction(out);
+      // TODO: for HDFS-5428, since we cannot break the compatibility of
+      // fsimage, we store part of the under-construction files that are only
+      // in snapshots in this "under-construction-file" section. As a
+      // temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
+      // paths, so that when loading fsimage we do not put them into the lease
+      // map. In the future, we can remove this hack when we can bump the
+      // layout version.
+      sourceNamesystem.saveFilesUnderConstruction(out, snapshotUCMap);
+
       context.checkCancelled();
       sourceNamesystem.saveSecretManagerState(out, sdPath);
       context.checkCancelled();
@@ -1024,20 +1072,31 @@
      * Save children INodes.
      * @param children The list of children INodes
      * @param out The DataOutputStream to write
+     * @param inSnapshot Whether the parent directory or its ancestor is in
+     *                   the deleted list of some snapshot (caused by rename or
+     *                   deletion)
      * @param counter Counter to increment for namenode startup progress
      * @return Number of children that are directory
      */
-    private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out,
-        Counter counter) throws IOException {
+    private int saveChildren(ReadOnlyList<INode> children,
+        DataOutputStream out, boolean inSnapshot, Counter counter)
+        throws IOException {
       // Write normal children INode.
       out.writeInt(children.size());
       int dirNum = 0;
       int i = 0;
       for(INode child : children) {
         // print all children first
+        // TODO: for HDFS-5428, we cannot change the format/content of fsimage
+        // here, thus even if the parent directory is in snapshot, we still
+        // do not handle INodeUC as those stored in deleted list
        saveINode2Image(child, out, false, referenceMap, counter);
         if (child.isDirectory()) {
           dirNum++;
+        } else if (inSnapshot && child.isFile()
+            && child.asFile().isUnderConstruction()) {
+          this.snapshotUCMap.put(child.getId(),
+              (INodeFileUnderConstruction) child.asFile());
         }
         if (i++ % 50 == 0) {
           context.checkCancelled();
@@ -1054,14 +1113,15 @@
      *
      * @param current The current node
      * @param out The DataoutputStream to write the image
-     * @param snapshot The possible snapshot associated with the current node
      * @param toSaveSubtree Whether or not to save the subtree to fsimage. For
      *                      reference node, its subtree may already have been
      *                      saved before.
+     * @param inSnapshot Whether the current directory is in snapshot
      * @param counter Counter to increment for namenode startup progress
      */
     private void saveImage(INodeDirectory current, DataOutputStream out,
-        boolean toSaveSubtree, Counter counter) throws IOException {
+        boolean toSaveSubtree, boolean inSnapshot, Counter counter)
+        throws IOException {
       // write the inode id of the directory
       out.writeLong(current.getId());
 
@@ -1090,7 +1150,7 @@
       }
 
       // 3. Write children INode
-      dirNum += saveChildren(children, out, counter);
+      dirNum += saveChildren(children, out, inSnapshot, counter);
 
       // 4. Write DirectoryDiff lists, if there is any.
       SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap);
@@ -1105,14 +1165,14 @@
         // make sure we only save the subtree under a reference node once
         boolean toSave = child.isReference() ?
            referenceMap.toProcessSubtree(child.getId()) : true;
-        saveImage(child.asDirectory(), out, toSave, counter);
+        saveImage(child.asDirectory(), out, toSave, inSnapshot, counter);
       }
       if (snapshotDirs != null) {
         for (INodeDirectory subDir : snapshotDirs) {
           // make sure we only save the subtree under a reference node once
           boolean toSave = subDir.getParentReference() != null ?
              referenceMap.toProcessSubtree(subDir.getId()) : true;
-          saveImage(subDir, out, toSave, counter);
+          saveImage(subDir, out, toSave, true, counter);
         }
       }
     }
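The saveChildren/saveImage changes above reduce to one idea: an inSnapshot flag is threaded down the recursion, turns true once the walk descends into a directory reachable only through a snapshot's deleted list, and causes under-construction files found below that point to be collected into snapshotUCMap. A simplified sketch of that traversal pattern; Node and SnapshotUCCollector are hypothetical stand-ins for the real inode classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical simplified tree, for illustration only.
class Node {
  long id;
  boolean isDir;
  boolean underConstruction;
  List<Node> children = new ArrayList<Node>();
  List<Node> snapshotDeleted = new ArrayList<Node>(); // reachable only via snapshots
}

class SnapshotUCCollector {
  private final Map<Long, Node> snapshotUCMap = new HashMap<Long, Node>();

  /** The inSnapshot flag sticks once we descend through a deleted list. */
  void save(Node dir, boolean inSnapshot) {
    for (Node child : dir.children) {
      if (child.isDir) {
        save(child, inSnapshot);
      } else if (inSnapshot && child.underConstruction) {
        // only snapshot-only UC files are collected for the UC section
        snapshotUCMap.put(child.id, child);
      }
    }
    for (Node deleted : dir.snapshotDeleted) {
      if (deleted.isDir) {
        save(deleted, true); // everything below exists only in a snapshot
      }
    }
  }
}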
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Tue Nov 19 17:26:23 2013
@@ -5921,19 +5921,40 @@ public class FSNamesystem implements Nam
   /**
    * Serializes leases.
    */
-  void saveFilesUnderConstruction(DataOutputStream out) throws IOException {
+  void saveFilesUnderConstruction(DataOutputStream out,
+      Map<Long, INodeFileUnderConstruction> snapshotUCMap) throws IOException {
     // This is run by an inferior thread of saveNamespace, which holds a read
     // lock on our behalf. If we took the read lock here, we could block
     // for fairness if a writer is waiting on the lock.
     synchronized (leaseManager) {
       Map<String, INodeFileUnderConstruction> nodes =
           leaseManager.getINodesUnderConstruction();
-      out.writeInt(nodes.size()); // write the size
+      for (Map.Entry<String, INodeFileUnderConstruction> entry
+          : nodes.entrySet()) {
+        // TODO: for HDFS-5428, because of rename operations, some
+        // under-construction files that are in the current fs directory can
+        // also be captured in the snapshotUCMap. We should remove them from
+        // the snapshotUCMap.
+        snapshotUCMap.remove(entry.getValue().getId());
+      }
+
+      out.writeInt(nodes.size() + snapshotUCMap.size()); // write the size
       for (Map.Entry<String, INodeFileUnderConstruction> entry
            : nodes.entrySet()) {
         FSImageSerialization.writeINodeUnderConstruction(
             out, entry.getValue(), entry.getKey());
       }
+      for (Map.Entry<Long, INodeFileUnderConstruction> entry
+          : snapshotUCMap.entrySet()) {
+        // for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
+        // as their paths
+        StringBuilder b = new StringBuilder();
+        b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
+            .append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
+            .append(Path.SEPARATOR).append(entry.getValue().getId());
+        FSImageSerialization.writeINodeUnderConstruction(
+            out, entry.getValue(), b.toString());
+      }
     }
   }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Tue Nov 19 17:26:23 2013
@@ -204,7 +204,24 @@ public class INodeDirectory extends INod
     clear();
     return newDir;
   }
-  
+
+  /**
+   * Used when loading a fileUC from fsimage. The file to be replaced is
+   * actually only in a snapshot, and thus may not be contained in the
+   * children list. See HDFS-5428 for details.
+   */
+  public void replaceChildFileInSnapshot(INodeFile oldChild,
+      final INodeFile newChild) {
+    if (children != null) {
+      final int i = searchChildren(newChild.getLocalNameBytes());
+      if (i >= 0 && children.get(i).getId() == oldChild.getId()) {
+        // no need to consider reference node here, since we already do the
+        // replacement in FSImageFormat.Loader#loadFilesUnderConstruction
+        children.set(i, newChild);
+      }
+    }
+  }
+
   /** Replace the given child with a new child. */
   public void replaceChild(INode oldChild, final INode newChild,
       final INodeMap inodeMap) {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Tue Nov 19 17:26:23 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
@@ -75,7 +76,7 @@ public class INodeDirectoryWithSnapshot
         final INode oldChild, final INode newChild) {
       final List<INode> list = getList(type);
       final int i = search(list, oldChild.getLocalNameBytes());
-      if (i < 0) {
+      if (i < 0 || list.get(i).getId() != oldChild.getId()) {
        return false;
       }
 
@@ -593,6 +594,14 @@ public class INodeDirectoryWithSnapshot
   }
 
   @Override
+  public void replaceChildFileInSnapshot(final INodeFile oldChild,
+      final INodeFile newChild) {
+    super.replaceChildFileInSnapshot(oldChild, newChild);
+    diffs.replaceChild(ListType.DELETED, oldChild, newChild);
+    diffs.replaceChild(ListType.CREATED, oldChild, newChild);
+  }
+
+  @Override
   public void replaceChild(final INode oldChild, final INode newChild,
       final INodeMap inodeMap) {
     super.replaceChild(oldChild, newChild, inodeMap);
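The second added loop in the FSNamesystem hunk serializes snapshot-only under-construction files under the reserved name "/.reserved/.inodes/<inodeid>", which the loader recognizes via FSDirectory.isReservedName() and resolves by inode id instead of by path. A small illustrative helper follows; the class is hypothetical, though the constant values mirror FSDirectory's DOT_RESERVED_PATH_PREFIX and DOT_INODES_STRING.

import org.apache.hadoop.fs.Path;

/**
 * Illustrative helper (not part of HDFS): builds the reserved inode path
 * used above for snapshot-only under-construction files.
 */
class ReservedInodePaths {
  // Values match FSDirectory.DOT_RESERVED_PATH_PREFIX / DOT_INODES_STRING.
  static final String DOT_RESERVED_PATH_PREFIX = Path.SEPARATOR + ".reserved";
  static final String DOT_INODES_STRING = ".inodes";

  /** e.g. toReservedPath(16392) returns "/.reserved/.inodes/16392". */
  static String toReservedPath(long inodeId) {
    return new StringBuilder()
        .append(DOT_RESERVED_PATH_PREFIX)
        .append(Path.SEPARATOR).append(DOT_INODES_STRING)
        .append(Path.SEPARATOR).append(inodeId)
        .toString();
  }
}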
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Tue Nov 19 17:26:23 2013
@@ -741,14 +741,15 @@ public class CacheAdmin extends Configur
       RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
       while (iter.hasNext()) {
         CachePoolInfo info = iter.next();
+        String[] row = new String[5];
         if (name == null || info.getPoolName().equals(name)) {
-          listing.addRow(new String[] {
-              info.getPoolName(),
-              info.getOwnerName(),
-              info.getGroupName(),
-              info.getMode().toString(),
-              info.getWeight().toString(),
-          });
+          row[0] = info.getPoolName();
+          row[1] = info.getOwnerName();
+          row[2] = info.getGroupName();
+          row[3] = info.getMode() != null ? info.getMode().toString() : null;
+          row[4] =
+              info.getWeight() != null ? info.getWeight().toString() : null;
+          listing.addRow(row);
           ++numResults;
           if (name != null) {
             break;

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java Tue Nov 19 17:26:23 2013
@@ -59,6 +59,9 @@ public class TableListing {
     }
 
     private void addRow(String val) {
+      if (val == null) {
+        val = "";
+      }
       if ((val.length() + 1) > maxWidth) {
         maxWidth = val.length() + 1;
       }
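Together, the CacheAdmin and TableListing hunks above are the HDFS-5512 NPE fix: a pool's mode and weight come back null when the caller lacks permission to view them, so each cell is now built null-tolerantly and addRow() renders null as empty text. A minimal sketch of the pattern, with hypothetical names:

/**
 * Sketch (hypothetical class) of the null-tolerant cell handling above:
 * never call toString() on a field the server may have withheld.
 */
class NullSafeCells {
  /** Mirror of the row-building fix: null stays null until display time. */
  static String cell(Object maybeHidden) {
    return maybeHidden != null ? maybeHidden.toString() : null;
  }

  /** Mirror of TableListing.addRow(): render a null cell as empty text. */
  static String render(String val) {
    return val == null ? "" : val;
  }

  public static void main(String[] args) {
    Integer weight = null; // e.g. hidden from an unprivileged user
    System.out.println("[" + render(cell(weight)) + "]"); // prints "[]", no NPE
  }
}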
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1542123-1543509

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Tue Nov 19 17:26:23 2013
@@ -18,7 +18,7 @@
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-<link rel="stylesheet" type="text/css" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
+<link rel="stylesheet" type="text/css" href="/static/bootstrap-3.0.2/css/bootstrap.min.css" />
 <link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
 <title>Namenode information</title>
 </head>
@@ -36,8 +36,8 @@
   <div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
 </div>
 
-<script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js">
-</script><script type="text/javascript" src="//netdna.bootstrapcdn.com/bootstrap/3.0.0/js/bootstrap.min.js">
+<script type="text/javascript" src="/static/jquery-1.10.2.min.js">
+</script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
 </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
 </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
 </script><script type="text/javascript" src="dfs-dust.js">

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html Tue Nov 19 17:26:23 2013
@@ -18,7 +18,7 @@
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-    <link rel="stylesheet" type="text/css" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
+    <link rel="stylesheet" type="text/css" href="/static/bootstrap-3.0.2/css/bootstrap.min.css" />
     <link rel="stylesheet" type="text/css" href="/static/hadoop.css" />
     <title>Browsing HDFS</title>
 </head>
@@ -73,8 +73,8 @@
     <br />
     <div id="panel"></div>
   </div>
-  <script type="text/javascript" src="//ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js">
-  </script><script type="text/javascript" src="//netdna.bootstrapcdn.com/bootstrap/3.0.0/js/bootstrap.min.js">
+  <script type="text/javascript" src="/static/jquery-1.10.2.min.js">
+  </script><script type="text/javascript" src="/static/bootstrap-3.0.2/js/bootstrap.min.js">
   </script><script type="text/javascript" src="/static/dust-full-2.0.0.min.js">
   </script><script type="text/javascript" src="/static/dust-helpers-1.1.1.min.js">
   </script><script type="text/javascript" src="dfs-dust.js">

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Tue Nov 19 17:26:23 2013
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.protocolPB
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetCache.PageRounder;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
@@ -91,6 +92,10 @@ public class TestFsDatasetCache {
   private static PageRounder rounder = new PageRounder();
   private static CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Nov 19 17:26:23 2013
@@ -442,6 +442,7 @@ public class TestListCorruptFileBlocks {
 
   /**
    * Test if NN.listCorruptFiles() returns the right number of results.
+   * The corrupt blocks are detected by the BlockPoolSliceScanner.
    * Also, test that DFS.listCorruptFileBlocks can make multiple successive
    * calls.
    */
@@ -450,7 +451,6 @@ public class TestListCorruptFileBlocks {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 15); // datanode scans directories
       conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
@@ -491,6 +491,13 @@ public class TestListCorruptFileBlocks {
         }
       }
 
+      // Occasionally the BlockPoolSliceScanner can run before we have removed
+      // the blocks. Restart the Datanode to trigger the scanner into running
+      // once more.
+      LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
+      cluster.restartDataNodes();
+      cluster.waitActive();
+
       badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Tue Nov 19 17:26:23 2013
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -85,6 +86,10 @@ public class TestPathBasedCacheRequests
   static private NamenodeProtocols proto;
   static private CacheManipulator prevCacheManipulator;
 
+  static {
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
+  }
+
   @Before
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
@@ -510,8 +515,9 @@ public class TestPathBasedCacheRequests
     // Create some cache entries
     int numEntries = 10;
     String entryPrefix = "/party-";
+    long prevId = -1;
     for (int i=0; i<numEntries; i++) {
-      dfs.addPathBasedCacheDirective(
+      prevId = dfs.addPathBasedCacheDirective(
           new PathBasedCacheDirective.Builder().
             setPath(new Path(entryPrefix + i)).setPool(pool).build());
     }
@@ -549,6 +555,11 @@ public class TestPathBasedCacheRequests
       assertEquals(pool, cd.getPool());
     }
     assertFalse("Unexpected # of cache directives found", dit.hasNext());
+
+    long nextId = dfs.addPathBasedCacheDirective(
+          new PathBasedCacheDirective.Builder().
+            setPath(new Path("/foobar")).setPool(pool).build());
+    assertEquals(prevId + 1, nextId);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java?rev=1543510&r1=1543509&r2=1543510&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java Tue Nov 19 17:26:23 2013
@@ -28,6 +28,11 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.security.AccessControlException;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -76,6 +81,47 @@ public class TestOpenFilesWithSnapshot {
     cluster.restartNameNode();
   }
 
+  @Test
+  public void testWithCheckpoint() throws Exception {
+    Path path = new Path("/test");
+    doWriteAndAbort(fs, path);
+    fs.delete(new Path("/test/test"), true);
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+    cluster.restartNameNode(true);
+
+    // read snapshot file after restart
+    String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
+        "s1/test/test2");
+    DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
+    String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
+        "s1/test/test3");
+    DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
+  }
+
+  @Test
+  public void testFilesDeletionWithCheckpoint() throws Exception {
+    Path path = new Path("/test");
+    doWriteAndAbort(fs, path);
+    fs.delete(new Path("/test/test/test2"), true);
+    fs.delete(new Path("/test/test/test3"), true);
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+    cluster.restartNameNode(true);
+
+    // read snapshot file after restart
+    String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
+        "s1/test/test2");
+    DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
+    String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
+        "s1/test/test3");
+    DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
+  }
+
   private void doWriteAndAbort(DistributedFileSystem fs, Path path)
       throws IOException {
     fs.mkdirs(path);
@@ -110,4 +156,55 @@ public class TestOpenFilesWithSnapshot {
     DFSTestUtil.abortStream((DFSOutputStream) out2.getWrappedStream());
     fs.createSnapshot(path, "s1");
   }
+
+  @Test
+  public void testOpenFilesWithMultipleSnapshots() throws Exception {
+    doTestMultipleSnapshots(true);
+  }
+
+  @Test
+  public void testOpenFilesWithMultipleSnapshotsWithoutCheckpoint()
+      throws Exception {
+    doTestMultipleSnapshots(false);
+  }
+
+  private void doTestMultipleSnapshots(boolean saveNamespace)
+      throws IOException, AccessControlException {
+    Path path = new Path("/test");
+    doWriteAndAbort(fs, path);
+    fs.createSnapshot(path, "s2");
+    fs.delete(new Path("/test/test"), true);
+    fs.deleteSnapshot(path, "s2");
+    if (saveNamespace) {
+      NameNode nameNode = cluster.getNameNode();
+      NameNodeAdapter.enterSafeMode(nameNode, false);
+      NameNodeAdapter.saveNamespace(nameNode);
+      NameNodeAdapter.leaveSafeMode(nameNode);
+    }
+    cluster.restartNameNode(true);
+  }
+
+  @Test
+  public void testOpenFilesWithRename() throws Exception {
+    Path path = new Path("/test");
+    doWriteAndAbort(fs, path);
+
+    // check for zero sized blocks
+    Path fileWithEmptyBlock = new Path("/test/test/test4");
+    fs.create(fileWithEmptyBlock);
+    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
+    String clientName = fs.getClient().getClientName();
+    // create one empty block
+    nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    fs.createSnapshot(path, "s2");
+
+    fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
+    fs.delete(new Path("/test/test-renamed"), true);
+    NameNode nameNode = cluster.getNameNode();
+    NameNodeAdapter.enterSafeMode(nameNode, false);
+    NameNodeAdapter.saveNamespace(nameNode);
+    NameNodeAdapter.leaveSafeMode(nameNode);
+    cluster.restartNameNode(true);
+  }
 }
\ No newline at end of file
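Each of the new tests above repeats the same checkpoint-then-restart sequence: enter safe mode, saveNamespace, leave safe mode, then restart the NameNode so it boots from the freshly written fsimage. A hypothetical helper, not part of the patch, that factors that sequence out using the same NameNodeAdapter and MiniDFSCluster calls the tests already use:

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

/**
 * Hypothetical test helper: force a checkpoint inside safe mode, then
 * restart the NameNode so subsequent assertions run against state loaded
 * from the new fsimage rather than from the edit log.
 */
final class CheckpointTestUtil {
  private CheckpointTestUtil() {}

  static void checkpointAndRestart(MiniDFSCluster cluster) throws Exception {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);   // block mutations
    NameNodeAdapter.saveNamespace(nameNode);          // write the fsimage
    NameNodeAdapter.leaveSafeMode(nameNode);
    cluster.restartNameNode(true);                    // boot from the new image
  }
}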