Author: jing9
Date: Fri Feb 14 07:18:58 2014
New Revision: 1568204

URL: http://svn.apache.org/r1568204
Log:
HDFS-5537. Merge change r1546184 from trunk.
Added:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
      - copied unchanged from r1546184, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiff.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
      - copied unchanged from r1546184, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
Removed:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshot.java
Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Feb 14 07:18:58 2014
@@ -46,6 +46,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5286. Flatten INodeDirectory hierarchy: Replace INodeDirectoryWithQuota
     with DirectoryWithQuotaFeature. (szetszwo)
 
+    HDFS-5537. Remove FileWithSnapshot interface. (jing9 via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Feb 14 07:18:58 2014
@@ -475,8 +475,8 @@ public class FSDirectory implements Clos
   boolean unprotectedRemoveBlock(String path, INodeFile fileNode,
       Block block) throws IOException {
-    Preconditions.checkArgument(fileNode.isUnderConstruction());
     // modify file-> block and blocksMap
+    // fileNode should be under construction
     boolean removed = fileNode.removeLastBlock(block);
     if (!removed) {
       return false;

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java Fri Feb 14 07:18:58 2014
@@ -57,7 +57,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java Fri Feb 14 07:18:58 2014
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 
 /**
- * I-node for file being written.
+ * Feature for under-construction file.
  */
 @InterfaceAudience.Private
 public class FileUnderConstructionFeature extends INodeFile.Feature {
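The corrected javadoc reflects the feature-based design: under-construction state is a feature attached to an INodeFile rather than a distinct subclass. A minimal, self-contained sketch of that attachment pattern follows; the names are simplified illustrations, not the real HDFS classes.

    // Simplified sketch of the feature-attachment pattern; not the actual HDFS API.
    import java.util.ArrayList;
    import java.util.List;

    class SketchINodeFile {
      interface Feature {}

      static class UnderConstructionFeature implements Feature {
        final String clientName;
        UnderConstructionFeature(String clientName) { this.clientName = clientName; }
      }

      private final List<Feature> features = new ArrayList<>();

      void addFeature(Feature f) { features.add(f); }

      /** A file is under construction iff the corresponding feature is attached. */
      boolean isUnderConstruction() {
        for (Feature f : features) {
          if (f instanceof UnderConstructionFeature) {
            return true;
          }
        }
        return false;
      }
    }

One consequence of this design, visible throughout this commit, is that state checks become runtime queries (feature present, instanceof) instead of static type distinctions, which is why the patch tightens those checks with explicit preconditions.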
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java Fri Feb 14 07:18:58 2014
@@ -35,7 +35,6 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ChunkedArrayList;
@@ -315,7 +314,7 @@ public abstract class INode implements I
    * 1.2.2 Else do nothing with the current INode. Recursively clean its
    * children.
    *
-   * 1.3 The current inode is a {@link FileWithSnapshot}.
+   * 1.3 The current inode is a file with snapshot.
    * Call recordModification(..) to capture the current states.
    * Mark the INode as deleted.
    *
@@ -328,7 +327,7 @@ public abstract class INode implements I
    * 2. When deleting a snapshot.
    * 2.1 To clean {@link INodeFile}: do nothing.
    * 2.2 To clean {@link INodeDirectory}: recursively clean its children.
-   * 2.3 To clean {@link FileWithSnapshot}: delete the corresponding snapshot in
+   * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in
    * its diff list.
    * 2.4 To clean {@link INodeDirectoryWithSnapshot}: delete the corresponding
    * snapshot in its diff list. Recursively clean its children.
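The two numbered cases in this javadoc are the heart of snapshot cleanup: deleting the current tree marks a snapshotted file deleted and records a diff, while deleting a snapshot removes one entry from the diff list. A rough, compilable sketch of the file-handling rules, using a hypothetical FileNode in place of the real INode hierarchy (the actual logic lives in the cleanSubtree(..) overrides):

    // Hypothetical model of the cleanup cases above; not the real HDFS classes.
    import java.util.ArrayList;
    import java.util.List;

    class CleanSubtreeSketch {
      static class FileNode {
        final List<Integer> diffIds = new ArrayList<>();  // per-snapshot diff list
        boolean currentDeleted = false;
      }

      /** Case 1: delete the current file while snapshots still reference it. */
      static void deleteCurrentTree(FileNode file, int latestSnapshotId) {
        file.diffIds.add(latestSnapshotId);  // capture the state snapshots still need
        file.currentDeleted = true;          // mark deleted instead of destroying
      }

      /** Case 2: delete one snapshot: drop its entry from the diff list. */
      static void deleteSnapshot(FileNode file, int snapshotId) {
        file.diffIds.remove(Integer.valueOf(snapshotId));
      }
    }

Once both the current file is deleted and the diff list is empty, nothing references the inode and its blocks can actually be collected, which is what the new collectBlocksAndClear in INodeFileWithSnapshot (further down in this commit) checks first.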
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Fri Feb 14 07:18:58 2014
@@ -33,10 +33,8 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
@@ -189,7 +187,7 @@ public class INodeFile extends INodeWith
   INodeFile toUnderConstruction(String clientName, String clientMachine,
       DatanodeDescriptor clientNode) {
     Preconditions.checkState(!isUnderConstruction(),
-        "file is already an INodeFileUnderConstruction");
+        "file is already under construction");
     FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
         clientName, clientMachine, clientNode);
     addFeature(uc);
@@ -201,6 +199,8 @@ public class INodeFile extends INodeWith
    * feature.
    */
   public INodeFile toCompleteFile(long mtime) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
     if (uc != null) {
       assertAllBlocksComplete();
@@ -222,15 +222,16 @@ public class INodeFile extends INodeWith
     }
   }
 
-  @Override //BlockCollection
+  @Override // BlockCollection
   public void setBlock(int index, BlockInfo blk) {
     this.blocks[index] = blk;
   }
 
-  @Override // BlockCollection
+  @Override // BlockCollection, the file should be under construction
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] locations) throws IOException {
-    Preconditions.checkState(isUnderConstruction());
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
 
     if (numBlocks() == 0) {
       throw new IOException("Failed to set last block: File is empty.");
@@ -248,6 +249,8 @@ public class INodeFile extends INodeWith
    * the last one on the list.
    */
   boolean removeLastBlock(Block oldblock) {
+    Preconditions.checkState(isUnderConstruction(),
+        "file is no longer under construction");
     if (blocks == null || blocks.length == 0) {
       return false;
     }
@@ -299,10 +302,8 @@ public class INodeFile extends INodeWith
   }
 
   @Override
-  public final short getBlockReplication() {
-    return this instanceof FileWithSnapshot?
-        Util.getBlockReplication((FileWithSnapshot)this)
-        : getFileReplication(null);
+  public short getBlockReplication() {
+    return getFileReplication(null);
   }
 
   /** Set the replication factor of this file. */
@@ -422,8 +423,8 @@ public class INodeFile extends INodeWith
     clear();
     removedINodes.add(this);
 
-    if (this instanceof FileWithSnapshot) {
-      ((FileWithSnapshot) this).getDiffs().clear();
+    if (this instanceof INodeFileWithSnapshot) {
+      ((INodeFileWithSnapshot) this).getDiffs().clear();
     }
   }
 
@@ -438,8 +439,8 @@ public class INodeFile extends INodeWith
       boolean useCache, int lastSnapshotId) {
     long nsDelta = 1;
     final long dsDelta;
-    if (this instanceof FileWithSnapshot) {
-      FileDiffList fileDiffList = ((FileWithSnapshot) this).getDiffs();
+    if (this instanceof INodeFileWithSnapshot) {
+      FileDiffList fileDiffList = ((INodeFileWithSnapshot) this).getDiffs();
       Snapshot last = fileDiffList.getLastSnapshot();
       List<FileDiff> diffs = fileDiffList.asList();
@@ -471,8 +472,8 @@ public class INodeFile extends INodeWith
   private void computeContentSummary4Snapshot(final Content.Counts counts) {
     // file length and diskspace only counted for the latest state of the file
     // i.e. either the current state or the last snapshot
-    if (this instanceof FileWithSnapshot) {
-      final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
+    if (this instanceof INodeFileWithSnapshot) {
+      final INodeFileWithSnapshot withSnapshot = (INodeFileWithSnapshot) this;
       final FileDiffList diffs = withSnapshot.getDiffs();
       final int n = diffs.asList().size();
       counts.add(Content.FILE, n);
@@ -488,8 +489,8 @@ public class INodeFile extends INodeWith
   }
 
   private void computeContentSummary4Current(final Content.Counts counts) {
-    if (this instanceof FileWithSnapshot
-        && ((FileWithSnapshot)this).isCurrentFileDeleted()) {
+    if (this instanceof INodeFileWithSnapshot
+        && ((INodeFileWithSnapshot) this).isCurrentFileDeleted()) {
       return;
     }
 
@@ -508,8 +509,9 @@ public class INodeFile extends INodeWith
    * otherwise, get the file size from the given snapshot.
    */
   public final long computeFileSize(Snapshot snapshot) {
-    if (snapshot != null && this instanceof FileWithSnapshot) {
-      final FileDiff d = ((FileWithSnapshot)this).getDiffs().getDiff(snapshot);
+    if (snapshot != null && this instanceof INodeFileWithSnapshot) {
+      final FileDiff d = ((INodeFileWithSnapshot) this).getDiffs().getDiff(
+          snapshot);
       if (d != null) {
         return d.getFileSize();
       }
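The under-construction lifecycle above is now guarded consistently with Guava Preconditions, each check carrying an explicit message. A minimal sketch of the same guard pattern, assuming only Guava on the classpath (SketchFile and its boolean field are illustrative stand-ins, not the real INodeFile, which tracks this via its attached feature):

    import com.google.common.base.Preconditions;

    class SketchFile {
      private boolean underConstruction = false;

      SketchFile toUnderConstruction(String clientName) {
        Preconditions.checkState(!underConstruction,
            "file is already under construction");
        underConstruction = true;  // the real code attaches a FileUnderConstructionFeature
        return this;
      }

      SketchFile toCompleteFile(long mtime) {
        Preconditions.checkState(underConstruction,
            "file is no longer under construction");
        underConstruction = false;  // the real code removes the feature and sets mtime
        return this;
      }
    }

checkState throws IllegalStateException with the given message, so an illegal transition fails fast at the call site instead of corrupting namespace state later, which is why the patch also moved the removeLastBlock guard from FSDirectory into INodeFile itself.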
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java Fri Feb 14 07:18:58 2014
@@ -26,8 +26,8 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
@@ -102,8 +102,8 @@ public abstract class INodeReference ext
     }
     if (wn != null) {
       INode referred = wc.getReferredINode();
-      if (referred instanceof FileWithSnapshot) {
-        return ((FileWithSnapshot) referred).getDiffs().getPrior(
+      if (referred instanceof INodeFileWithSnapshot) {
+        return ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
             wn.lastSnapshotId);
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         return ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -547,8 +547,8 @@ public abstract class INodeReference ext
     private Snapshot getSelfSnapshot() {
       INode referred = getReferredINode().asReference().getReferredINode();
       Snapshot snapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        snapshot = ((FileWithSnapshot) referred).getDiffs().getPrior(
+      if (referred instanceof INodeFileWithSnapshot) {
+        snapshot = ((INodeFileWithSnapshot) referred).getDiffs().getPrior(
             lastSnapshotId);
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         snapshot = ((INodeDirectoryWithSnapshot) referred).getDiffs().getPrior(
@@ -637,10 +637,10 @@ public abstract class INodeReference ext
       Snapshot snapshot = getSelfSnapshot(prior);
 
       INode referred = getReferredINode().asReference().getReferredINode();
-      if (referred instanceof FileWithSnapshot) {
+      if (referred instanceof INodeFileWithSnapshot) {
         // if referred is a file, it must be a FileWithSnapshot since we did
         // recordModification before the rename
-        FileWithSnapshot sfile = (FileWithSnapshot) referred;
+        INodeFileWithSnapshot sfile = (INodeFileWithSnapshot) referred;
         // make sure we mark the file as deleted
         sfile.deleteCurrentFile();
         try {
@@ -671,8 +671,8 @@ public abstract class INodeReference ext
       WithCount wc = (WithCount) getReferredINode().asReference();
       INode referred = wc.getReferredINode();
       Snapshot lastSnapshot = null;
-      if (referred instanceof FileWithSnapshot) {
-        lastSnapshot = ((FileWithSnapshot) referred).getDiffs()
+      if (referred instanceof INodeFileWithSnapshot) {
+        lastSnapshot = ((INodeFileWithSnapshot) referred).getDiffs()
             .getLastSnapshot();
       } else if (referred instanceof INodeDirectoryWithSnapshot) {
         lastSnapshot = ((INodeDirectoryWithSnapshot) referred)

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java Fri Feb 14 07:18:58 2014
@@ -432,8 +432,8 @@ public class INodeDirectorySnapshottable
           parentPath.remove(parentPath.size() - 1);
         }
       }
-    } else if (node.isFile() && node.asFile() instanceof FileWithSnapshot) {
-      FileWithSnapshot file = (FileWithSnapshot) node.asFile();
+    } else if (node.isFile() && node.asFile() instanceof INodeFileWithSnapshot) {
+      INodeFileWithSnapshot file = (INodeFileWithSnapshot) node.asFile();
       Snapshot earlierSnapshot = diffReport.isFromEarlier() ? diffReport.from
           : diffReport.to;
       Snapshot laterSnapshot = diffReport.isFromEarlier() ? diffReport.to
@@ -441,7 +441,7 @@ public class INodeDirectorySnapshottable
       boolean change = file.getDiffs().changedBetweenSnapshots(earlierSnapshot,
           laterSnapshot);
       if (change) {
-        diffReport.addFileDiff(file.asINodeFile(), relativePath);
+        diffReport.addFileDiff(file, relativePath);
       }
     }
   }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java Fri Feb 14 07:18:58 2014
@@ -804,10 +804,10 @@ public class INodeDirectoryWithSnapshot
         // For DstReference node, since the node is not in the created list of
         // prior, we should treat it as regular file/dir
       } else if (topNode.isFile()
-          && topNode.asFile() instanceof FileWithSnapshot) {
-        FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
-        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
-            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
+          && topNode.asFile() instanceof INodeFileWithSnapshot) {
+        INodeFileWithSnapshot fs = (INodeFileWithSnapshot) topNode.asFile();
+        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, fs,
+            collectedBlocks, removedINodes, countDiffChange));
       } else if (topNode.isDirectory()) {
         INodeDirectory dir = topNode.asDirectory();
         ChildrenDiff priorChildrenDiff = null;
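Several of these call sites resolve a prior snapshot through getDiffs().getPrior(lastSnapshotId): roughly, a search over an ordered diff list for the latest snapshot taken before a given one. A toy, self-contained version over plain snapshot ids (the real list stores FileDiff/DirectoryDiff entries, and the exact boundary semantics in HDFS are richer than this sketch):

    import java.util.Arrays;
    import java.util.List;

    class PriorSnapshotSketch {
      /**
       * Return the largest snapshot id strictly smaller than anchorId,
       * or -1 if none exists. diffIds must be sorted ascending, as the
       * per-inode diff lists are.
       */
      static int getPrior(List<Integer> diffIds, int anchorId) {
        int prior = -1;
        for (int id : diffIds) {
          if (id >= anchorId) {
            break;
          }
          prior = id;
        }
        return prior;
      }

      public static void main(String[] args) {
        List<Integer> diffs = Arrays.asList(2, 5, 9);
        System.out.println(getPrior(diffs, 9));  // prints 5
        System.out.println(getPrior(diffs, 2));  // prints -1
      }
    }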
countDiffChange)); } else if (topNode.isDirectory()) { INodeDirectory dir = topNode.asDirectory(); ChildrenDiff priorChildrenDiff = null; Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java?rev=1568204&r1=1568203&r2=1568204&view=diff ============================================================================== --- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java (original) +++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java Fri Feb 14 07:18:58 2014 @@ -21,6 +21,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; @@ -31,14 +32,13 @@ import org.apache.hadoop.hdfs.server.nam * Represent an {@link INodeFile} that is snapshotted. */ @InterfaceAudience.Private -public class INodeFileWithSnapshot extends INodeFile - implements FileWithSnapshot { +public class INodeFileWithSnapshot extends INodeFile { private final FileDiffList diffs; private boolean isCurrentFileDeleted = false; public INodeFileWithSnapshot(INodeFile f) { - this(f, f instanceof FileWithSnapshot? - ((FileWithSnapshot)f).getDiffs(): null); + this(f, f instanceof INodeFileWithSnapshot ? + ((INodeFileWithSnapshot) f).getDiffs() : null); } public INodeFileWithSnapshot(INodeFile f, FileDiffList diffs) { @@ -46,12 +46,12 @@ public class INodeFileWithSnapshot exten this.diffs = diffs != null? diffs: new FileDiffList(); } - @Override + /** Is the current file deleted? */ public boolean isCurrentFileDeleted() { return isCurrentFileDeleted; } - @Override + /** Delete the file from the current tree */ public void deleteCurrentFile() { isCurrentFileDeleted = true; } @@ -70,12 +70,7 @@ public class INodeFileWithSnapshot exten return this; } - @Override - public INodeFile asINodeFile() { - return this; - } - - @Override + /** @return the file diff list. */ public FileDiffList getDiffs() { return diffs; } @@ -90,7 +85,7 @@ public class INodeFileWithSnapshot exten recordModification(prior, null); deleteCurrentFile(); } - Util.collectBlocksAndClear(this, collectedBlocks, removedINodes); + this.collectBlocksAndClear(collectedBlocks, removedINodes); return Quota.Counts.newInstance(); } else { // delete a snapshot prior = getDiffs().updatePrior(snapshot, prior); @@ -104,4 +99,100 @@ public class INodeFileWithSnapshot exten return super.toDetailString() + (isCurrentFileDeleted()? "(DELETED), ": ", ") + diffs; } + + /** + * @return block replication, which is the max file replication among + * the file and the diff list. + */ + @Override + public short getBlockReplication() { + short max = isCurrentFileDeleted() ? 
0 : getFileReplication(); + for(FileDiff d : getDiffs()) { + if (d.snapshotINode != null) { + final short replication = d.snapshotINode.getFileReplication(); + if (replication > max) { + max = replication; + } + } + } + return max; + } + + /** + * If some blocks at the end of the block list no longer belongs to + * any inode, collect them and update the block list. + */ + void collectBlocksAndClear(final BlocksMapUpdateInfo info, + final List<INode> removedINodes) { + // check if everything is deleted. + if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) { + destroyAndCollectBlocks(info, removedINodes); + return; + } + + // find max file size. + final long max; + if (isCurrentFileDeleted()) { + final FileDiff last = getDiffs().getLast(); + max = last == null? 0: last.getFileSize(); + } else { + max = computeFileSize(); + } + + collectBlocksBeyondMax(max, info); + } + + private void collectBlocksBeyondMax(final long max, + final BlocksMapUpdateInfo collectedBlocks) { + final BlockInfo[] oldBlocks = getBlocks(); + if (oldBlocks != null) { + //find the minimum n such that the size of the first n blocks > max + int n = 0; + for(long size = 0; n < oldBlocks.length && max > size; n++) { + size += oldBlocks[n].getNumBytes(); + } + + // starting from block n, the data is beyond max. + if (n < oldBlocks.length) { + // resize the array. + final BlockInfo[] newBlocks; + if (n == 0) { + newBlocks = null; + } else { + newBlocks = new BlockInfo[n]; + System.arraycopy(oldBlocks, 0, newBlocks, 0, n); + } + + // set new blocks + setBlocks(newBlocks); + + // collect the blocks beyond max. + if (collectedBlocks != null) { + for(; n < oldBlocks.length; n++) { + collectedBlocks.addDeleteBlock(oldBlocks[n]); + } + } + } + } + } + + Quota.Counts updateQuotaAndCollectBlocks(FileDiff removed, + BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) { + long oldDiskspace = this.diskspaceConsumed(); + if (removed.snapshotINode != null) { + short replication = removed.snapshotINode.getFileReplication(); + short currentRepl = getBlockReplication(); + if (currentRepl == 0) { + oldDiskspace = computeFileSize(true, true) * replication; + } else if (replication > currentRepl) { + oldDiskspace = oldDiskspace / getBlockReplication() + * replication; + } + } + + this.collectBlocksAndClear(collectedBlocks, removedINodes); + + long dsDelta = oldDiskspace - diskspaceConsumed(); + return Quota.Counts.newInstance(0, dsDelta); + } } Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1568204&r1=1568203&r2=1568204&view=diff ============================================================================== --- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original) +++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Fri Feb 14 07:18:58 2014 @@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.nam import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; import org.apache.hadoop.hdfs.server.namenode.INodeReference; -import 
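The new collectBlocksBeyondMax keeps the minimum prefix of blocks needed to cover the largest file size still referenced by the current file or any snapshot, and collects the rest for deletion. A self-contained sketch of that truncation rule, using plain long block sizes in place of BlockInfo[] and BlocksMapUpdateInfo:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class TruncateBeyondMaxSketch {
      /**
       * Keep the minimum prefix of blocks whose total size covers max;
       * return the collected (now unreferenced) tail blocks.
       */
      static List<Long> collectBlocksBeyondMax(List<Long> blocks, long max) {
        int n = 0;
        long size = 0;
        // find the minimum n such that the size of the first n blocks >= max
        while (n < blocks.size() && size < max) {
          size += blocks.get(n);
          n++;
        }
        List<Long> collected = new ArrayList<>(blocks.subList(n, blocks.size()));
        blocks.subList(n, blocks.size()).clear();  // truncate in place
        return collected;
      }

      public static void main(String[] args) {
        List<Long> blocks = new ArrayList<>(Arrays.asList(64L, 64L, 64L, 64L));
        // max surviving file size is 130 bytes, so the first three
        // blocks (64 + 64 + 64 >= 130) must be kept.
        System.out.println(collectBlocksBeyondMax(blocks, 130));  // [64]
        System.out.println(blocks);                               // [64, 64, 64]
      }
    }

Blocks are only ever trimmed from the tail because a file diff records the file length at snapshot time, and HDFS files grow by append, so a shorter snapshot always maps onto a prefix of the current block list.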
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java Fri Feb 14 07:18:58 2014
@@ -36,8 +36,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
@@ -99,8 +97,8 @@ public class SnapshotFSImageFormat {
 
   public static void saveFileDiffList(final INodeFile file,
       final DataOutput out) throws IOException {
-    saveINodeDiffs(file instanceof FileWithSnapshot?
-        ((FileWithSnapshot)file).getDiffs(): null, out, null);
+    saveINodeDiffs(file instanceof INodeFileWithSnapshot?
+        ((INodeFileWithSnapshot) file).getDiffs(): null, out, null);
   }
 
   public static FileDiffList loadFileDiffList(DataInput in,

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1568204&r1=1568203&r2=1568204&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Fri Feb 14 07:18:58 2014
@@ -63,7 +63,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.ChildrenDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;