HDFS-12638. Delete copy-on-truncate block along with the original block, when deleting a file being truncated. Contributed by Konstantin Shvachko.
(cherry picked from commit 60fd0d7fd73198fd610e59d1a4cd007c5fcc7205)
(cherry picked from commit 19c18f7cc90c8243af634600c4c69ec1109fcce6)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3fe5640
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3fe5640
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3fe5640

Branch: refs/heads/branch-2.8.3
Commit: b3fe56402d908019d99af1f1f4fc65cb1d1436a2
Parents: a32ae95
Author: Konstantin V Shvachko <[email protected]>
Authored: Thu Nov 30 18:18:09 2017 -0800
Committer: Junping Du <[email protected]>
Committed: Mon Dec 4 17:02:47 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INode.java      | 14 +++++++
 .../hdfs/server/namenode/TestFileTruncate.java  | 41 ++++++++++++++++++++
 2 files changed, 55 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fe5640/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 779ae13..838497c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -33,9 +33,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
@@ -1051,6 +1053,18 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       assert toDelete != null : "toDelete is null";
       toDelete.delete();
       toDeleteList.add(toDelete);
+      // If the file is being truncated
+      // the copy-on-truncate block should also be collected for deletion
+      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
+      if(uc == null) {
+        return;
+      }
+      Block truncateBlock = uc.getTruncateBlock();
+      if(truncateBlock == null || truncateBlock.equals(toDelete)) {
+        return;
+      }
+      assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
+      addDeleteBlock((BlockInfo) truncateBlock);
     }
 
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {
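
For readers skimming the change: the new lines in
INode.BlocksMapUpdateInfo#addDeleteBlock distinguish HDFS's two truncate
modes. A hedged sketch of that distinction, using only APIs the patch
itself touches (the helper name hasSeparateTruncateBlock is hypothetical,
not part of the patch):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;

    /** Hypothetical helper mirroring the guard added by this patch. */
    static boolean hasSeparateTruncateBlock(BlockInfo toDelete) {
      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
      if (uc == null) {
        return false; // block is complete; no truncate is in progress
      }
      Block truncateBlock = uc.getTruncateBlock();
      // In-place truncate reuses the last block: truncateBlock is either null
      // or equal to toDelete, so deleting toDelete already covers it.
      // Copy-on-truncate (used when the file is in a snapshot or a rolling
      // upgrade is in progress) allocates a distinct block, which must be
      // collected for deletion as well; that is the block this patch stops
      // leaking.
      return truncateBlock != null && !truncateBlock.equals(toDelete);
    }
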
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3fe5640/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index f97939a..292eefd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -1153,6 +1154,46 @@ public class TestFileTruncate {
     fs.delete(parent, true);
   }
 
+  /**
+   * While rolling upgrade is in-progress the test truncates a file
+   * such that copy-on-truncate is triggered, then deletes the file,
+   * and makes sure that no blocks involved in truncate are hanging around.
+   */
+  @Test
+  public void testTruncateWithRollingUpgrade() throws Exception {
+    final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    //start rolling upgrade
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
+    assertEquals("could not prepare for rolling upgrade", 0, status);
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    Path dir = new Path("/testTruncateWithRollingUpgrade");
+    fs.mkdirs(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[3];
+    ThreadLocalRandom.current().nextBytes(data);
+    writeContents(data, data.length, p);
+
+    assertEquals("block num should 1", 1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+
+    final boolean isReady = fs.truncate(p, 2);
+    assertFalse("should be copy-on-truncate", isReady);
+    assertEquals("block num should 2", 2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    fs.delete(p, true);
+
+    assertEquals("block num should 0", 0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
+    assertEquals("could not finalize rolling upgrade", 0, status);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
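
A condensed, hedged sketch of the client-side sequence the new test
exercises (assumes a running MiniDFSCluster 'cluster' and its
DistributedFileSystem 'fs', as in TestFileTruncate; the -rollingUpgrade
prepare/finalize steps are elided and the path name is hypothetical):

    Path p = new Path("/truncate-then-delete");
    byte[] data = new byte[3];
    ThreadLocalRandom.current().nextBytes(data);
    writeContents(data, data.length, p);   // file now has one block

    boolean isReady = fs.truncate(p, 2);
    // isReady == false: with a rolling upgrade in progress the NameNode
    // performs copy-on-truncate, so a second block now exists.

    fs.delete(p, true);
    // Before this patch the copy-on-truncate block survived the delete and
    // cluster.getNamesystem().getFSDirectory().getBlockManager()
    // .getTotalBlocks() stayed at 1; with the patch both blocks are
    // collected and the count returns to 0.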
