Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 c9e026821 -> 1299357a0
HDFS-8479. Erasure coding: fix striping related logic in FSDirWriteFileOp to sync with HDFS-8421. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1299357a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1299357a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1299357a

Branch: refs/heads/HDFS-7285
Commit: 1299357a05c52ad45513ed0ea854edc9c7ec3de8
Parents: c9e0268
Author: Jing Zhao <[email protected]>
Authored: Tue May 26 16:06:50 2015 -0700
Committer: Jing Zhao <[email protected]>
Committed: Tue May 26 16:06:50 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt               |  3 +++
 .../hadoop/hdfs/server/namenode/FSDirWriteFileOp.java  |  7 +++++++
 .../hadoop/hdfs/server/namenode/FSDirectory.java       |  5 -----
 .../server/blockmanagement/TestReplicationPolicy.java  | 14 +++++++++++---
 4 files changed, 21 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1299357a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index d045ee5..ccc3cb0 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -259,3 +259,6 @@
     HDFS-8382. Remove chunkSize and initialize from erasure coder. (Kai Zheng)
 
     HDFS-8408. Revisit and refactor ErasureCodingInfo (vinayakumarb)
+
+    HDFS-8479. Erasure coding: fix striping related logic in FSDirWriteFileOp to
+    sync with HDFS-8421. (Zhe Zhang via jing9)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1299357a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 11b6d8f..c037cd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -494,6 +494,10 @@ class FSDirWriteFileOp {
     try {
       INodesInPath iip = fsd.addINode(existing, newNode);
       if (iip != null) {
+        // check if the file is in an EC zone
+        if (fsd.isInECZone(iip)) {
+          newNode.addStripedBlocksFeature();
+        }
         if (aclEntries != null) {
           AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
         }
@@ -582,6 +586,9 @@ class FSDirWriteFileOp {
     fsd.writeLock();
     try {
       newiip = fsd.addINode(existing, newNode);
+      if (newiip != null && fsd.isInECZone(newiip)) {
+        newNode.addStripedBlocksFeature();
+      }
     } finally {
       fsd.writeUnlock();
     }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1299357a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0e3694f..32c8e05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -33,10 +33,8 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
-import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
@@ -54,9 +52,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
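
Both FSDirWriteFileOp hunks above apply the same pattern: after fsd.addINode() resolves the new file's INodesInPath, the path is checked against the erasure coding zone and, when it matches, the striped blocks feature is attached to the new INode. A minimal sketch of that pattern follows; the nested interfaces are simplified stand-ins, not the actual NameNode classes.

// Simplified, hypothetical sketch of the EC-zone check added above in
// FSDirWriteFileOp. The nested interfaces are stand-ins for the real HDFS types.
final class EcZoneTaggingSketch {
  interface INodesInPath { }
  interface INodeFile { void addStripedBlocksFeature(); }
  interface FSDirectory { boolean isInECZone(INodesInPath iip); }

  static void tagIfStriped(FSDirectory fsd, INodesInPath iip, INodeFile newNode) {
    // A file created inside an erasure coding zone carries striped block
    // metadata instead of the default contiguous block list.
    if (iip != null && fsd.isInECZone(iip)) {
      newNode.addStripedBlocksFeature();
    }
  }
}
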
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1299357a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 6e92264..e699e7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -34,7 +34,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 
@@ -1209,8 +1208,17 @@ public class TestReplicationPolicy {
     BlockManager bm = new BlockManager(mockNS, new HdfsConfiguration());
     UnderReplicatedBlocks underReplicatedBlocks = bm.neededReplications;
 
-    BlockInfo block1 = genBlockInfo(ThreadLocalRandom.current().nextLong());
-    BlockInfo block2 = genBlockInfo(ThreadLocalRandom.current().nextLong());
+    long blkID1 = ThreadLocalRandom.current().nextLong();
+    if (blkID1 < 0) {
+      blkID1 *= -1;
+    }
+    long blkID2 = ThreadLocalRandom.current().nextLong();
+    if (blkID2 < 0) {
+      blkID2 *= -1;
+    }
+
+    BlockInfo block1 = genBlockInfo(blkID1);
+    BlockInfo block2 = genBlockInfo(blkID2);
 
     // Adding QUEUE_UNDER_REPLICATED block
     underReplicatedBlocks.add(block1, 0, 1, 1);
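
The TestReplicationPolicy change above flips the sign of any negative random draw so that genBlockInfo only receives non-negative block IDs. A standalone illustration of the same idea, plus a bounded-draw alternative that is a suggestion rather than part of this patch:

import java.util.concurrent.ThreadLocalRandom;

// Standalone illustration of producing non-negative random block IDs.
public class NonNegativeBlockId {
  public static void main(String[] args) {
    // Approach used in the patch: draw any long, flip the sign if negative.
    // (Long.MIN_VALUE is the one value a sign flip cannot make positive.)
    long blkID1 = ThreadLocalRandom.current().nextLong();
    if (blkID1 < 0) {
      blkID1 *= -1;
    }

    // Alternative (not in the patch): bound the draw to [0, Long.MAX_VALUE).
    long blkID2 = ThreadLocalRandom.current().nextLong(Long.MAX_VALUE);

    System.out.println(blkID1 + " " + blkID2);
  }
}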
